gcc/dwarf2cfi.c (imported from savannah.gnu.org, official-gcc.git; blob 5a096adcd0ff09b5577fbb8c69caa8a264e63270)
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "function.h"
28 #include "basic-block.h"
29 #include "dwarf2.h"
30 #include "dwarf2out.h"
31 #include "dwarf2asm.h"
32 #include "ggc.h"
33 #include "hash-table.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The latter will be a DW_CFA_def_cfa_expression. */
67 dw_cfa_location cfa;
68 dw_cfi_ref cfa_cfi;
70 /* The expressions for any register column that is saved. */
71 cfi_vec reg_save;
72 } dw_cfi_row;
74 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
75 typedef struct GTY(()) reg_saved_in_data_struct {
76 rtx orig_reg;
77 rtx saved_in_reg;
78 } reg_saved_in_data;
81 /* Since we no longer have a proper CFG, we're going to create a facsimile
82 of one on the fly while processing the frame-related insns.
84 We create dw_trace_info structures for each extended basic block beginning
85 and ending at a "save point". Save points are labels, barriers, certain
86 notes, and of course the beginning and end of the function.
88 As we encounter control transfer insns, we propagate the "current"
89 row state across the edges to the starts of traces. When checking is
90 enabled, we validate that we propagate the same data from all sources.
92 All traces are members of the TRACE_INFO array, in the order in which
93 they appear in the instruction stream.
95 All save points are present in the TRACE_INDEX hash, mapping the insn
96 starting a trace to the dw_trace_info describing the trace. */
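/* For illustration only (not from the original sources): given an insn
   stream of the shape

	prologue insns ... (note NOTE_INSN_PROLOGUE_END) body insns ...
	(code_label L1) ... (note NOTE_INSN_EPILOGUE_BEG) epilogue insns

   we would create one trace starting at the head of the function, one at
   the PROLOGUE_END note, one at the label L1 (if it starts a basic block),
   and one at the EPILOGUE_BEG note; each of those insns is a save point
   and keys an entry in TRACE_INDEX.  */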
98 typedef struct
100 /* The insn that begins the trace. */
101 rtx head;
103 /* The row state at the beginning and end of the trace. */
104 dw_cfi_row *beg_row, *end_row;
106 /* Tracking for DW_CFA_GNU_args_size. The "true" sizes are those we find
107 while scanning insns. However, the args_size value is irrelevant at
108 any point except can_throw_internal_p insns. Therefore the "delay"
109 sizes are the values that must actually be emitted for this trace. */
110 HOST_WIDE_INT beg_true_args_size, end_true_args_size;
111 HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;
113 /* The first EH insn in the trace, where beg_delay_args_size must be set. */
114 rtx eh_head;
116 /* The following variables contain data used in interpreting frame related
117 expressions. These are not part of the "real" row state as defined by
118 Dwarf, but it seems like they need to be propagated into a trace in case
119 frame related expressions have been sunk. */
120 /* ??? This seems fragile. These variables are fragments of a larger
121 expression. If we do not keep the entire expression together, we risk
122 not being able to put it together properly. Consider forcing targets
123 to generate self-contained expressions and dropping all of the magic
124 interpretation code in this file. Or at least refusing to shrink wrap
125 any frame related insn that doesn't contain a complete expression. */
127 /* The register used for saving registers to the stack, and its offset
128 from the CFA. */
129 dw_cfa_location cfa_store;
131 /* A temporary register holding an integral value used in adjusting SP
132 or setting up the store_reg. The "offset" field holds the integer
133 value, not an offset. */
134 dw_cfa_location cfa_temp;
136 /* A set of registers saved in other registers. This is the inverse of
137 the row->reg_save info, if the entry is a DW_CFA_register. This is
138 implemented as a flat array because it normally contains zero or 1
139 entry, depending on the target. IA-64 is the big spender here, using
140 a maximum of 5 entries. */
141 vec<reg_saved_in_data> regs_saved_in_regs;
143 /* An identifier for this trace. Used only for debugging dumps. */
144 unsigned id;
146 /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
147 bool switch_sections;
149 /* True if we've seen different values incoming to beg_true_args_size. */
150 bool args_size_undefined;
151 } dw_trace_info;
154 typedef dw_trace_info *dw_trace_info_ref;
157 /* Hashtable helpers. */
159 struct trace_info_hasher : typed_noop_remove <dw_trace_info>
161 typedef dw_trace_info value_type;
162 typedef dw_trace_info compare_type;
163 static inline hashval_t hash (const value_type *);
164 static inline bool equal (const value_type *, const compare_type *);
167 inline hashval_t
168 trace_info_hasher::hash (const value_type *ti)
170 return INSN_UID (ti->head);
173 inline bool
174 trace_info_hasher::equal (const value_type *a, const compare_type *b)
176 return a->head == b->head;
180 /* The variables making up the pseudo-cfg, as described above. */
181 static vec<dw_trace_info> trace_info;
182 static vec<dw_trace_info_ref> trace_work_list;
183 static hash_table <trace_info_hasher> trace_index;
185 /* A vector of call frame insns for the CIE. */
186 cfi_vec cie_cfi_vec;
188 /* The state of the first row of the FDE table, which includes the
189 state provided by the CIE. */
190 static GTY(()) dw_cfi_row *cie_cfi_row;
192 static GTY(()) reg_saved_in_data *cie_return_save;
194 static GTY(()) unsigned long dwarf2out_cfi_label_num;
196 /* The insn after which a new CFI note should be emitted. */
197 static rtx add_cfi_insn;
199 /* When non-null, add_cfi will add the CFI to this vector. */
200 static cfi_vec *add_cfi_vec;
202 /* The current instruction trace. */
203 static dw_trace_info *cur_trace;
205 /* The current, i.e. most recently generated, row of the CFI table. */
206 static dw_cfi_row *cur_row;
208 /* A copy of the current CFA, for use during the processing of a
209 single insn. */
210 static dw_cfa_location *cur_cfa;
212 /* We delay emitting a register save until either (a) we reach the end
213 of the prologue or (b) the register is clobbered. This clusters
214 register saves so that there are fewer pc advances. */
216 typedef struct {
217 rtx reg;
218 rtx saved_reg;
219 HOST_WIDE_INT cfa_offset;
220 } queued_reg_save;
223 static vec<queued_reg_save> queued_reg_saves;
225 /* True if any CFI directives were emitted at the current insn. */
226 static bool any_cfis_emitted;
228 /* Short-hand for commonly used register numbers. */
229 static unsigned dw_stack_pointer_regnum;
230 static unsigned dw_frame_pointer_regnum;
232 /* Hook used by __throw. */
234 rtx
235 expand_builtin_dwarf_sp_column (void)
237 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
238 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
241 /* MEM is a memory reference for the register size table, each element of
242 which has mode MODE. Initialize column C as a return address column. */
244 static void
245 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
247 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
248 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
249 emit_move_insn (adjust_address (mem, mode, offset),
250 gen_int_mode (size, mode));
253 /* Generate code to initialize the register size table. */
255 void
256 expand_builtin_init_dwarf_reg_sizes (tree address)
258 unsigned int i;
259 enum machine_mode mode = TYPE_MODE (char_type_node);
260 rtx addr = expand_normal (address);
261 rtx mem = gen_rtx_MEM (BLKmode, addr);
262 bool wrote_return_column = false;
264 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
266 unsigned int dnum = DWARF_FRAME_REGNUM (i);
267 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
269 if (rnum < DWARF_FRAME_REGISTERS)
271 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
272 enum machine_mode save_mode = reg_raw_mode[i];
273 HOST_WIDE_INT size;
275 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
276 save_mode = choose_hard_reg_mode (i, 1, true);
277 if (dnum == DWARF_FRAME_RETURN_COLUMN)
279 if (save_mode == VOIDmode)
280 continue;
281 wrote_return_column = true;
283 size = GET_MODE_SIZE (save_mode);
284 if (offset < 0)
285 continue;
287 emit_move_insn (adjust_address (mem, mode, offset),
288 gen_int_mode (size, mode));
292 if (!wrote_return_column)
293 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
295 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
296 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
297 #endif
299 targetm.init_dwarf_reg_sizes_extra (address);
303 static dw_trace_info *
304 get_trace_info (rtx insn)
306 dw_trace_info dummy;
307 dummy.head = insn;
308 return trace_index.find_with_hash (&dummy, INSN_UID (insn));
311 static bool
312 save_point_p (rtx insn)
314 /* Labels, except those that are really jump tables. */
315 if (LABEL_P (insn))
316 return inside_basic_block_p (insn);
318 /* We split traces at the prologue/epilogue notes because those
319 are points at which the unwind info is usually stable. This
320 makes it easier to find spots with identical unwind info so
321 that we can use remember/restore_state opcodes. */
322 if (NOTE_P (insn))
323 switch (NOTE_KIND (insn))
325 case NOTE_INSN_PROLOGUE_END:
326 case NOTE_INSN_EPILOGUE_BEG:
327 return true;
330 return false;
333 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
335 static inline HOST_WIDE_INT
336 div_data_align (HOST_WIDE_INT off)
338 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
339 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
340 return r;
343 /* Return true if we need a signed version of a given opcode
344 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
346 static inline bool
347 need_data_align_sf_opcode (HOST_WIDE_INT off)
349 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
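#if 0
/* Illustrative sketch, not part of the original file: exercises the two
   helpers above under the assumption that DWARF_CIE_DATA_ALIGNMENT is -8,
   as is typical for 64-bit targets with a downward-growing stack.  */
static void
data_align_examples (void)
{
  gcc_assert (div_data_align (-16) == 2);	   /* -16 / -8 */
  gcc_assert (div_data_align (8) == -1);	   /*   8 / -8 */
  gcc_assert (!need_data_align_sf_opcode (-16));   /* unsigned form is fine */
  gcc_assert (need_data_align_sf_opcode (8));	   /* needs the _sf variant */
}
#endif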
352 /* Return a pointer to a newly allocated Call Frame Instruction. */
354 static inline dw_cfi_ref
355 new_cfi (void)
357 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
359 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
360 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
362 return cfi;
365 /* Return a newly allocated CFI row, with no defined data. */
367 static dw_cfi_row *
368 new_cfi_row (void)
370 dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();
372 row->cfa.reg = INVALID_REGNUM;
374 return row;
377 /* Return a copy of an existing CFI row. */
379 static dw_cfi_row *
380 copy_cfi_row (dw_cfi_row *src)
382 dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();
384 *dst = *src;
385 dst->reg_save = vec_safe_copy (src->reg_save);
387 return dst;
390 /* Generate a new label for the CFI info to refer to. */
392 static char *
393 dwarf2out_cfi_label (void)
395 int num = dwarf2out_cfi_label_num++;
396 char label[20];
398 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
400 return xstrdup (label);
403 /* Add CFI either to the current insn stream or to a vector, or both. */
405 static void
406 add_cfi (dw_cfi_ref cfi)
408 any_cfis_emitted = true;
410 if (add_cfi_insn != NULL)
412 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
413 NOTE_CFI (add_cfi_insn) = cfi;
416 if (add_cfi_vec != NULL)
417 vec_safe_push (*add_cfi_vec, cfi);
420 static void
421 add_cfi_args_size (HOST_WIDE_INT size)
423 dw_cfi_ref cfi = new_cfi ();
425 /* While we can occasionally have args_size < 0 internally, this state
426 should not persist at a point we actually need an opcode. */
427 gcc_assert (size >= 0);
429 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
430 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
432 add_cfi (cfi);
435 static void
436 add_cfi_restore (unsigned reg)
438 dw_cfi_ref cfi = new_cfi ();
440 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
441 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
443 add_cfi (cfi);
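/* For example (illustrative column numbers): add_cfi_restore (12) emits
   DW_CFA_restore, whose low six bits encode the column directly, while
   add_cfi_restore (80) must use DW_CFA_restore_extended because 80 does
   not fit in six bits.  */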
446 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
447 that the register column is no longer saved. */
449 static void
450 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
452 if (vec_safe_length (row->reg_save) <= column)
453 vec_safe_grow_cleared (row->reg_save, column + 1);
454 (*row->reg_save)[column] = cfi;
457 /* This function fills in a dw_cfa_location structure from a dwarf location
458 descriptor sequence. */
460 static void
461 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
463 struct dw_loc_descr_struct *ptr;
464 cfa->offset = 0;
465 cfa->base_offset = 0;
466 cfa->indirect = 0;
467 cfa->reg = -1;
469 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
471 enum dwarf_location_atom op = ptr->dw_loc_opc;
473 switch (op)
475 case DW_OP_reg0:
476 case DW_OP_reg1:
477 case DW_OP_reg2:
478 case DW_OP_reg3:
479 case DW_OP_reg4:
480 case DW_OP_reg5:
481 case DW_OP_reg6:
482 case DW_OP_reg7:
483 case DW_OP_reg8:
484 case DW_OP_reg9:
485 case DW_OP_reg10:
486 case DW_OP_reg11:
487 case DW_OP_reg12:
488 case DW_OP_reg13:
489 case DW_OP_reg14:
490 case DW_OP_reg15:
491 case DW_OP_reg16:
492 case DW_OP_reg17:
493 case DW_OP_reg18:
494 case DW_OP_reg19:
495 case DW_OP_reg20:
496 case DW_OP_reg21:
497 case DW_OP_reg22:
498 case DW_OP_reg23:
499 case DW_OP_reg24:
500 case DW_OP_reg25:
501 case DW_OP_reg26:
502 case DW_OP_reg27:
503 case DW_OP_reg28:
504 case DW_OP_reg29:
505 case DW_OP_reg30:
506 case DW_OP_reg31:
507 cfa->reg = op - DW_OP_reg0;
508 break;
509 case DW_OP_regx:
510 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
511 break;
512 case DW_OP_breg0:
513 case DW_OP_breg1:
514 case DW_OP_breg2:
515 case DW_OP_breg3:
516 case DW_OP_breg4:
517 case DW_OP_breg5:
518 case DW_OP_breg6:
519 case DW_OP_breg7:
520 case DW_OP_breg8:
521 case DW_OP_breg9:
522 case DW_OP_breg10:
523 case DW_OP_breg11:
524 case DW_OP_breg12:
525 case DW_OP_breg13:
526 case DW_OP_breg14:
527 case DW_OP_breg15:
528 case DW_OP_breg16:
529 case DW_OP_breg17:
530 case DW_OP_breg18:
531 case DW_OP_breg19:
532 case DW_OP_breg20:
533 case DW_OP_breg21:
534 case DW_OP_breg22:
535 case DW_OP_breg23:
536 case DW_OP_breg24:
537 case DW_OP_breg25:
538 case DW_OP_breg26:
539 case DW_OP_breg27:
540 case DW_OP_breg28:
541 case DW_OP_breg29:
542 case DW_OP_breg30:
543 case DW_OP_breg31:
544 cfa->reg = op - DW_OP_breg0;
545 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
546 break;
547 case DW_OP_bregx:
548 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
549 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
550 break;
551 case DW_OP_deref:
552 cfa->indirect = 1;
553 break;
554 case DW_OP_plus_uconst:
555 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
556 break;
557 default:
558 gcc_unreachable ();
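/* Worked example (illustrative): the location expression
	DW_OP_breg6 <-16>; DW_OP_deref; DW_OP_plus_uconst <8>
   yields cfa->reg == 6, cfa->base_offset == -16, cfa->indirect == 1 and
   cfa->offset == 8, i.e. the CFA is 8 plus the word loaded from reg6-16.  */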
563 /* Find the previous value for the CFA, iteratively. CFI is the opcode
564 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
565 one level of remember/restore state processing. */
567 void
568 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
570 switch (cfi->dw_cfi_opc)
572 case DW_CFA_def_cfa_offset:
573 case DW_CFA_def_cfa_offset_sf:
574 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
575 break;
576 case DW_CFA_def_cfa_register:
577 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
578 break;
579 case DW_CFA_def_cfa:
580 case DW_CFA_def_cfa_sf:
581 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
582 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
583 break;
584 case DW_CFA_def_cfa_expression:
585 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
586 break;
588 case DW_CFA_remember_state:
589 gcc_assert (!remember->in_use);
590 *remember = *loc;
591 remember->in_use = 1;
592 break;
593 case DW_CFA_restore_state:
594 gcc_assert (remember->in_use);
595 *loc = *remember;
596 remember->in_use = 0;
597 break;
599 default:
600 break;
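/* Worked example (illustrative): interpreting the sequence
	DW_CFA_def_cfa (7, 8); DW_CFA_remember_state;
	DW_CFA_def_cfa_offset (32); DW_CFA_restore_state
   leaves *LOC as reg 7, offset 8, because the state remembered before the
   offset change is copied back by DW_CFA_restore_state.  */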
604 /* Determine if two dw_cfa_location structures define the same data. */
606 bool
607 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
609 return (loc1->reg == loc2->reg
610 && loc1->offset == loc2->offset
611 && loc1->indirect == loc2->indirect
612 && (loc1->indirect == 0
613 || loc1->base_offset == loc2->base_offset));
616 /* Determine if two CFI operands are identical. */
618 static bool
619 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
621 switch (t)
623 case dw_cfi_oprnd_unused:
624 return true;
625 case dw_cfi_oprnd_reg_num:
626 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
627 case dw_cfi_oprnd_offset:
628 return a->dw_cfi_offset == b->dw_cfi_offset;
629 case dw_cfi_oprnd_addr:
630 return (a->dw_cfi_addr == b->dw_cfi_addr
631 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
632 case dw_cfi_oprnd_loc:
633 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
635 gcc_unreachable ();
638 /* Determine if two CFI entries are identical. */
640 static bool
641 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
643 enum dwarf_call_frame_info opc;
645 /* Make things easier for our callers, including missing operands. */
646 if (a == b)
647 return true;
648 if (a == NULL || b == NULL)
649 return false;
651 /* Obviously, the opcodes must match. */
652 opc = a->dw_cfi_opc;
653 if (opc != b->dw_cfi_opc)
654 return false;
656 /* Compare the two operands, re-using the type of the operands as
657 already exposed elsewhere. */
658 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
659 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
660 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
661 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
664 /* Determine if two CFI_ROW structures are identical. */
666 static bool
667 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
669 size_t i, n_a, n_b, n_max;
671 if (a->cfa_cfi)
673 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
674 return false;
676 else if (!cfa_equal_p (&a->cfa, &b->cfa))
677 return false;
679 n_a = vec_safe_length (a->reg_save);
680 n_b = vec_safe_length (b->reg_save);
681 n_max = MAX (n_a, n_b);
683 for (i = 0; i < n_max; ++i)
685 dw_cfi_ref r_a = NULL, r_b = NULL;
687 if (i < n_a)
688 r_a = (*a->reg_save)[i];
689 if (i < n_b)
690 r_b = (*b->reg_save)[i];
692 if (!cfi_equal_p (r_a, r_b))
693 return false;
696 return true;
699 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
700 what opcode to emit. Returns the CFI opcode to effect the change, or
701 NULL if NEW_CFA == OLD_CFA. */
703 static dw_cfi_ref
704 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
706 dw_cfi_ref cfi;
708 /* If nothing changed, no need to issue any call frame instructions. */
709 if (cfa_equal_p (old_cfa, new_cfa))
710 return NULL;
712 cfi = new_cfi ();
714 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
716 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
717 the CFA register did not change but the offset did. The data
718 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
719 in the assembler via the .cfi_def_cfa_offset directive. */
720 if (new_cfa->offset < 0)
721 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
722 else
723 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
724 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
726 else if (new_cfa->offset == old_cfa->offset
727 && old_cfa->reg != INVALID_REGNUM
728 && !new_cfa->indirect
729 && !old_cfa->indirect)
731 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
732 indicating the CFA register has changed to <register> but the
733 offset has not changed. */
734 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
735 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
737 else if (new_cfa->indirect == 0)
739 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
740 indicating the CFA register has changed to <register> with
741 the specified offset. The data factoring for DW_CFA_def_cfa_sf
742 happens in output_cfi, or in the assembler via the .cfi_def_cfa
743 directive. */
744 if (new_cfa->offset < 0)
745 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
746 else
747 cfi->dw_cfi_opc = DW_CFA_def_cfa;
748 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
749 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
751 else
753 /* Construct a DW_CFA_def_cfa_expression instruction to
754 calculate the CFA using a full location expression since no
755 register-offset pair is available. */
756 struct dw_loc_descr_struct *loc_list;
758 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
759 loc_list = build_cfa_loc (new_cfa, 0);
760 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
763 return cfi;
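#if 0
/* Illustrative sketch, not part of the original file: shows which opcode
   def_cfa_0 chooses for a plain offset change.  Register number 7 and the
   offsets are made up; the helper works the same for any non-indirect CFA.  */
static void
def_cfa_0_example (void)
{
  dw_cfa_location old_cfa, new_cfa;
  dw_cfi_ref cfi;

  memset (&old_cfa, 0, sizeof (old_cfa));
  memset (&new_cfa, 0, sizeof (new_cfa));
  old_cfa.reg = 7, old_cfa.offset = 8;
  new_cfa.reg = 7, new_cfa.offset = 16;

  /* Same register, different offset: expect DW_CFA_def_cfa_offset 16.  */
  cfi = def_cfa_0 (&old_cfa, &new_cfa);
  gcc_assert (cfi && cfi->dw_cfi_opc == DW_CFA_def_cfa_offset);
}
#endif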
766 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
768 static void
769 def_cfa_1 (dw_cfa_location *new_cfa)
771 dw_cfi_ref cfi;
773 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
774 cur_trace->cfa_store.offset = new_cfa->offset;
776 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
777 if (cfi)
779 cur_row->cfa = *new_cfa;
780 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
781 ? cfi : NULL);
783 add_cfi (cfi);
787 /* Add the CFI for saving a register. REG is the CFA column number.
788 If SREG is -1, the register is saved at OFFSET from the CFA;
789 otherwise it is saved in SREG. */
791 static void
792 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
794 dw_fde_ref fde = cfun ? cfun->fde : NULL;
795 dw_cfi_ref cfi = new_cfi ();
797 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
799 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
800 if (fde
801 && fde->stack_realign
802 && sreg == INVALID_REGNUM)
804 cfi->dw_cfi_opc = DW_CFA_expression;
805 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
806 cfi->dw_cfi_oprnd2.dw_cfi_loc
807 = build_cfa_aligned_loc (&cur_row->cfa, offset,
808 fde->stack_realignment);
810 else if (sreg == INVALID_REGNUM)
812 if (need_data_align_sf_opcode (offset))
813 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
814 else if (reg & ~0x3f)
815 cfi->dw_cfi_opc = DW_CFA_offset_extended;
816 else
817 cfi->dw_cfi_opc = DW_CFA_offset;
818 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
820 else if (sreg == reg)
822 /* While we could emit something like DW_CFA_same_value or
823 DW_CFA_restore, we never expect to see something like that
824 in a prologue. This is more likely to be a bug. A backend
825 can always bypass this by using REG_CFA_RESTORE directly. */
826 gcc_unreachable ();
828 else
830 cfi->dw_cfi_opc = DW_CFA_register;
831 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
834 add_cfi (cfi);
835 update_row_reg_save (cur_row, reg, cfi);
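/* For example (illustrative, assuming the usual negative
   DWARF_CIE_DATA_ALIGNMENT and no stack realignment): reg_save (3,
   INVALID_REGNUM, -8) records column 3 saved at CFA-8 with DW_CFA_offset,
   since 3 fits in the opcode's low six bits, while reg_save (3, 4, 0)
   records that column 3 now lives in column 4 via DW_CFA_register.  */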
838 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
839 and adjust data structures to match. */
841 static void
842 notice_args_size (rtx insn)
844 HOST_WIDE_INT args_size, delta;
845 rtx note;
847 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
848 if (note == NULL)
849 return;
851 args_size = INTVAL (XEXP (note, 0));
852 delta = args_size - cur_trace->end_true_args_size;
853 if (delta == 0)
854 return;
856 cur_trace->end_true_args_size = args_size;
858 /* If the CFA is computed off the stack pointer, then we must adjust
859 the computation of the CFA as well. */
860 if (cur_cfa->reg == dw_stack_pointer_regnum)
862 gcc_assert (!cur_cfa->indirect);
864 /* Convert a change in args_size (always positive in the
865 direction of stack growth) to a change in stack pointer. */
866 #ifndef STACK_GROWS_DOWNWARD
867 delta = -delta;
868 #endif
869 cur_cfa->offset += delta;
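/* Worked example (illustrative): on a STACK_GROWS_DOWNWARD target where
   the CFA is currently sp+16, an insn carrying REG_ARGS_SIZE 32 while
   end_true_args_size was 16 gives delta == 16; sp has moved 16 bytes
   further away from the CFA, so cur_cfa->offset becomes 32.  */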
873 /* A subroutine of scan_trace. INSN is a can_throw_internal insn. Update the
874 data within the trace related to EH insns and args_size. */
876 static void
877 notice_eh_throw (rtx insn)
879 HOST_WIDE_INT args_size;
881 args_size = cur_trace->end_true_args_size;
882 if (cur_trace->eh_head == NULL)
884 cur_trace->eh_head = insn;
885 cur_trace->beg_delay_args_size = args_size;
886 cur_trace->end_delay_args_size = args_size;
888 else if (cur_trace->end_delay_args_size != args_size)
890 cur_trace->end_delay_args_size = args_size;
892 /* ??? If the CFA is the stack pointer, search backward for the last
893 CFI note and insert there. Given that the stack changed for the
894 args_size change, there *must* be such a note in between here and
895 the last eh insn. */
896 add_cfi_args_size (args_size);
900 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
901 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
902 used in places where rtl is prohibited. */
904 static inline unsigned
905 dwf_regno (const_rtx reg)
907 return DWARF_FRAME_REGNUM (REGNO (reg));
910 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
912 static bool
913 compare_reg_or_pc (rtx x, rtx y)
915 if (REG_P (x) && REG_P (y))
916 return REGNO (x) == REGNO (y);
917 return x == y;
920 /* Record SRC as being saved in DEST. DEST may be null to delete an
921 existing entry. SRC may be a register or PC_RTX. */
923 static void
924 record_reg_saved_in_reg (rtx dest, rtx src)
926 reg_saved_in_data *elt;
927 size_t i;
929 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
930 if (compare_reg_or_pc (elt->orig_reg, src))
932 if (dest == NULL)
933 cur_trace->regs_saved_in_regs.unordered_remove (i);
934 else
935 elt->saved_in_reg = dest;
936 return;
939 if (dest == NULL)
940 return;
942 reg_saved_in_data e = {src, dest};
943 cur_trace->regs_saved_in_regs.safe_push (e);
946 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
947 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
949 static void
950 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
952 queued_reg_save *q;
953 queued_reg_save e = {reg, sreg, offset};
954 size_t i;
956 /* Duplicates waste space, but it's also necessary to remove them
957 for correctness, since the queue gets output in reverse order. */
958 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
959 if (compare_reg_or_pc (q->reg, reg))
961 *q = e;
962 return;
965 queued_reg_saves.safe_push (e);
968 /* Output all the entries in QUEUED_REG_SAVES. */
970 static void
971 dwarf2out_flush_queued_reg_saves (void)
973 queued_reg_save *q;
974 size_t i;
976 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
978 unsigned int reg, sreg;
980 record_reg_saved_in_reg (q->saved_reg, q->reg);
982 if (q->reg == pc_rtx)
983 reg = DWARF_FRAME_RETURN_COLUMN;
984 else
985 reg = dwf_regno (q->reg);
986 if (q->saved_reg)
987 sreg = dwf_regno (q->saved_reg);
988 else
989 sreg = INVALID_REGNUM;
990 reg_save (reg, sreg, q->cfa_offset);
993 queued_reg_saves.truncate (0);
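/* For example, a prologue that pushes three call-saved registers in a row
   typically queues three entries and flushes them here in one batch, so
   the emitted CFI needs a single advance_loc before the three
   DW_CFA_offset opcodes rather than one advance per push.  (Illustrative;
   the exact clustering depends on the insn stream.)  */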
996 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
997 location for? Or, does it clobber a register which we've previously
998 said that some other register is saved in, and for which we now
999 have a new location for? */
1001 static bool
1002 clobbers_queued_reg_save (const_rtx insn)
1004 queued_reg_save *q;
1005 size_t iq;
1007 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1009 size_t ir;
1010 reg_saved_in_data *rir;
1012 if (modified_in_p (q->reg, insn))
1013 return true;
1015 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1016 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1017 && modified_in_p (rir->saved_in_reg, insn))
1018 return true;
1021 return false;
1024 /* What register, if any, is currently saved in REG? */
1026 static rtx
1027 reg_saved_in (rtx reg)
1029 unsigned int regn = REGNO (reg);
1030 queued_reg_save *q;
1031 reg_saved_in_data *rir;
1032 size_t i;
1034 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1035 if (q->saved_reg && regn == REGNO (q->saved_reg))
1036 return q->reg;
1038 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1039 if (regn == REGNO (rir->saved_in_reg))
1040 return rir->orig_reg;
1042 return NULL_RTX;
1045 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1047 static void
1048 dwarf2out_frame_debug_def_cfa (rtx pat)
1050 memset (cur_cfa, 0, sizeof (*cur_cfa));
1052 if (GET_CODE (pat) == PLUS)
1054 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1055 pat = XEXP (pat, 0);
1057 if (MEM_P (pat))
1059 cur_cfa->indirect = 1;
1060 pat = XEXP (pat, 0);
1061 if (GET_CODE (pat) == PLUS)
1063 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1064 pat = XEXP (pat, 0);
1067 /* ??? If this fails, we could be calling into the _loc functions to
1068 define a full expression. So far no port does that. */
1069 gcc_assert (REG_P (pat));
1070 cur_cfa->reg = dwf_regno (pat);
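/* Worked example (illustrative): a REG_CFA_DEF_CFA note whose pattern is
   (plus (reg fp) (const_int 16)) sets cur_cfa to reg = fp, offset = 16,
   while (mem (plus (reg sp) (const_int 8))) sets indirect = 1 with
   base_offset = 8, meaning the CFA must be loaded from that stack slot.  */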
1073 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1075 static void
1076 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1078 rtx src, dest;
1080 gcc_assert (GET_CODE (pat) == SET);
1081 dest = XEXP (pat, 0);
1082 src = XEXP (pat, 1);
1084 switch (GET_CODE (src))
1086 case PLUS:
1087 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1088 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1089 break;
1091 case REG:
1092 break;
1094 default:
1095 gcc_unreachable ();
1098 cur_cfa->reg = dwf_regno (dest);
1099 gcc_assert (cur_cfa->indirect == 0);
1102 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1104 static void
1105 dwarf2out_frame_debug_cfa_offset (rtx set)
1107 HOST_WIDE_INT offset;
1108 rtx src, addr, span;
1109 unsigned int sregno;
1111 src = XEXP (set, 1);
1112 addr = XEXP (set, 0);
1113 gcc_assert (MEM_P (addr));
1114 addr = XEXP (addr, 0);
1116 /* As documented, only consider extremely simple addresses. */
1117 switch (GET_CODE (addr))
1119 case REG:
1120 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1121 offset = -cur_cfa->offset;
1122 break;
1123 case PLUS:
1124 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1125 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1126 break;
1127 default:
1128 gcc_unreachable ();
1131 if (src == pc_rtx)
1133 span = NULL;
1134 sregno = DWARF_FRAME_RETURN_COLUMN;
1136 else
1138 span = targetm.dwarf_register_span (src);
1139 sregno = dwf_regno (src);
1142 /* ??? We'd like to use queue_reg_save, but we need to come up with
1143 a different flushing heuristic for epilogues. */
1144 if (!span)
1145 reg_save (sregno, INVALID_REGNUM, offset);
1146 else
1148 /* We have a PARALLEL describing where the contents of SRC live.
1149 Queue register saves for each piece of the PARALLEL. */
1150 int par_index;
1151 int limit;
1152 HOST_WIDE_INT span_offset = offset;
1154 gcc_assert (GET_CODE (span) == PARALLEL);
1156 limit = XVECLEN (span, 0);
1157 for (par_index = 0; par_index < limit; par_index++)
1159 rtx elem = XVECEXP (span, 0, par_index);
1161 sregno = dwf_regno (elem);
1162 reg_save (sregno, INVALID_REGNUM, span_offset);
1163 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1168 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1170 static void
1171 dwarf2out_frame_debug_cfa_register (rtx set)
1173 rtx src, dest;
1174 unsigned sregno, dregno;
1176 src = XEXP (set, 1);
1177 dest = XEXP (set, 0);
1179 record_reg_saved_in_reg (dest, src);
1180 if (src == pc_rtx)
1181 sregno = DWARF_FRAME_RETURN_COLUMN;
1182 else
1183 sregno = dwf_regno (src);
1185 dregno = dwf_regno (dest);
1187 /* ??? We'd like to use queue_reg_save, but we need to come up with
1188 a different flushing heuristic for epilogues. */
1189 reg_save (sregno, dregno, 0);
1192 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1194 static void
1195 dwarf2out_frame_debug_cfa_expression (rtx set)
1197 rtx src, dest, span;
1198 dw_cfi_ref cfi = new_cfi ();
1199 unsigned regno;
1201 dest = SET_DEST (set);
1202 src = SET_SRC (set);
1204 gcc_assert (REG_P (src));
1205 gcc_assert (MEM_P (dest));
1207 span = targetm.dwarf_register_span (src);
1208 gcc_assert (!span);
1210 regno = dwf_regno (src);
1212 cfi->dw_cfi_opc = DW_CFA_expression;
1213 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1214 cfi->dw_cfi_oprnd2.dw_cfi_loc
1215 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1216 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1218 /* ??? We'd like to use queue_reg_save, were the interface different,
1219 and, as above, we could manage flushing for epilogues. */
1220 add_cfi (cfi);
1221 update_row_reg_save (cur_row, regno, cfi);
1224 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1226 static void
1227 dwarf2out_frame_debug_cfa_restore (rtx reg)
1229 unsigned int regno = dwf_regno (reg);
1231 add_cfi_restore (regno);
1232 update_row_reg_save (cur_row, regno, NULL);
1235 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1236 ??? Perhaps we should note in the CIE where windows are saved (instead of
1237 assuming 0(cfa)) and what registers are in the window. */
1239 static void
1240 dwarf2out_frame_debug_cfa_window_save (void)
1242 dw_cfi_ref cfi = new_cfi ();
1244 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1245 add_cfi (cfi);
1248 /* Record call frame debugging information for an expression EXPR,
1249 which either sets SP or FP (adjusting how we calculate the frame
1250 address) or saves a register to the stack or another register.
1251 LABEL indicates the address of EXPR.
1253 This function encodes a state machine mapping rtxes to actions on
1254 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1255 users need not read the source code.
1257 The High-Level Picture
1259 Changes in the register we use to calculate the CFA: Currently we
1260 assume that if you copy the CFA register into another register, we
1261 should take the other one as the new CFA register; this seems to
1262 work pretty well. If it's wrong for some target, it's simple
1263 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1265 Changes in the register we use for saving registers to the stack:
1266 This is usually SP, but not always. Again, we deduce that if you
1267 copy SP into another register (and SP is not the CFA register),
1268 then the new register is the one we will be using for register
1269 saves. This also seems to work.
1271 Register saves: There's not much guesswork about this one; if
1272 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1273 register save, and the register used to calculate the destination
1274 had better be the one we think we're using for this purpose.
1275 It's also assumed that a copy from a call-saved register to another
1276 register is saving that register if RTX_FRAME_RELATED_P is set on
1277 that instruction. If the copy is from a call-saved register to
1278 the *same* register, that means that the register is now the same
1279 value as in the caller.
1281 Except: If the register being saved is the CFA register, and the
1282 offset is nonzero, we are saving the CFA, so we assume we have to
1283 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1284 the intent is to save the value of SP from the previous frame.
1286 In addition, if a register has previously been saved to a different
1287 register, a later copy from that second register is treated as a reference to the original register (see reg_saved_in).
1289 Invariants / Summaries of Rules
1291 cfa current rule for calculating the CFA. It usually
1292 consists of a register and an offset. This is
1293 actually stored in *cur_cfa, but abbreviated
1294 for the purposes of this documentation.
1295 cfa_store register used by prologue code to save things to the stack
1296 cfa_store.offset is the offset from the value of
1297 cfa_store.reg to the actual CFA
1298 cfa_temp register holding an integral value. cfa_temp.offset
1299 stores the value, which will be used to adjust the
1300 stack pointer. cfa_temp is also used like cfa_store,
1301 to track stores to the stack via fp or a temp reg.
1303 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1304 with cfa.reg as the first operand changes the cfa.reg and its
1305 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1306 cfa_temp.offset.
1308 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1309 expression yielding a constant. This sets cfa_temp.reg
1310 and cfa_temp.offset.
1312 Rule 5: Create a new register cfa_store used to save items to the
1313 stack.
1315 Rules 10-14: Save a register to the stack. Define offset as the
1316 difference of the original location and cfa_store's
1317 location (or cfa_temp's location if cfa_temp is used).
1319 Rules 16-20: If AND operation happens on sp in prologue, we assume
1320 stack is realigned. We will use a group of DW_OP_XXX
1321 expressions to represent the location of the stored
1322 register instead of CFA+offset.
1324 The Rules
1326 "{a,b}" indicates a choice of a xor b.
1327 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1329 Rule 1:
1330 (set <reg1> <reg2>:cfa.reg)
1331 effects: cfa.reg = <reg1>
1332 cfa.offset unchanged
1333 cfa_temp.reg = <reg1>
1334 cfa_temp.offset = cfa.offset
1336 Rule 2:
1337 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1338 {<const_int>,<reg>:cfa_temp.reg}))
1339 effects: cfa.reg = sp if fp used
1340 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1341 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1342 if cfa_store.reg==sp
1344 Rule 3:
1345 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1346 effects: cfa.reg = fp
1347 cfa_offset += +/- <const_int>
1349 Rule 4:
1350 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1351 constraints: <reg1> != fp
1352 <reg1> != sp
1353 effects: cfa.reg = <reg1>
1354 cfa_temp.reg = <reg1>
1355 cfa_temp.offset = cfa.offset
1357 Rule 5:
1358 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1359 constraints: <reg1> != fp
1360 <reg1> != sp
1361 effects: cfa_store.reg = <reg1>
1362 cfa_store.offset = cfa.offset - cfa_temp.offset
1364 Rule 6:
1365 (set <reg> <const_int>)
1366 effects: cfa_temp.reg = <reg>
1367 cfa_temp.offset = <const_int>
1369 Rule 7:
1370 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1371 effects: cfa_temp.reg = <reg1>
1372 cfa_temp.offset |= <const_int>
1374 Rule 8:
1375 (set <reg> (high <exp>))
1376 effects: none
1378 Rule 9:
1379 (set <reg> (lo_sum <exp> <const_int>))
1380 effects: cfa_temp.reg = <reg>
1381 cfa_temp.offset = <const_int>
1383 Rule 10:
1384 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1385 effects: cfa_store.offset -= <const_int>
1386 cfa.offset = cfa_store.offset if cfa.reg == sp
1387 cfa.reg = sp
1388 cfa.base_offset = -cfa_store.offset
1390 Rule 11:
1391 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1392 effects: cfa_store.offset += -/+ mode_size(mem)
1393 cfa.offset = cfa_store.offset if cfa.reg == sp
1394 cfa.reg = sp
1395 cfa.base_offset = -cfa_store.offset
1397 Rule 12:
1398 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1400 <reg2>)
1401 effects: cfa.reg = <reg1>
1402 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1404 Rule 13:
1405 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1406 effects: cfa.reg = <reg1>
1407 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1409 Rule 14:
1410 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1411 effects: cfa.reg = <reg1>
1412 cfa.base_offset = -cfa_temp.offset
1413 cfa_temp.offset -= mode_size(mem)
1415 Rule 15:
1416 (set <reg> {unspec, unspec_volatile})
1417 effects: target-dependent
1419 Rule 16:
1420 (set sp (and: sp <const_int>))
1421 constraints: cfa_store.reg == sp
1422 effects: cfun->fde.stack_realign = 1
1423 cfa_store.offset = 0
1424 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1426 Rule 17:
1427 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1428 effects: cfa_store.offset += -/+ mode_size(mem)
1430 Rule 18:
1431 (set (mem ({pre_inc, pre_dec} sp)) fp)
1432 constraints: fde->stack_realign == 1
1433 effects: cfa_store.offset = 0
1434 cfa.reg != HARD_FRAME_POINTER_REGNUM
1436 Rule 19:
1437 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1438 constraints: fde->stack_realign == 1
1439 && cfa.offset == 0
1440 && cfa.indirect == 0
1441 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1442 effects: Use DW_CFA_def_cfa_expression to define cfa
1443 cfa.reg == fde->drap_reg */
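/* Worked example of the rules above (illustrative pseudo-RTL, not from any
   particular target):

	(set (mem (pre_dec sp)) fp)		   Rule 11: push fp
	(set fp sp)				   Rule 1:  CFA register moves to fp
	(set sp (minus sp (const_int 64)))	   Rule 2:  sp adjusted

   The push grows cfa_store.offset (and cfa.offset while cfa.reg is still
   sp) and queues a save of fp at that offset; the copy switches cfa.reg
   from sp to fp with the offset unchanged; the final subtraction leaves
   the CFA alone and only bumps cfa_store.offset by 64.  */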
1445 static void
1446 dwarf2out_frame_debug_expr (rtx expr)
1448 rtx src, dest, span;
1449 HOST_WIDE_INT offset;
1450 dw_fde_ref fde;
1452 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1453 the PARALLEL independently. The first element is always processed if
1454 it is a SET. This is for backward compatibility. Other elements
1455 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1456 flag is set in them. */
1457 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1459 int par_index;
1460 int limit = XVECLEN (expr, 0);
1461 rtx elem;
1463 /* PARALLELs have strict read-modify-write semantics, so we
1464 ought to evaluate every rvalue before changing any lvalue.
1465 It's cumbersome to do that in general, but there's an
1466 easy approximation that is enough for all current users:
1467 handle register saves before register assignments. */
1468 if (GET_CODE (expr) == PARALLEL)
1469 for (par_index = 0; par_index < limit; par_index++)
1471 elem = XVECEXP (expr, 0, par_index);
1472 if (GET_CODE (elem) == SET
1473 && MEM_P (SET_DEST (elem))
1474 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1475 dwarf2out_frame_debug_expr (elem);
1478 for (par_index = 0; par_index < limit; par_index++)
1480 elem = XVECEXP (expr, 0, par_index);
1481 if (GET_CODE (elem) == SET
1482 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1483 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1484 dwarf2out_frame_debug_expr (elem);
1486 return;
1489 gcc_assert (GET_CODE (expr) == SET);
1491 src = SET_SRC (expr);
1492 dest = SET_DEST (expr);
1494 if (REG_P (src))
1496 rtx rsi = reg_saved_in (src);
1497 if (rsi)
1498 src = rsi;
1501 fde = cfun->fde;
1503 switch (GET_CODE (dest))
1505 case REG:
1506 switch (GET_CODE (src))
1508 /* Setting FP from SP. */
1509 case REG:
1510 if (cur_cfa->reg == dwf_regno (src))
1512 /* Rule 1 */
1513 /* Update the CFA rule wrt SP or FP. Make sure src is
1514 relative to the current CFA register.
1516 We used to require that dest be either SP or FP, but the
1517 ARM copies SP to a temporary register, and from there to
1518 FP. So we just rely on the backends to only set
1519 RTX_FRAME_RELATED_P on appropriate insns. */
1520 cur_cfa->reg = dwf_regno (dest);
1521 cur_trace->cfa_temp.reg = cur_cfa->reg;
1522 cur_trace->cfa_temp.offset = cur_cfa->offset;
1524 else
1526 /* Saving a register in a register. */
1527 gcc_assert (!fixed_regs [REGNO (dest)]
1528 /* For the SPARC and its register window. */
1529 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1531 /* After the stack is aligned, we can only save SP in FP
1532 if the drap register is used. In this case, we have
1533 to restore stack pointer with the CFA value and we
1534 don't generate this DWARF information. */
1535 if (fde
1536 && fde->stack_realign
1537 && REGNO (src) == STACK_POINTER_REGNUM)
1538 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1539 && fde->drap_reg != INVALID_REGNUM
1540 && cur_cfa->reg != dwf_regno (src));
1541 else
1542 queue_reg_save (src, dest, 0);
1544 break;
1546 case PLUS:
1547 case MINUS:
1548 case LO_SUM:
1549 if (dest == stack_pointer_rtx)
1551 /* Rule 2 */
1552 /* Adjusting SP. */
1553 switch (GET_CODE (XEXP (src, 1)))
1555 case CONST_INT:
1556 offset = INTVAL (XEXP (src, 1));
1557 break;
1558 case REG:
1559 gcc_assert (dwf_regno (XEXP (src, 1))
1560 == cur_trace->cfa_temp.reg);
1561 offset = cur_trace->cfa_temp.offset;
1562 break;
1563 default:
1564 gcc_unreachable ();
1567 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1569 /* Restoring SP from FP in the epilogue. */
1570 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1571 cur_cfa->reg = dw_stack_pointer_regnum;
1573 else if (GET_CODE (src) == LO_SUM)
1574 /* Assume we've set the source reg of the LO_SUM from sp. */
1576 else
1577 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1579 if (GET_CODE (src) != MINUS)
1580 offset = -offset;
1581 if (cur_cfa->reg == dw_stack_pointer_regnum)
1582 cur_cfa->offset += offset;
1583 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1584 cur_trace->cfa_store.offset += offset;
1586 else if (dest == hard_frame_pointer_rtx)
1588 /* Rule 3 */
1589 /* Either setting the FP from an offset of the SP,
1590 or adjusting the FP */
1591 gcc_assert (frame_pointer_needed);
1593 gcc_assert (REG_P (XEXP (src, 0))
1594 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1595 && CONST_INT_P (XEXP (src, 1)));
1596 offset = INTVAL (XEXP (src, 1));
1597 if (GET_CODE (src) != MINUS)
1598 offset = -offset;
1599 cur_cfa->offset += offset;
1600 cur_cfa->reg = dw_frame_pointer_regnum;
1602 else
1604 gcc_assert (GET_CODE (src) != MINUS);
1606 /* Rule 4 */
1607 if (REG_P (XEXP (src, 0))
1608 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1609 && CONST_INT_P (XEXP (src, 1)))
1611 /* Setting a temporary CFA register that will be copied
1612 into the FP later on. */
1613 offset = - INTVAL (XEXP (src, 1));
1614 cur_cfa->offset += offset;
1615 cur_cfa->reg = dwf_regno (dest);
1616 /* Or used to save regs to the stack. */
1617 cur_trace->cfa_temp.reg = cur_cfa->reg;
1618 cur_trace->cfa_temp.offset = cur_cfa->offset;
1621 /* Rule 5 */
1622 else if (REG_P (XEXP (src, 0))
1623 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1624 && XEXP (src, 1) == stack_pointer_rtx)
1626 /* Setting a scratch register that we will use instead
1627 of SP for saving registers to the stack. */
1628 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1629 cur_trace->cfa_store.reg = dwf_regno (dest);
1630 cur_trace->cfa_store.offset
1631 = cur_cfa->offset - cur_trace->cfa_temp.offset;
1634 /* Rule 9 */
1635 else if (GET_CODE (src) == LO_SUM
1636 && CONST_INT_P (XEXP (src, 1)))
1638 cur_trace->cfa_temp.reg = dwf_regno (dest);
1639 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1641 else
1642 gcc_unreachable ();
1644 break;
1646 /* Rule 6 */
1647 case CONST_INT:
1648 cur_trace->cfa_temp.reg = dwf_regno (dest);
1649 cur_trace->cfa_temp.offset = INTVAL (src);
1650 break;
1652 /* Rule 7 */
1653 case IOR:
1654 gcc_assert (REG_P (XEXP (src, 0))
1655 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1656 && CONST_INT_P (XEXP (src, 1)));
1658 cur_trace->cfa_temp.reg = dwf_regno (dest);
1659 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1660 break;
1662 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1663 which will fill in all of the bits. */
1664 /* Rule 8 */
1665 case HIGH:
1666 break;
1668 /* Rule 15 */
1669 case UNSPEC:
1670 case UNSPEC_VOLATILE:
1671 /* All unspecs should be represented by REG_CFA_* notes. */
1672 gcc_unreachable ();
1673 return;
1675 /* Rule 16 */
1676 case AND:
1677 /* If this AND operation happens on stack pointer in prologue,
1678 we assume the stack is realigned and we extract the
1679 alignment. */
1680 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1682 /* We interpret reg_save differently with stack_realign set.
1683 Thus we must flush whatever we have queued first. */
1684 dwarf2out_flush_queued_reg_saves ();
1686 gcc_assert (cur_trace->cfa_store.reg
1687 == dwf_regno (XEXP (src, 0)));
1688 fde->stack_realign = 1;
1689 fde->stack_realignment = INTVAL (XEXP (src, 1));
1690 cur_trace->cfa_store.offset = 0;
1692 if (cur_cfa->reg != dw_stack_pointer_regnum
1693 && cur_cfa->reg != dw_frame_pointer_regnum)
1694 fde->drap_reg = cur_cfa->reg;
1696 return;
1698 default:
1699 gcc_unreachable ();
1701 break;
1703 case MEM:
1705 /* Saving a register to the stack. Make sure dest is relative to the
1706 CFA register. */
1707 switch (GET_CODE (XEXP (dest, 0)))
1709 /* Rule 10 */
1710 /* With a push. */
1711 case PRE_MODIFY:
1712 case POST_MODIFY:
1713 /* We can't handle variable size modifications. */
1714 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1715 == CONST_INT);
1716 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1718 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1719 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1721 cur_trace->cfa_store.offset += offset;
1722 if (cur_cfa->reg == dw_stack_pointer_regnum)
1723 cur_cfa->offset = cur_trace->cfa_store.offset;
1725 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1726 offset -= cur_trace->cfa_store.offset;
1727 else
1728 offset = -cur_trace->cfa_store.offset;
1729 break;
1731 /* Rule 11 */
1732 case PRE_INC:
1733 case PRE_DEC:
1734 case POST_DEC:
1735 offset = GET_MODE_SIZE (GET_MODE (dest));
1736 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1737 offset = -offset;
1739 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1740 == STACK_POINTER_REGNUM)
1741 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1743 cur_trace->cfa_store.offset += offset;
1745 /* Rule 18: If stack is aligned, we will use FP as a
1746 reference to represent the address of the stored
1747 register. */
1748 if (fde
1749 && fde->stack_realign
1750 && REG_P (src)
1751 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1753 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1754 cur_trace->cfa_store.offset = 0;
1757 if (cur_cfa->reg == dw_stack_pointer_regnum)
1758 cur_cfa->offset = cur_trace->cfa_store.offset;
1760 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1761 offset += -cur_trace->cfa_store.offset;
1762 else
1763 offset = -cur_trace->cfa_store.offset;
1764 break;
1766 /* Rule 12 */
1767 /* With an offset. */
1768 case PLUS:
1769 case MINUS:
1770 case LO_SUM:
1772 unsigned int regno;
1774 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1775 && REG_P (XEXP (XEXP (dest, 0), 0)));
1776 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1777 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1778 offset = -offset;
1780 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1782 if (cur_cfa->reg == regno)
1783 offset -= cur_cfa->offset;
1784 else if (cur_trace->cfa_store.reg == regno)
1785 offset -= cur_trace->cfa_store.offset;
1786 else
1788 gcc_assert (cur_trace->cfa_temp.reg == regno);
1789 offset -= cur_trace->cfa_temp.offset;
1792 break;
1794 /* Rule 13 */
1795 /* Without an offset. */
1796 case REG:
1798 unsigned int regno = dwf_regno (XEXP (dest, 0));
1800 if (cur_cfa->reg == regno)
1801 offset = -cur_cfa->offset;
1802 else if (cur_trace->cfa_store.reg == regno)
1803 offset = -cur_trace->cfa_store.offset;
1804 else
1806 gcc_assert (cur_trace->cfa_temp.reg == regno);
1807 offset = -cur_trace->cfa_temp.offset;
1810 break;
1812 /* Rule 14 */
1813 case POST_INC:
1814 gcc_assert (cur_trace->cfa_temp.reg
1815 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1816 offset = -cur_trace->cfa_temp.offset;
1817 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1818 break;
1820 default:
1821 gcc_unreachable ();
1824 /* Rule 17 */
1825 /* If the source operand of this MEM operation is a memory,
1826 we only care how much stack grew. */
1827 if (MEM_P (src))
1828 break;
1830 if (REG_P (src)
1831 && REGNO (src) != STACK_POINTER_REGNUM
1832 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1833 && dwf_regno (src) == cur_cfa->reg)
1835 /* We're storing the current CFA reg into the stack. */
1837 if (cur_cfa->offset == 0)
1839 /* Rule 19 */
1840 /* If stack is aligned, putting CFA reg into stack means
1841 we can no longer use reg + offset to represent CFA.
1842 Here we use DW_CFA_def_cfa_expression instead. The
1843 result of this expression equals to the original CFA
1844 value. */
1845 if (fde
1846 && fde->stack_realign
1847 && cur_cfa->indirect == 0
1848 && cur_cfa->reg != dw_frame_pointer_regnum)
1850 gcc_assert (fde->drap_reg == cur_cfa->reg);
1852 cur_cfa->indirect = 1;
1853 cur_cfa->reg = dw_frame_pointer_regnum;
1854 cur_cfa->base_offset = offset;
1855 cur_cfa->offset = 0;
1857 fde->drap_reg_saved = 1;
1858 break;
1861 /* If the source register is exactly the CFA, assume
1862 we're saving SP like any other register; this happens
1863 on the ARM. */
1864 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1865 break;
1867 else
1869 /* Otherwise, we'll need to look in the stack to
1870 calculate the CFA. */
1871 rtx x = XEXP (dest, 0);
1873 if (!REG_P (x))
1874 x = XEXP (x, 0);
1875 gcc_assert (REG_P (x));
1877 cur_cfa->reg = dwf_regno (x);
1878 cur_cfa->base_offset = offset;
1879 cur_cfa->indirect = 1;
1880 break;
1884 span = NULL;
1885 if (REG_P (src))
1886 span = targetm.dwarf_register_span (src);
1887 if (!span)
1888 queue_reg_save (src, NULL_RTX, offset);
1889 else
1891 /* We have a PARALLEL describing where the contents of SRC live.
1892 Queue register saves for each piece of the PARALLEL. */
1893 int par_index;
1894 int limit;
1895 HOST_WIDE_INT span_offset = offset;
1897 gcc_assert (GET_CODE (span) == PARALLEL);
1899 limit = XVECLEN (span, 0);
1900 for (par_index = 0; par_index < limit; par_index++)
1902 rtx elem = XVECEXP (span, 0, par_index);
1903 queue_reg_save (elem, NULL_RTX, span_offset);
1904 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1907 break;
1909 default:
1910 gcc_unreachable ();
1914 /* Record call frame debugging information for INSN, which either sets
1915 SP or FP (adjusting how we calculate the frame address) or saves a
1916 register to the stack. */
1918 static void
1919 dwarf2out_frame_debug (rtx insn)
1921 rtx note, n;
1922 bool handled_one = false;
1924 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1925 switch (REG_NOTE_KIND (note))
1927 case REG_FRAME_RELATED_EXPR:
1928 insn = XEXP (note, 0);
1929 goto do_frame_expr;
1931 case REG_CFA_DEF_CFA:
1932 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
1933 handled_one = true;
1934 break;
1936 case REG_CFA_ADJUST_CFA:
1937 n = XEXP (note, 0);
1938 if (n == NULL)
1940 n = PATTERN (insn);
1941 if (GET_CODE (n) == PARALLEL)
1942 n = XVECEXP (n, 0, 0);
1944 dwarf2out_frame_debug_adjust_cfa (n);
1945 handled_one = true;
1946 break;
1948 case REG_CFA_OFFSET:
1949 n = XEXP (note, 0);
1950 if (n == NULL)
1951 n = single_set (insn);
1952 dwarf2out_frame_debug_cfa_offset (n);
1953 handled_one = true;
1954 break;
1956 case REG_CFA_REGISTER:
1957 n = XEXP (note, 0);
1958 if (n == NULL)
1960 n = PATTERN (insn);
1961 if (GET_CODE (n) == PARALLEL)
1962 n = XVECEXP (n, 0, 0);
1964 dwarf2out_frame_debug_cfa_register (n);
1965 handled_one = true;
1966 break;
1968 case REG_CFA_EXPRESSION:
1969 n = XEXP (note, 0);
1970 if (n == NULL)
1971 n = single_set (insn);
1972 dwarf2out_frame_debug_cfa_expression (n);
1973 handled_one = true;
1974 break;
1976 case REG_CFA_RESTORE:
1977 n = XEXP (note, 0);
1978 if (n == NULL)
1980 n = PATTERN (insn);
1981 if (GET_CODE (n) == PARALLEL)
1982 n = XVECEXP (n, 0, 0);
1983 n = XEXP (n, 0);
1985 dwarf2out_frame_debug_cfa_restore (n);
1986 handled_one = true;
1987 break;
1989 case REG_CFA_SET_VDRAP:
1990 n = XEXP (note, 0);
1991 if (REG_P (n))
1993 dw_fde_ref fde = cfun->fde;
1994 if (fde)
1996 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
1997 if (REG_P (n))
1998 fde->vdrap_reg = dwf_regno (n);
2001 handled_one = true;
2002 break;
2004 case REG_CFA_WINDOW_SAVE:
2005 dwarf2out_frame_debug_cfa_window_save ();
2006 handled_one = true;
2007 break;
2009 case REG_CFA_FLUSH_QUEUE:
2010 /* The actual flush happens elsewhere. */
2011 handled_one = true;
2012 break;
2014 default:
2015 break;
2018 if (!handled_one)
2020 insn = PATTERN (insn);
2021 do_frame_expr:
2022 dwarf2out_frame_debug_expr (insn);
2024 /* Check again. A parallel can save and update the same register.
2025 We could probably check just once, here, but this is safer than
2026 removing the check at the start of the function. */
2027 if (clobbers_queued_reg_save (insn))
2028 dwarf2out_flush_queued_reg_saves ();
2032 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2034 static void
2035 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2037 size_t i, n_old, n_new, n_max;
2038 dw_cfi_ref cfi;
2040 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2041 add_cfi (new_row->cfa_cfi);
2042 else
2044 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2045 if (cfi)
2046 add_cfi (cfi);
2049 n_old = vec_safe_length (old_row->reg_save);
2050 n_new = vec_safe_length (new_row->reg_save);
2051 n_max = MAX (n_old, n_new);
2053 for (i = 0; i < n_max; ++i)
2055 dw_cfi_ref r_old = NULL, r_new = NULL;
2057 if (i < n_old)
2058 r_old = (*old_row->reg_save)[i];
2059 if (i < n_new)
2060 r_new = (*new_row->reg_save)[i];
2062 if (r_old == r_new)
2064 else if (r_new == NULL)
2065 add_cfi_restore (i);
2066 else if (!cfi_equal_p (r_old, r_new))
2067 add_cfi (r_new);
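/* Worked example with hypothetical column numbers: if OLD_ROW says
   "CFA = r7+16, column 6 saved at CFA-16" and NEW_ROW says only
   "CFA = r6+16", the code above emits the new CFA definition via def_cfa_0
   plus a DW_CFA_restore for column 6, while columns whose save CFI is
   unchanged produce nothing; only the delta between the rows is emitted.  */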
2071 /* Examine CFI and return true if a cfi label and set_loc is needed
2072 beforehand. Even when generating CFI assembler instructions, we
2073 still have to add the cfi to the list so that lookup_cfa_1 works
2074 later on. At -g2 and above we even need to force emitting of
2075 CFI labels and add a DW_CFA_set_loc to the list for convert_cfa_to_fb_loc_list
2076 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2077 and so don't use convert_cfa_to_fb_loc_list. */
2079 static bool
2080 cfi_label_required_p (dw_cfi_ref cfi)
2082 if (!dwarf2out_do_cfi_asm ())
2083 return true;
2085 if (dwarf_version == 2
2086 && debug_info_level > DINFO_LEVEL_TERSE
2087 && (write_symbols == DWARF2_DEBUG
2088 || write_symbols == VMS_AND_DWARF2_DEBUG))
2090 switch (cfi->dw_cfi_opc)
2092 case DW_CFA_def_cfa_offset:
2093 case DW_CFA_def_cfa_offset_sf:
2094 case DW_CFA_def_cfa_register:
2095 case DW_CFA_def_cfa:
2096 case DW_CFA_def_cfa_sf:
2097 case DW_CFA_def_cfa_expression:
2098 case DW_CFA_restore_state:
2099 return true;
2100 default:
2101 return false;
2104 return false;
2107 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2108 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2109 necessary. */
2110 static void
2111 add_cfis_to_fde (void)
2113 dw_fde_ref fde = cfun->fde;
2114 rtx insn, next;
2115 /* We always start with a function_begin label. */
2116 bool first = false;
2118 for (insn = get_insns (); insn; insn = next)
2120 next = NEXT_INSN (insn);
2122 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2124 fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
2125 /* Don't attempt to advance_loc4 between labels
2126 in different sections. */
2127 first = true;
2130 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2132 bool required = cfi_label_required_p (NOTE_CFI (insn));
2133 while (next)
2134 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2136 required |= cfi_label_required_p (NOTE_CFI (next));
2137 next = NEXT_INSN (next);
2139 else if (active_insn_p (next)
2140 || (NOTE_P (next) && (NOTE_KIND (next)
2141 == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2142 break;
2143 else
2144 next = NEXT_INSN (next);
2145 if (required)
2147 int num = dwarf2out_cfi_label_num;
2148 const char *label = dwarf2out_cfi_label ();
2149 dw_cfi_ref xcfi;
2150 rtx tmp;
2152 /* Set the location counter to the new label. */
2153 xcfi = new_cfi ();
2154 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2155 : DW_CFA_advance_loc4);
2156 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2157 vec_safe_push (fde->dw_fde_cfi, xcfi);
2159 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2160 NOTE_LABEL_NUMBER (tmp) = num;
2165 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2166 vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
2167 insn = NEXT_INSN (insn);
2169 while (insn != next);
2170 first = false;
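/* Sketch of the label logic above: within one text section each required
   CFI gets a fresh label and a DW_CFA_advance_loc4 delta from the previous
   CFI label, conceptually

     .LCFIn:                               <- NOTE_INSN_CFI_LABEL before INSN
       DW_CFA_advance_loc4  .LCFIn - <prev>
       <the CFI notes at this point>

   whereas the first CFI after a NOTE_INSN_SWITCH_TEXT_SECTIONS uses
   DW_CFA_set_loc with an absolute address, since a delta between labels in
   different sections cannot be computed.  The label name .LCFIn is only
   illustrative.  */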
2175 /* If START is the beginning of a trace, then initialize the state of that
2176 trace from CUR_TRACE and CUR_ROW. */
2178 static void
2179 maybe_record_trace_start (rtx start, rtx origin)
2181 dw_trace_info *ti;
2182 HOST_WIDE_INT args_size;
2184 ti = get_trace_info (start);
2185 gcc_assert (ti != NULL);
2187 if (dump_file)
2189 fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
2190 cur_trace->id, ti->id,
2191 (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2192 (origin ? INSN_UID (origin) : 0));
2195 args_size = cur_trace->end_true_args_size;
2196 if (ti->beg_row == NULL)
2198 /* This is the first time we've encountered this trace. Propagate
2199 state across the edge and push the trace onto the work list. */
2200 ti->beg_row = copy_cfi_row (cur_row);
2201 ti->beg_true_args_size = args_size;
2203 ti->cfa_store = cur_trace->cfa_store;
2204 ti->cfa_temp = cur_trace->cfa_temp;
2205 ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
2207 trace_work_list.safe_push (ti);
2209 if (dump_file)
2210 fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2212 else
2215 /* We ought to have the same state incoming to a given trace no
2216 matter how we arrive at the trace. Anything else means we've
2217 got some kind of optimization error. */
2218 gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
2220 /* The args_size is allowed to conflict if it isn't actually used. */
2221 if (ti->beg_true_args_size != args_size)
2222 ti->args_size_undefined = true;
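/* Minimal example of the consistency requirement checked above: for a
   diamond-shaped flow

        A
       / \
      B   C
       \ /
        D

   both B and C must reach D with identical CFI rows (same CFA rule, same
   saved registers), otherwise the checking assert fires.  Only the pushed
   argument size may differ between the two edges, in which case D's
   args_size is marked undefined rather than rejected.  */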
2226 /* Similarly, but handle the args_size and CFA reset across EH
2227 and non-local goto edges. */
2229 static void
2230 maybe_record_trace_start_abnormal (rtx start, rtx origin)
2232 HOST_WIDE_INT save_args_size, delta;
2233 dw_cfa_location save_cfa;
2235 save_args_size = cur_trace->end_true_args_size;
2236 if (save_args_size == 0)
2238 maybe_record_trace_start (start, origin);
2239 return;
2242 delta = -save_args_size;
2243 cur_trace->end_true_args_size = 0;
2245 save_cfa = cur_row->cfa;
2246 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2248 /* Convert a change in args_size (always positive in the
2249 direction of stack growth) to a change in stack pointer. */
2250 #ifndef STACK_GROWS_DOWNWARD
2251 delta = -delta;
2252 #endif
2253 cur_row->cfa.offset += delta;
2256 maybe_record_trace_start (start, origin);
2258 cur_trace->end_true_args_size = save_args_size;
2259 cur_row->cfa = save_cfa;
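/* Worked example, assuming a target whose stack grows downward: if the
   trace ends with 16 bytes of outgoing arguments pushed and the current
   CFA rule is "sp + 48", an EH landing pad or non-local goto receiver is
   reached with those arguments already popped, so the row propagated
   across the abnormal edge temporarily becomes "sp + 32" with args_size 0;
   the saved CFA and args_size are then restored for the normal path.  */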
2262 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2263 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2265 static void
2266 create_trace_edges (rtx insn)
2268 rtx tmp, lab;
2269 int i, n;
2271 if (JUMP_P (insn))
2273 if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2274 return;
2276 if (tablejump_p (insn, NULL, &tmp))
2278 rtvec vec;
2280 tmp = PATTERN (tmp);
2281 vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
2283 n = GET_NUM_ELEM (vec);
2284 for (i = 0; i < n; ++i)
2286 lab = XEXP (RTVEC_ELT (vec, i), 0);
2287 maybe_record_trace_start (lab, insn);
2290 else if (computed_jump_p (insn))
2292 for (lab = forced_labels; lab; lab = XEXP (lab, 1))
2293 maybe_record_trace_start (XEXP (lab, 0), insn);
2295 else if (returnjump_p (insn))
2297 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2299 n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2300 for (i = 0; i < n; ++i)
2302 lab = XEXP (ASM_OPERANDS_LABEL (tmp, i), 0);
2303 maybe_record_trace_start (lab, insn);
2306 else
2308 lab = JUMP_LABEL (insn);
2309 gcc_assert (lab != NULL);
2310 maybe_record_trace_start (lab, insn);
2313 else if (CALL_P (insn))
2315 /* Sibling calls don't have edges inside this function. */
2316 if (SIBLING_CALL_P (insn))
2317 return;
2319 /* Process non-local goto edges. */
2320 if (can_nonlocal_goto (insn))
2321 for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
2322 maybe_record_trace_start_abnormal (XEXP (lab, 0), insn);
2324 else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
2326 rtx seq = PATTERN (insn);
2327 int i, n = XVECLEN (seq, 0);
2328 for (i = 0; i < n; ++i)
2329 create_trace_edges (XVECEXP (seq, 0, i));
2330 return;
2333 /* Process EH edges. */
2334 if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2336 eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2337 if (lp)
2338 maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2342 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2344 static void
2345 scan_insn_after (rtx insn)
2347 if (RTX_FRAME_RELATED_P (insn))
2348 dwarf2out_frame_debug (insn);
2349 notice_args_size (insn);
2352 /* Scan the trace beginning at INSN and create the CFI notes for the
2353 instructions therein. */
2355 static void
2356 scan_trace (dw_trace_info *trace)
2358 rtx prev, insn = trace->head;
2359 dw_cfa_location this_cfa;
2361 if (dump_file)
2362 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2363 trace->id, rtx_name[(int) GET_CODE (insn)],
2364 INSN_UID (insn));
2366 trace->end_row = copy_cfi_row (trace->beg_row);
2367 trace->end_true_args_size = trace->beg_true_args_size;
2369 cur_trace = trace;
2370 cur_row = trace->end_row;
2372 this_cfa = cur_row->cfa;
2373 cur_cfa = &this_cfa;
2375 for (prev = insn, insn = NEXT_INSN (insn);
2376 insn;
2377 prev = insn, insn = NEXT_INSN (insn))
2379 rtx control;
2381 /* Do everything that happens "before" the insn. */
2382 add_cfi_insn = prev;
2384 /* Notice the end of a trace. */
2385 if (BARRIER_P (insn))
2387 /* Don't bother saving the unneeded queued registers at all. */
2388 queued_reg_saves.truncate (0);
2389 break;
2391 if (save_point_p (insn))
2393 /* Propagate across fallthru edges. */
2394 dwarf2out_flush_queued_reg_saves ();
2395 maybe_record_trace_start (insn, NULL);
2396 break;
2399 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2400 continue;
2402 /* Handle all changes to the row state. Sequences require special
2403 handling for the positioning of the notes. */
2404 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
2406 rtx elt, pat = PATTERN (insn);
2407 int i, n = XVECLEN (pat, 0);
2409 control = XVECEXP (pat, 0, 0);
2410 if (can_throw_internal (control))
2411 notice_eh_throw (control);
2412 dwarf2out_flush_queued_reg_saves ();
2414 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2416 /* ??? Hopefully multiple delay slots are not annulled. */
2417 gcc_assert (n == 2);
2418 gcc_assert (!RTX_FRAME_RELATED_P (control));
2419 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2421 elt = XVECEXP (pat, 0, 1);
2423 if (INSN_FROM_TARGET_P (elt))
2425 HOST_WIDE_INT restore_args_size;
2426 cfi_vec save_row_reg_save;
2428 /* If ELT is an instruction from target of an annulled
2429 branch, the effects are for the target only and so
2430 the args_size and CFA along the current path
2431 shouldn't change. */
2432 add_cfi_insn = NULL;
2433 restore_args_size = cur_trace->end_true_args_size;
2434 cur_cfa = &cur_row->cfa;
2435 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2437 scan_insn_after (elt);
2439 /* ??? Should we instead save the entire row state? */
2440 gcc_assert (!queued_reg_saves.length ());
2442 create_trace_edges (control);
2444 cur_trace->end_true_args_size = restore_args_size;
2445 cur_row->cfa = this_cfa;
2446 cur_row->reg_save = save_row_reg_save;
2447 cur_cfa = &this_cfa;
2449 else
2451 /* If ELT is an annulled delay-slot insn (i.e. one that is
2452 executed only when the branch is not taken), the args_size
2453 and CFA should not change through the jump. */
2454 create_trace_edges (control);
2456 /* Update and continue with the trace. */
2457 add_cfi_insn = insn;
2458 scan_insn_after (elt);
2459 def_cfa_1 (&this_cfa);
2461 continue;
2464 /* The insns in the delay slot should all be considered to happen
2465 "before" a call insn. Consider a call with a stack pointer
2466 adjustment in the delay slot. The backtrace from the callee
2467 should include the sp adjustment. Unfortunately, that leaves
2468 us with an unavoidable unwinding error exactly at the call insn
2469 itself. For jump insns we'd prefer to avoid this error by
2470 placing the notes after the sequence. */
2471 if (JUMP_P (control))
2472 add_cfi_insn = insn;
2474 for (i = 1; i < n; ++i)
2476 elt = XVECEXP (pat, 0, i);
2477 scan_insn_after (elt);
2480 /* Make sure any register saves are visible at the jump target. */
2481 dwarf2out_flush_queued_reg_saves ();
2482 any_cfis_emitted = false;
2484 /* However, if there is some adjustment on the call itself, e.g.
2485 a call_pop, that action should be considered to happen after
2486 the call returns. */
2487 add_cfi_insn = insn;
2488 scan_insn_after (control);
2490 else
2492 /* Flush queued data before calls and jumps, and whenever else it becomes necessary. */
2493 if (can_throw_internal (insn))
2495 notice_eh_throw (insn);
2496 dwarf2out_flush_queued_reg_saves ();
2498 else if (!NONJUMP_INSN_P (insn)
2499 || clobbers_queued_reg_save (insn)
2500 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2501 dwarf2out_flush_queued_reg_saves ();
2502 any_cfis_emitted = false;
2504 add_cfi_insn = insn;
2505 scan_insn_after (insn);
2506 control = insn;
2509 /* Between frame-related-p and args_size we might have otherwise
2510 emitted two cfa adjustments. Do it now. */
2511 def_cfa_1 (&this_cfa);
2513 /* Minimize the number of advances by emitting the entire queue
2514 once anything is emitted. */
2515 if (any_cfis_emitted
2516 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2517 dwarf2out_flush_queued_reg_saves ();
2519 /* Note that control_flow_insn_p performs exactly the same
2520 tests as are done to actually create the edges. So always
2521 call the routine and let it decline to create edges for
2522 non-control-flow insns. */
2523 create_trace_edges (control);
2526 add_cfi_insn = NULL;
2527 cur_row = NULL;
2528 cur_trace = NULL;
2529 cur_cfa = NULL;
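/* Note-placement sketch for the loop above: flushes triggered "before" an
   insn (EH throws, clobbered queued saves) are emitted while ADD_CFI_INSN
   still points at PREV, i.e. ahead of the insn, while the insn's own
   effects are recorded with ADD_CFI_INSN pointing at the insn itself.  So
   for a hypothetical frame-related adjustment

     sub  sp, sp, #32
     .cfi_def_cfa_offset 48    <- note lands after the insn it describes

   the resulting CFI note follows the adjustment (the directive shown here
   is only what the note eventually becomes in final output).  */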
2532 /* Scan the function and create the initial set of CFI notes. */
2534 static void
2535 create_cfi_notes (void)
2537 dw_trace_info *ti;
2539 gcc_checking_assert (!queued_reg_saves.exists ());
2540 gcc_checking_assert (!trace_work_list.exists ());
2542 /* Always begin at the entry trace. */
2543 ti = &trace_info[0];
2544 scan_trace (ti);
2546 while (!trace_work_list.is_empty ())
2548 ti = trace_work_list.pop ();
2549 scan_trace (ti);
2552 queued_reg_saves.release ();
2553 trace_work_list.release ();
2556 /* Return the insn before the first NOTE_INSN_CFI after START. */
2558 static rtx
2559 before_next_cfi_note (rtx start)
2561 rtx prev = start;
2562 while (start)
2564 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2565 return prev;
2566 prev = start;
2567 start = NEXT_INSN (start);
2569 gcc_unreachable ();
2572 /* Insert CFI notes between traces to properly change state between them. */
2574 static void
2575 connect_traces (void)
2577 unsigned i, n = trace_info.length ();
2578 dw_trace_info *prev_ti, *ti;
2580 /* ??? Ideally, we should have both queued and processed every trace.
2581 However the current representation of constant pools on various targets
2582 is indistinguishable from unreachable code. Assume for the moment that
2583 we can simply skip over such traces. */
2584 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2585 these are not "real" instructions, and should not be considered.
2586 This could be generically useful for tablejump data as well. */
2587 /* Remove all unprocessed traces from the list. */
2588 for (i = n - 1; i > 0; --i)
2590 ti = &trace_info[i];
2591 if (ti->beg_row == NULL)
2593 trace_info.ordered_remove (i);
2594 n -= 1;
2596 else
2597 gcc_assert (ti->end_row != NULL);
2600 /* Work from the end back to the beginning. This lets us easily insert
2601 remember/restore_state notes in the correct order wrt other notes. */
2602 prev_ti = &trace_info[n - 1];
2603 for (i = n - 1; i > 0; --i)
2605 dw_cfi_row *old_row;
2607 ti = prev_ti;
2608 prev_ti = &trace_info[i - 1];
2610 add_cfi_insn = ti->head;
2612 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2613 for the portion of the function in the alternate text
2614 section. The row state at the very beginning of that
2615 new FDE will be exactly the row state from the CIE. */
2616 if (ti->switch_sections)
2617 old_row = cie_cfi_row;
2618 else
2620 old_row = prev_ti->end_row;
2621 /* If there's no change from the previous end state, fine. */
2622 if (cfi_row_equal_p (old_row, ti->beg_row))
2624 /* Otherwise check for the common case of sharing state with
2625 the beginning of an epilogue, but not the end. Insert
2626 remember/restore opcodes in that case. */
2627 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2629 dw_cfi_ref cfi;
2631 /* Note that if we blindly insert the remember at the
2632 start of the trace, we can wind up increasing the
2633 size of the unwind info due to extra advance opcodes.
2634 Instead, put the remember immediately before the next
2635 state change. We know there must be one, because the
2636 state at the beginning and head of the trace differ. */
2637 add_cfi_insn = before_next_cfi_note (prev_ti->head);
2638 cfi = new_cfi ();
2639 cfi->dw_cfi_opc = DW_CFA_remember_state;
2640 add_cfi (cfi);
2642 add_cfi_insn = ti->head;
2643 cfi = new_cfi ();
2644 cfi->dw_cfi_opc = DW_CFA_restore_state;
2645 add_cfi (cfi);
2647 old_row = prev_ti->beg_row;
2649 /* Otherwise, we'll simply change state from the previous end. */
2652 change_cfi_row (old_row, ti->beg_row);
2654 if (dump_file && add_cfi_insn != ti->head)
2656 rtx note;
2658 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2659 prev_ti->id, ti->id);
2661 note = ti->head;
2664 note = NEXT_INSN (note);
2665 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2666 output_cfi_directive (dump_file, NOTE_CFI (note));
2668 while (note != add_cfi_insn);
2672 /* Connect args_size between traces that have can_throw_internal insns. */
2673 if (cfun->eh->lp_array)
2675 HOST_WIDE_INT prev_args_size = 0;
2677 for (i = 0; i < n; ++i)
2679 ti = &trace_info[i];
2681 if (ti->switch_sections)
2682 prev_args_size = 0;
2683 if (ti->eh_head == NULL)
2684 continue;
2685 gcc_assert (!ti->args_size_undefined);
2687 if (ti->beg_delay_args_size != prev_args_size)
2689 /* ??? Search back to previous CFI note. */
2690 add_cfi_insn = PREV_INSN (ti->eh_head);
2691 add_cfi_args_size (ti->beg_delay_args_size);
2694 prev_args_size = ti->end_delay_args_size;
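/* Sketch of the remember/restore case above, in directive form with
   hypothetical operands: a shrink-wrapped epilogue in the middle of the
   body can be bracketed as

       .cfi_remember_state     <- placed just before the trace's first change
       .cfi_def_cfa 7, 8       <- epilogue unwinds back to the entry state
       ret
     .L_resume:
       .cfi_restore_state      <- next trace resumes the saved body row

   which avoids re-emitting every register-save rule of the interior row at
   the resume point.  */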
2699 /* Set up the pseudo-cfg of instruction traces, as described at the
2700 block comment at the top of the file. */
2702 static void
2703 create_pseudo_cfg (void)
2705 bool saw_barrier, switch_sections;
2706 dw_trace_info ti;
2707 rtx insn;
2708 unsigned i;
2710 /* The first trace begins at the start of the function
2711 and starts from the CIE row state. */
2712 trace_info.create (16);
2713 memset (&ti, 0, sizeof (ti));
2714 ti.head = get_insns ();
2715 ti.beg_row = cie_cfi_row;
2716 ti.cfa_store = cie_cfi_row->cfa;
2717 ti.cfa_temp.reg = INVALID_REGNUM;
2718 trace_info.quick_push (ti);
2720 if (cie_return_save)
2721 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2723 /* Walk all the insns, collecting start of trace locations. */
2724 saw_barrier = false;
2725 switch_sections = false;
2726 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2728 if (BARRIER_P (insn))
2729 saw_barrier = true;
2730 else if (NOTE_P (insn)
2731 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2733 /* We should have just seen a barrier. */
2734 gcc_assert (saw_barrier);
2735 switch_sections = true;
2737 /* Watch out for save_point notes between basic blocks,
2738 in particular a note after a barrier. Do not record these;
2739 delay trace creation until the following label. */
2740 else if (save_point_p (insn)
2741 && (LABEL_P (insn) || !saw_barrier))
2743 memset (&ti, 0, sizeof (ti));
2744 ti.head = insn;
2745 ti.switch_sections = switch_sections;
2746 ti.id = trace_info.length () - 1;
2747 trace_info.safe_push (ti);
2749 saw_barrier = false;
2750 switch_sections = false;
2754 /* Create the trace index after we've finished building trace_info,
2755 avoiding stale pointer problems due to reallocation. */
2756 trace_index.create (trace_info.length ());
2757 dw_trace_info *tp;
2758 FOR_EACH_VEC_ELT (trace_info, i, tp)
2760 dw_trace_info **slot;
2762 if (dump_file)
2763 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
2764 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2765 tp->switch_sections ? " (section switch)" : "");
2767 slot = trace_index.find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2768 gcc_assert (*slot == NULL);
2769 *slot = tp;
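/* Example of head selection under the rules above, for a hypothetical insn
   stream in which L1 and L2 are save points:

     code_label L1      <- starts a trace
     insn ...
     barrier
     note               <- save point after a barrier: skipped
     code_label L2      <- the next trace starts here instead

   so dead space between a barrier and the following label never ends up
   inside a trace.  */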
2773 /* Record the initial position of the return address. RTL is
2774 INCOMING_RETURN_ADDR_RTX. */
2776 static void
2777 initial_return_save (rtx rtl)
2779 unsigned int reg = INVALID_REGNUM;
2780 HOST_WIDE_INT offset = 0;
2782 switch (GET_CODE (rtl))
2784 case REG:
2785 /* RA is in a register. */
2786 reg = dwf_regno (rtl);
2787 break;
2789 case MEM:
2790 /* RA is on the stack. */
2791 rtl = XEXP (rtl, 0);
2792 switch (GET_CODE (rtl))
2794 case REG:
2795 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2796 offset = 0;
2797 break;
2799 case PLUS:
2800 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2801 offset = INTVAL (XEXP (rtl, 1));
2802 break;
2804 case MINUS:
2805 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2806 offset = -INTVAL (XEXP (rtl, 1));
2807 break;
2809 default:
2810 gcc_unreachable ();
2813 break;
2815 case PLUS:
2816 /* The return address is at some offset from any value we can
2817 actually load. For instance, on the SPARC it is in %i7+8. Just
2818 ignore the offset for now; it doesn't matter for unwinding frames. */
2819 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2820 initial_return_save (XEXP (rtl, 0));
2821 return;
2823 default:
2824 gcc_unreachable ();
2827 if (reg != DWARF_FRAME_RETURN_COLUMN)
2829 if (reg != INVALID_REGNUM)
2830 record_reg_saved_in_reg (rtl, pc_rtx);
2831 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
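/* Two illustrative shapes of INCOMING_RETURN_ADDR_RTX handled above, with
   hypothetical registers: a link-register target supplies (reg lr), which
   records a DW_CFA_register mapping for the return column unless lr already
   is that column; a target whose calls push the return address supplies
   something like (mem (plus (reg sp) (const_int N))), which records the
   return address as saved at sp+N, expressed relative to the CFA as
   N minus the initial CFA offset.  */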
2835 static void
2836 create_cie_data (void)
2838 dw_cfa_location loc;
2839 dw_trace_info cie_trace;
2841 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2842 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2844 memset (&cie_trace, 0, sizeof (cie_trace));
2845 cur_trace = &cie_trace;
2847 add_cfi_vec = &cie_cfi_vec;
2848 cie_cfi_row = cur_row = new_cfi_row ();
2850 /* On entry, the Canonical Frame Address is at SP. */
2851 memset (&loc, 0, sizeof (loc));
2852 loc.reg = dw_stack_pointer_regnum;
2853 loc.offset = INCOMING_FRAME_SP_OFFSET;
2854 def_cfa_1 (&loc);
2856 if (targetm.debug_unwind_info () == UI_DWARF2
2857 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2859 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2861 /* For a few targets, we have the return address incoming into a
2862 register, but choose a different return column. This will result
2863 in a DW_CFA_register for the return, and an entry in
2864 regs_saved_in_regs to match. If the target later stores that
2865 return address register to the stack, we want to be able to emit
2866 the DW_CFA_offset against the return column, not the intermediate
2867 save register. Save the contents of regs_saved_in_regs so that
2868 we can re-initialize it at the start of each function. */
2869 switch (cie_trace.regs_saved_in_regs.length ())
2871 case 0:
2872 break;
2873 case 1:
2874 cie_return_save = ggc_alloc_reg_saved_in_data ();
2875 *cie_return_save = cie_trace.regs_saved_in_regs[0];
2876 cie_trace.regs_saved_in_regs.release ();
2877 break;
2878 default:
2879 gcc_unreachable ();
2883 add_cfi_vec = NULL;
2884 cur_row = NULL;
2885 cur_trace = NULL;
2888 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2889 state at each location within the function. These notes will be
2890 emitted during pass_final. */
2892 static unsigned int
2893 execute_dwarf2_frame (void)
2895 /* The first time we're called, compute the incoming frame state. */
2896 if (cie_cfi_vec == NULL)
2897 create_cie_data ();
2899 dwarf2out_alloc_current_fde ();
2901 create_pseudo_cfg ();
2903 /* Do the work. */
2904 create_cfi_notes ();
2905 connect_traces ();
2906 add_cfis_to_fde ();
2908 /* Free all the data we allocated. */
2910 size_t i;
2911 dw_trace_info *ti;
2913 FOR_EACH_VEC_ELT (trace_info, i, ti)
2914 ti->regs_saved_in_regs.release ();
2916 trace_info.release ();
2918 trace_index.dispose ();
2920 return 0;
2923 /* Convert a DWARF call frame info operation to its string name. */
2925 static const char *
2926 dwarf_cfi_name (unsigned int cfi_opc)
2928 const char *name = get_DW_CFA_name (cfi_opc);
2930 if (name != NULL)
2931 return name;
2933 return "DW_CFA_<unknown>";
2936 /* This routine will generate the correct assembly data for a location
2937 description based on a cfi entry with a complex address. */
2939 static void
2940 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2942 dw_loc_descr_ref loc;
2943 unsigned long size;
2945 if (cfi->dw_cfi_opc == DW_CFA_expression)
2947 unsigned r =
2948 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2949 dw2_asm_output_data (1, r, NULL);
2950 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2952 else
2953 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2955 /* Output the size of the block. */
2956 size = size_of_locs (loc);
2957 dw2_asm_output_data_uleb128 (size, NULL);
2959 /* Now output the operations themselves. */
2960 output_loc_sequence (loc, for_eh);
2963 /* Similar, but used for .cfi_escape. */
2965 static void
2966 output_cfa_loc_raw (dw_cfi_ref cfi)
2968 dw_loc_descr_ref loc;
2969 unsigned long size;
2971 if (cfi->dw_cfi_opc == DW_CFA_expression)
2973 unsigned r =
2974 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2975 fprintf (asm_out_file, "%#x,", r);
2976 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2978 else
2979 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2981 /* Output the size of the block. */
2982 size = size_of_locs (loc);
2983 dw2_asm_output_data_uleb128_raw (size);
2984 fputc (',', asm_out_file);
2986 /* Now output the operations themselves. */
2987 output_loc_sequence_raw (loc);
2990 /* Output a Call Frame Information opcode and its operand(s). */
2992 void
2993 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
2995 unsigned long r;
2996 HOST_WIDE_INT off;
2998 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2999 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3000 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3001 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3002 ((unsigned HOST_WIDE_INT)
3003 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3004 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3006 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3007 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3008 "DW_CFA_offset, column %#lx", r);
3009 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3010 dw2_asm_output_data_uleb128 (off, NULL);
3012 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3014 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3015 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3016 "DW_CFA_restore, column %#lx", r);
3018 else
3020 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3021 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3023 switch (cfi->dw_cfi_opc)
3025 case DW_CFA_set_loc:
3026 if (for_eh)
3027 dw2_asm_output_encoded_addr_rtx (
3028 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3029 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3030 false, NULL);
3031 else
3032 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3033 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3034 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3035 break;
3037 case DW_CFA_advance_loc1:
3038 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3039 fde->dw_fde_current_label, NULL);
3040 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3041 break;
3043 case DW_CFA_advance_loc2:
3044 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3045 fde->dw_fde_current_label, NULL);
3046 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3047 break;
3049 case DW_CFA_advance_loc4:
3050 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3051 fde->dw_fde_current_label, NULL);
3052 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3053 break;
3055 case DW_CFA_MIPS_advance_loc8:
3056 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3057 fde->dw_fde_current_label, NULL);
3058 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3059 break;
3061 case DW_CFA_offset_extended:
3062 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3063 dw2_asm_output_data_uleb128 (r, NULL);
3064 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3065 dw2_asm_output_data_uleb128 (off, NULL);
3066 break;
3068 case DW_CFA_def_cfa:
3069 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3070 dw2_asm_output_data_uleb128 (r, NULL);
3071 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3072 break;
3074 case DW_CFA_offset_extended_sf:
3075 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3076 dw2_asm_output_data_uleb128 (r, NULL);
3077 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3078 dw2_asm_output_data_sleb128 (off, NULL);
3079 break;
3081 case DW_CFA_def_cfa_sf:
3082 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3083 dw2_asm_output_data_uleb128 (r, NULL);
3084 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3085 dw2_asm_output_data_sleb128 (off, NULL);
3086 break;
3088 case DW_CFA_restore_extended:
3089 case DW_CFA_undefined:
3090 case DW_CFA_same_value:
3091 case DW_CFA_def_cfa_register:
3092 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3093 dw2_asm_output_data_uleb128 (r, NULL);
3094 break;
3096 case DW_CFA_register:
3097 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3098 dw2_asm_output_data_uleb128 (r, NULL);
3099 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3100 dw2_asm_output_data_uleb128 (r, NULL);
3101 break;
3103 case DW_CFA_def_cfa_offset:
3104 case DW_CFA_GNU_args_size:
3105 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3106 break;
3108 case DW_CFA_def_cfa_offset_sf:
3109 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3110 dw2_asm_output_data_sleb128 (off, NULL);
3111 break;
3113 case DW_CFA_GNU_window_save:
3114 break;
3116 case DW_CFA_def_cfa_expression:
3117 case DW_CFA_expression:
3118 output_cfa_loc (cfi, for_eh);
3119 break;
3121 case DW_CFA_GNU_negative_offset_extended:
3122 /* Obsoleted by DW_CFA_offset_extended_sf. */
3123 gcc_unreachable ();
3125 default:
3126 break;
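/* Byte-level example of the compact forms above, assuming a data alignment
   factor of -8 (typical for 64-bit targets): a register in column 5 saved
   at CFA-16 is encoded as DW_CFA_offset with the column folded into the
   opcode byte, 0x80 | 5 = 0x85, followed by the factored offset as a
   ULEB128, -16 / -8 = 2; a consumer multiplies 2 by the data alignment
   factor to recover -16.  */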
3131 /* Similar, but do it via assembler directives instead. */
3133 void
3134 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3136 unsigned long r, r2;
3138 switch (cfi->dw_cfi_opc)
3140 case DW_CFA_advance_loc:
3141 case DW_CFA_advance_loc1:
3142 case DW_CFA_advance_loc2:
3143 case DW_CFA_advance_loc4:
3144 case DW_CFA_MIPS_advance_loc8:
3145 case DW_CFA_set_loc:
3146 /* Should only be created in a code path not followed when emitting
3147 via directives. The assembler is going to take care of this for
3148 us. But this routine is also used for debugging dumps, so
3149 print something. */
3150 gcc_assert (f != asm_out_file);
3151 fprintf (f, "\t.cfi_advance_loc\n");
3152 break;
3154 case DW_CFA_offset:
3155 case DW_CFA_offset_extended:
3156 case DW_CFA_offset_extended_sf:
3157 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3158 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3159 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3160 break;
3162 case DW_CFA_restore:
3163 case DW_CFA_restore_extended:
3164 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3165 fprintf (f, "\t.cfi_restore %lu\n", r);
3166 break;
3168 case DW_CFA_undefined:
3169 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3170 fprintf (f, "\t.cfi_undefined %lu\n", r);
3171 break;
3173 case DW_CFA_same_value:
3174 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3175 fprintf (f, "\t.cfi_same_value %lu\n", r);
3176 break;
3178 case DW_CFA_def_cfa:
3179 case DW_CFA_def_cfa_sf:
3180 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3181 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3182 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3183 break;
3185 case DW_CFA_def_cfa_register:
3186 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3187 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3188 break;
3190 case DW_CFA_register:
3191 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3192 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3193 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3194 break;
3196 case DW_CFA_def_cfa_offset:
3197 case DW_CFA_def_cfa_offset_sf:
3198 fprintf (f, "\t.cfi_def_cfa_offset "
3199 HOST_WIDE_INT_PRINT_DEC"\n",
3200 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3201 break;
3203 case DW_CFA_remember_state:
3204 fprintf (f, "\t.cfi_remember_state\n");
3205 break;
3206 case DW_CFA_restore_state:
3207 fprintf (f, "\t.cfi_restore_state\n");
3208 break;
3210 case DW_CFA_GNU_args_size:
3211 if (f == asm_out_file)
3213 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3214 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3215 if (flag_debug_asm)
3216 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3217 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3218 fputc ('\n', f);
3220 else
3222 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3223 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3225 break;
3227 case DW_CFA_GNU_window_save:
3228 fprintf (f, "\t.cfi_window_save\n");
3229 break;
3231 case DW_CFA_def_cfa_expression:
3232 if (f != asm_out_file)
3234 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3235 break;
3237 /* FALLTHRU */
3238 case DW_CFA_expression:
3239 if (f != asm_out_file)
3241 fprintf (f, "\t.cfi_cfa_expression ...\n");
3242 break;
3244 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3245 output_cfa_loc_raw (cfi);
3246 fputc ('\n', f);
3247 break;
3249 default:
3250 gcc_unreachable ();
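/* Directive-form example for the cases above, with hypothetical operands:
   DW_CFA_def_cfa (6, 16) prints as ".cfi_def_cfa 6, 16" and
   DW_CFA_offset (6, -16) as ".cfi_offset 6, -16".  Unlike the binary
   encoding in output_cfi, the offsets here are printed unfactored and any
   factoring is left to the assembler.  */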
3254 void
3255 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3257 if (dwarf2out_do_cfi_asm ())
3258 output_cfi_directive (asm_out_file, cfi);
3261 static void
3262 dump_cfi_row (FILE *f, dw_cfi_row *row)
3264 dw_cfi_ref cfi;
3265 unsigned i;
3267 cfi = row->cfa_cfi;
3268 if (!cfi)
3270 dw_cfa_location dummy;
3271 memset (&dummy, 0, sizeof (dummy));
3272 dummy.reg = INVALID_REGNUM;
3273 cfi = def_cfa_0 (&dummy, &row->cfa);
3275 output_cfi_directive (f, cfi);
3277 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3278 if (cfi)
3279 output_cfi_directive (f, cfi);
3282 void debug_cfi_row (dw_cfi_row *row);
3284 void
3285 debug_cfi_row (dw_cfi_row *row)
3287 dump_cfi_row (stderr, row);
3291 /* Save the result of dwarf2out_do_frame across PCH.
3292 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3293 static GTY(()) signed char saved_do_cfi_asm = 0;
3295 /* Decide whether we want to emit frame unwind information for the current
3296 translation unit. */
3298 bool
3299 dwarf2out_do_frame (void)
3301 /* We want to emit correct CFA location expressions or lists, so we
3302 have to return true if we're going to output debug info, even if
3303 we're not going to output frame or unwind info. */
3304 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3305 return true;
3307 if (saved_do_cfi_asm > 0)
3308 return true;
3310 if (targetm.debug_unwind_info () == UI_DWARF2)
3311 return true;
3313 if ((flag_unwind_tables || flag_exceptions)
3314 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3315 return true;
3317 return false;
3320 /* Decide whether to emit frame unwind via assembler directives. */
3322 bool
3323 dwarf2out_do_cfi_asm (void)
3325 int enc;
3327 if (saved_do_cfi_asm != 0)
3328 return saved_do_cfi_asm > 0;
3330 /* Assume failure for a moment. */
3331 saved_do_cfi_asm = -1;
3333 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3334 return false;
3335 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3336 return false;
3338 /* Make sure the personality encoding is one the assembler can support.
3339 In particular, aligned addresses can't be handled. */
3340 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3341 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3342 return false;
3343 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3344 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3345 return false;
3347 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3348 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3349 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3350 && !flag_unwind_tables && !flag_exceptions
3351 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3352 return false;
3354 /* Success! */
3355 saved_do_cfi_asm = 1;
3356 return true;
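/* Background for the encoding check above: in the DW_EH_PE_* encodings the
   low four bits select the value format (udata4, sdata8, ...) and the 0x70
   bits select what the value is relative to (absolute, pc-relative,
   text-relative, ...).  The test therefore accepts only absolute and
   pc-relative personality/LSDA encodings; anything else, including
   DW_EH_PE_aligned, forces the non-directive output path.  */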
3359 static bool
3360 gate_dwarf2_frame (void)
3362 #ifndef HAVE_prologue
3363 /* Targets which still implement the prologue in assembler text
3364 cannot use the generic dwarf2 unwinding. */
3365 return false;
3366 #endif
3368 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3369 from the optimized shrink-wrapping annotations that we will compute.
3370 For now, only produce the CFI notes for dwarf2. */
3371 return dwarf2out_do_frame ();
3374 namespace {
3376 const pass_data pass_data_dwarf2_frame =
3378 RTL_PASS, /* type */
3379 "dwarf2", /* name */
3380 OPTGROUP_NONE, /* optinfo_flags */
3381 true, /* has_gate */
3382 true, /* has_execute */
3383 TV_FINAL, /* tv_id */
3384 0, /* properties_required */
3385 0, /* properties_provided */
3386 0, /* properties_destroyed */
3387 0, /* todo_flags_start */
3388 0, /* todo_flags_finish */
3391 class pass_dwarf2_frame : public rtl_opt_pass
3393 public:
3394 pass_dwarf2_frame (gcc::context *ctxt)
3395 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3398 /* opt_pass methods: */
3399 bool gate () { return gate_dwarf2_frame (); }
3400 unsigned int execute () { return execute_dwarf2_frame (); }
3402 }; // class pass_dwarf2_frame
3404 } // anon namespace
3406 rtl_opt_pass *
3407 make_pass_dwarf2_frame (gcc::context *ctxt)
3409 return new pass_dwarf2_frame (ctxt);
3412 #include "gt-dwarf2cfi.h"