Make all gimple_omp_for_ accessors typesafe
[official-gcc.git] / gcc / dwarf2cfi.c
blobe1a60c2d11a7936a76ebbfcdda76512803552191
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "hashtab.h"
30 #include "hash-set.h"
31 #include "vec.h"
32 #include "machmode.h"
33 #include "hard-reg-set.h"
34 #include "input.h"
35 #include "function.h"
36 #include "cfgbuild.h"
37 #include "dwarf2.h"
38 #include "dwarf2out.h"
39 #include "dwarf2asm.h"
40 #include "ggc.h"
41 #include "hash-table.h"
42 #include "tm_p.h"
43 #include "target.h"
44 #include "common/common-target.h"
45 #include "tree-pass.h"
47 #include "except.h" /* expand_builtin_dwarf_sp_column */
48 #include "expr.h" /* init_return_column_size */
49 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
50 #include "output.h" /* asm_out_file */
51 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
54 /* ??? Poison these here until it can be done generically. They've been
55 totally replaced in this file; make sure it stays that way. */
56 #undef DWARF2_UNWIND_INFO
57 #undef DWARF2_FRAME_INFO
58 #if (GCC_VERSION >= 3000)
59 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
60 #endif
62 #ifndef INCOMING_RETURN_ADDR_RTX
63 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
64 #endif
66 /* Maximum size (in bytes) of an artificially generated label. */
67 #define MAX_ARTIFICIAL_LABEL_BYTES 30
69 /* A collected description of an entire row of the abstract CFI table. */
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a NULL element means the column is not saved.  */
  cfi_vec reg_save;
} dw_cfi_row;
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  /* The register as the caller knows it.  */
  rtx orig_reg;
  /* The register currently holding ORIG_REG's entry value.  */
  rtx saved_in_reg;
} reg_saved_in_data;
89 /* Since we no longer have a proper CFG, we're going to create a facsimile
90 of one on the fly while processing the frame-related insns.
92 We create dw_trace_info structures for each extended basic block beginning
93 and ending at a "save point". Save points are labels, barriers, certain
94 notes, and of course the beginning and end of the function.
96 As we encounter control transfer insns, we propagate the "current"
97 row state across the edges to the starts of traces. When checking is
98 enabled, we validate that we propagate the same data from all sources.
100 All traces are members of the TRACE_INFO array, in the order in which
101 they appear in the instruction stream.
103 All save points are present in the TRACE_INDEX hash, mapping the insn
104 starting a trace to the dw_trace_info describing the trace. */
typedef struct
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;

/* Convenience alias so traces can be stored in vectors by pointer.  */
typedef dw_trace_info *dw_trace_info_ref;
/* Hashtable helpers.  Traces are keyed by their head insn; the hash is
   the head insn's UID, so lookups can be done from an insn alone.  */

struct trace_info_hasher : typed_noop_remove <dw_trace_info>
{
  typedef dw_trace_info value_type;
  typedef dw_trace_info compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
trace_info_hasher::hash (const value_type *ti)
{
  /* INSN_UID is stable and unique per insn, making it a cheap hash.  */
  return INSN_UID (ti->head);
}

inline bool
trace_info_hasher::equal (const value_type *a, const compare_type *b)
{
  /* Two traces are the same iff they start at the same insn.  */
  return a->head == b->head;
}
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* If the return address is saved by the CIE, the saved location.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding the "LCFI" internal labels; see dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;

static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
240 /* Hook used by __throw. */
243 expand_builtin_dwarf_sp_column (void)
245 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
246 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
249 /* MEM is a memory reference for the register size table, each element of
250 which has mode MODE. Initialize column C as a return address column. */
252 static void
253 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
255 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
256 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
257 emit_move_insn (adjust_address (mem, mode, offset),
258 gen_int_mode (size, mode));
/* Generate code to initialize the register size table.  ADDRESS is a tree
   whose value is the base of the table; each element holds the byte size
   of the mode a register is saved in, indexed by external DWARF column.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Map hard register I to its external DWARF column.  */
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  HOST_WIDE_INT size;
	  enum machine_mode save_mode = targetm.dwarf_frame_reg_mode (i);

	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      /* VOIDmode means this register does not provide the return
		 column's size; leave it to the fallback below.  */
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* If no register claimed the return column, give it a pointer's size.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Let the target record any additional, nonstandard columns.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
309 static dw_trace_info *
310 get_trace_info (rtx_insn *insn)
312 dw_trace_info dummy;
313 dummy.head = insn;
314 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
317 static bool
318 save_point_p (rtx_insn *insn)
320 /* Labels, except those that are really jump tables. */
321 if (LABEL_P (insn))
322 return inside_basic_block_p (insn);
324 /* We split traces at the prologue/epilogue notes because those
325 are points at which the unwind info is usually stable. This
326 makes it easier to find spots with identical unwind info so
327 that we can use remember/restore_state opcodes. */
328 if (NOTE_P (insn))
329 switch (NOTE_KIND (insn))
331 case NOTE_INSN_PROLOGUE_END:
332 case NOTE_INSN_EPILOGUE_BEG:
333 return true;
336 return false;
339 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
341 static inline HOST_WIDE_INT
342 div_data_align (HOST_WIDE_INT off)
344 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
345 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
346 return r;
349 /* Return true if we need a signed version of a given opcode
350 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
352 static inline bool
353 need_data_align_sf_opcode (HOST_WIDE_INT off)
355 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
358 /* Return a pointer to a newly allocated Call Frame Instruction. */
360 static inline dw_cfi_ref
361 new_cfi (void)
363 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
365 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
366 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
368 return cfi;
371 /* Return a newly allocated CFI row, with no defined data. */
373 static dw_cfi_row *
374 new_cfi_row (void)
376 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
378 row->cfa.reg = INVALID_REGNUM;
380 return row;
383 /* Return a copy of an existing CFI row. */
385 static dw_cfi_row *
386 copy_cfi_row (dw_cfi_row *src)
388 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
390 *dst = *src;
391 dst->reg_save = vec_safe_copy (src->reg_save);
393 return dst;
396 /* Generate a new label for the CFI info to refer to. */
398 static char *
399 dwarf2out_cfi_label (void)
401 int num = dwarf2out_cfi_label_num++;
402 char label[20];
404 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
406 return xstrdup (label);
409 /* Add CFI either to the current insn stream or to a vector, or both. */
411 static void
412 add_cfi (dw_cfi_ref cfi)
414 any_cfis_emitted = true;
416 if (add_cfi_insn != NULL)
418 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
419 NOTE_CFI (add_cfi_insn) = cfi;
422 if (add_cfi_vec != NULL)
423 vec_safe_push (*add_cfi_vec, cfi);
426 static void
427 add_cfi_args_size (HOST_WIDE_INT size)
429 dw_cfi_ref cfi = new_cfi ();
431 /* While we can occasionally have args_size < 0 internally, this state
432 should not persist at a point we actually need an opcode. */
433 gcc_assert (size >= 0);
435 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
436 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
438 add_cfi (cfi);
441 static void
442 add_cfi_restore (unsigned reg)
444 dw_cfi_ref cfi = new_cfi ();
446 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
447 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
449 add_cfi (cfi);
452 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
453 that the register column is no longer saved. */
455 static void
456 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
458 if (vec_safe_length (row->reg_save) <= column)
459 vec_safe_grow_cleared (row->reg_save, column + 1);
460 (*row->reg_save)[column] = cfi;
463 /* This function fills in aa dw_cfa_location structure from a dwarf location
464 descriptor sequence. */
466 static void
467 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
469 struct dw_loc_descr_node *ptr;
470 cfa->offset = 0;
471 cfa->base_offset = 0;
472 cfa->indirect = 0;
473 cfa->reg = -1;
475 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
477 enum dwarf_location_atom op = ptr->dw_loc_opc;
479 switch (op)
481 case DW_OP_reg0:
482 case DW_OP_reg1:
483 case DW_OP_reg2:
484 case DW_OP_reg3:
485 case DW_OP_reg4:
486 case DW_OP_reg5:
487 case DW_OP_reg6:
488 case DW_OP_reg7:
489 case DW_OP_reg8:
490 case DW_OP_reg9:
491 case DW_OP_reg10:
492 case DW_OP_reg11:
493 case DW_OP_reg12:
494 case DW_OP_reg13:
495 case DW_OP_reg14:
496 case DW_OP_reg15:
497 case DW_OP_reg16:
498 case DW_OP_reg17:
499 case DW_OP_reg18:
500 case DW_OP_reg19:
501 case DW_OP_reg20:
502 case DW_OP_reg21:
503 case DW_OP_reg22:
504 case DW_OP_reg23:
505 case DW_OP_reg24:
506 case DW_OP_reg25:
507 case DW_OP_reg26:
508 case DW_OP_reg27:
509 case DW_OP_reg28:
510 case DW_OP_reg29:
511 case DW_OP_reg30:
512 case DW_OP_reg31:
513 cfa->reg = op - DW_OP_reg0;
514 break;
515 case DW_OP_regx:
516 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
517 break;
518 case DW_OP_breg0:
519 case DW_OP_breg1:
520 case DW_OP_breg2:
521 case DW_OP_breg3:
522 case DW_OP_breg4:
523 case DW_OP_breg5:
524 case DW_OP_breg6:
525 case DW_OP_breg7:
526 case DW_OP_breg8:
527 case DW_OP_breg9:
528 case DW_OP_breg10:
529 case DW_OP_breg11:
530 case DW_OP_breg12:
531 case DW_OP_breg13:
532 case DW_OP_breg14:
533 case DW_OP_breg15:
534 case DW_OP_breg16:
535 case DW_OP_breg17:
536 case DW_OP_breg18:
537 case DW_OP_breg19:
538 case DW_OP_breg20:
539 case DW_OP_breg21:
540 case DW_OP_breg22:
541 case DW_OP_breg23:
542 case DW_OP_breg24:
543 case DW_OP_breg25:
544 case DW_OP_breg26:
545 case DW_OP_breg27:
546 case DW_OP_breg28:
547 case DW_OP_breg29:
548 case DW_OP_breg30:
549 case DW_OP_breg31:
550 cfa->reg = op - DW_OP_breg0;
551 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
552 break;
553 case DW_OP_bregx:
554 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
555 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
556 break;
557 case DW_OP_deref:
558 cfa->indirect = 1;
559 break;
560 case DW_OP_plus_uconst:
561 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
562 break;
563 default:
564 gcc_unreachable ();
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  Opcodes that do not
   affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* New offset, register unchanged.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* New register, offset unchanged.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset redefined.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported; it must be
	 restored before it can be remembered again.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
610 /* Determine if two dw_cfa_location structures define the same data. */
612 bool
613 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
615 return (loc1->reg == loc2->reg
616 && loc1->offset == loc2->offset
617 && loc1->indirect == loc2->indirect
618 && (loc1->indirect == 0
619 || loc1->base_offset == loc2->base_offset));
/* Determine if two CFI operands are identical.  T selects which member
   of the dw_cfi_oprnd union is live in both A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      /* An unused operand always matches.  */
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality first, then label-string equality.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
644 /* Determine if two CFI entries are identical. */
646 static bool
647 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
649 enum dwarf_call_frame_info opc;
651 /* Make things easier for our callers, including missing operands. */
652 if (a == b)
653 return true;
654 if (a == NULL || b == NULL)
655 return false;
657 /* Obviously, the opcodes must match. */
658 opc = a->dw_cfi_opc;
659 if (opc != b->dw_cfi_opc)
660 return false;
662 /* Compare the two operands, re-using the type of the operands as
663 already exposed elsewhere. */
664 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
665 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
666 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
667 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
670 /* Determine if two CFI_ROW structures are identical. */
672 static bool
673 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
675 size_t i, n_a, n_b, n_max;
677 if (a->cfa_cfi)
679 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
680 return false;
682 else if (!cfa_equal_p (&a->cfa, &b->cfa))
683 return false;
685 n_a = vec_safe_length (a->reg_save);
686 n_b = vec_safe_length (b->reg_save);
687 n_max = MAX (n_a, n_b);
689 for (i = 0; i < n_max; ++i)
691 dw_cfi_ref r_a = NULL, r_b = NULL;
693 if (i < n_a)
694 r_a = (*a->reg_save)[i];
695 if (i < n_b)
696 r_b = (*b->reg_save)[i];
698 if (!cfi_equal_p (r_a, r_b))
699 return false;
702 return true;
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The cases below are ordered from most to
   least compact encoding.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
772 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
774 static void
775 def_cfa_1 (dw_cfa_location *new_cfa)
777 dw_cfi_ref cfi;
779 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
780 cur_trace->cfa_store.offset = new_cfa->offset;
782 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
783 if (cfi)
785 cur_row->cfa = *new_cfa;
786 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
787 ? cfi : NULL);
789 add_cfi (cfi);
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at OFFSET from the CFA: pick the most compact offset
	 opcode that can represent REG and OFFSET.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_cfa->offset += delta;
    }
}
879 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
880 data within the trace related to EH insns and args_size. */
882 static void
883 notice_eh_throw (rtx_insn *insn)
885 HOST_WIDE_INT args_size;
887 args_size = cur_trace->end_true_args_size;
888 if (cur_trace->eh_head == NULL)
890 cur_trace->eh_head = insn;
891 cur_trace->beg_delay_args_size = args_size;
892 cur_trace->end_delay_args_size = args_size;
894 else if (cur_trace->end_delay_args_size != args_size)
896 cur_trace->end_delay_args_size = args_size;
898 /* ??? If the CFA is the stack pointer, search backward for the last
899 CFI note and insert there. Given that the stack changed for the
900 args_size change, there *must* be such a note in between here and
901 the last eh insn. */
902 add_cfi_args_size (args_size);
906 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
907 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
908 used in places where rtl is prohibited. */
910 static inline unsigned
911 dwf_regno (const_rtx reg)
913 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
914 return DWARF_FRAME_REGNUM (REGNO (reg));
917 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
919 static bool
920 compare_reg_or_pc (rtx x, rtx y)
922 if (REG_P (x) && REG_P (y))
923 return REGNO (x) == REGNO (y);
924 return x == y;
927 /* Record SRC as being saved in DEST. DEST may be null to delete an
928 existing entry. SRC may be a register or PC_RTX. */
930 static void
931 record_reg_saved_in_reg (rtx dest, rtx src)
933 reg_saved_in_data *elt;
934 size_t i;
936 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
937 if (compare_reg_or_pc (elt->orig_reg, src))
939 if (dest == NULL)
940 cur_trace->regs_saved_in_regs.unordered_remove (i);
941 else
942 elt->saved_in_reg = dest;
943 return;
946 if (dest == NULL)
947 return;
949 reg_saved_in_data e = {src, dest};
950 cur_trace->regs_saved_in_regs.safe_push (e);
953 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
954 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
956 static void
957 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
959 queued_reg_save *q;
960 queued_reg_save e = {reg, sreg, offset};
961 size_t i;
963 /* Duplicates waste space, but it's also necessary to remove them
964 for correctness, since the queue gets output in reverse order. */
965 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
966 if (compare_reg_or_pc (q->reg, reg))
968 *q = e;
969 return;
972 queued_reg_saves.safe_push (e);
975 /* Output all the entries in QUEUED_REG_SAVES. */
977 static void
978 dwarf2out_flush_queued_reg_saves (void)
980 queued_reg_save *q;
981 size_t i;
983 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
985 unsigned int reg, sreg;
987 record_reg_saved_in_reg (q->saved_reg, q->reg);
989 if (q->reg == pc_rtx)
990 reg = DWARF_FRAME_RETURN_COLUMN;
991 else
992 reg = dwf_regno (q->reg);
993 if (q->saved_reg)
994 sreg = dwf_regno (q->saved_reg);
995 else
996 sreg = INVALID_REGNUM;
997 reg_save (reg, sreg, q->cfa_offset);
1000 queued_reg_saves.truncate (0);
1003 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1004 location for? Or, does it clobber a register which we've previously
1005 said that some other register is saved in, and for which we now
1006 have a new location for? */
1008 static bool
1009 clobbers_queued_reg_save (const_rtx insn)
1011 queued_reg_save *q;
1012 size_t iq;
1014 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1016 size_t ir;
1017 reg_saved_in_data *rir;
1019 if (modified_in_p (q->reg, insn))
1020 return true;
1022 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1023 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1024 && modified_in_p (rir->saved_in_reg, insn))
1025 return true;
1028 return false;
1031 /* What register, if any, is currently saved in REG? */
1033 static rtx
1034 reg_saved_in (rtx reg)
1036 unsigned int regn = REGNO (reg);
1037 queued_reg_save *q;
1038 reg_saved_in_data *rir;
1039 size_t i;
1041 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1042 if (q->saved_reg && regn == REGNO (q->saved_reg))
1043 return q->reg;
1045 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1046 if (regn == REGNO (rir->saved_in_reg))
1047 return rir->orig_reg;
1049 return NULL_RTX;
1052 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1054 static void
1055 dwarf2out_frame_debug_def_cfa (rtx pat)
1057 memset (cur_cfa, 0, sizeof (*cur_cfa));
1059 if (GET_CODE (pat) == PLUS)
1061 cur_cfa->offset = INTVAL (XEXP (pat, 1));
1062 pat = XEXP (pat, 0);
1064 if (MEM_P (pat))
1066 cur_cfa->indirect = 1;
1067 pat = XEXP (pat, 0);
1068 if (GET_CODE (pat) == PLUS)
1070 cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1071 pat = XEXP (pat, 0);
1074 /* ??? If this fails, we could be calling into the _loc functions to
1075 define a full expression. So far no port does that. */
1076 gcc_assert (REG_P (pat));
1077 cur_cfa->reg = dwf_regno (pat);
1080 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1082 static void
1083 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1085 rtx src, dest;
1087 gcc_assert (GET_CODE (pat) == SET);
1088 dest = XEXP (pat, 0);
1089 src = XEXP (pat, 1);
1091 switch (GET_CODE (src))
1093 case PLUS:
1094 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1095 cur_cfa->offset -= INTVAL (XEXP (src, 1));
1096 break;
1098 case REG:
1099 break;
1101 default:
1102 gcc_unreachable ();
1105 cur_cfa->reg = dwf_regno (dest);
1106 gcc_assert (cur_cfa->indirect == 0);
1109 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1111 static void
1112 dwarf2out_frame_debug_cfa_offset (rtx set)
1114 HOST_WIDE_INT offset;
1115 rtx src, addr, span;
1116 unsigned int sregno;
1118 src = XEXP (set, 1);
1119 addr = XEXP (set, 0);
1120 gcc_assert (MEM_P (addr));
1121 addr = XEXP (addr, 0);
1123 /* As documented, only consider extremely simple addresses. */
1124 switch (GET_CODE (addr))
1126 case REG:
1127 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1128 offset = -cur_cfa->offset;
1129 break;
1130 case PLUS:
1131 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1132 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1133 break;
1134 default:
1135 gcc_unreachable ();
1138 if (src == pc_rtx)
1140 span = NULL;
1141 sregno = DWARF_FRAME_RETURN_COLUMN;
1143 else
1145 span = targetm.dwarf_register_span (src);
1146 sregno = dwf_regno (src);
1149 /* ??? We'd like to use queue_reg_save, but we need to come up with
1150 a different flushing heuristic for epilogues. */
1151 if (!span)
1152 reg_save (sregno, INVALID_REGNUM, offset);
1153 else
1155 /* We have a PARALLEL describing where the contents of SRC live.
1156 Adjust the offset for each piece of the PARALLEL. */
1157 HOST_WIDE_INT span_offset = offset;
1159 gcc_assert (GET_CODE (span) == PARALLEL);
1161 const int par_len = XVECLEN (span, 0);
1162 for (int par_index = 0; par_index < par_len; par_index++)
1164 rtx elem = XVECEXP (span, 0, par_index);
1165 sregno = dwf_regno (src);
1166 reg_save (sregno, INVALID_REGNUM, span_offset);
1167 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1172 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1174 static void
1175 dwarf2out_frame_debug_cfa_register (rtx set)
1177 rtx src, dest;
1178 unsigned sregno, dregno;
1180 src = XEXP (set, 1);
1181 dest = XEXP (set, 0);
1183 record_reg_saved_in_reg (dest, src);
1184 if (src == pc_rtx)
1185 sregno = DWARF_FRAME_RETURN_COLUMN;
1186 else
1187 sregno = dwf_regno (src);
1189 dregno = dwf_regno (dest);
1191 /* ??? We'd like to use queue_reg_save, but we need to come up with
1192 a different flushing heuristic for epilogues. */
1193 reg_save (sregno, dregno, 0);
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.
   SET stores register SRC into a MEM whose address is described by an
   arbitrary location expression; emit a DW_CFA_expression for it.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Registers that span multiple DWARF columns are not supported here.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  /* Translate the store address into a DWARF location expression.  */
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1228 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1230 static void
1231 dwarf2out_frame_debug_cfa_restore (rtx reg)
1233 gcc_assert (REG_P (reg));
1235 rtx span = targetm.dwarf_register_span (reg);
1236 if (!span)
1238 unsigned int regno = dwf_regno (reg);
1239 add_cfi_restore (regno);
1240 update_row_reg_save (cur_row, regno, NULL);
1242 else
1244 /* We have a PARALLEL describing where the contents of REG live.
1245 Restore the register for each piece of the PARALLEL. */
1246 gcc_assert (GET_CODE (span) == PARALLEL);
1248 const int par_len = XVECLEN (span, 0);
1249 for (int par_index = 0; par_index < par_len; par_index++)
1251 reg = XVECEXP (span, 0, par_index);
1252 gcc_assert (REG_P (reg));
1253 unsigned int regno = dwf_regno (reg);
1254 add_cfi_restore (regno);
1255 update_row_reg_save (cur_row, regno, NULL);
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  /* Emit the opcode as-is; it carries no operands.  */
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1273 /* Record call frame debugging information for an expression EXPR,
1274 which either sets SP or FP (adjusting how we calculate the frame
1275 address) or saves a register to the stack or another register.
1276 LABEL indicates the address of EXPR.
1278 This function encodes a state machine mapping rtxes to actions on
1279 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1280 users need not read the source code.
1282 The High-Level Picture
1284 Changes in the register we use to calculate the CFA: Currently we
1285 assume that if you copy the CFA register into another register, we
1286 should take the other one as the new CFA register; this seems to
1287 work pretty well. If it's wrong for some target, it's simple
1288 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1290 Changes in the register we use for saving registers to the stack:
1291 This is usually SP, but not always. Again, we deduce that if you
1292 copy SP into another register (and SP is not the CFA register),
1293 then the new register is the one we will be using for register
1294 saves. This also seems to work.
1296 Register saves: There's not much guesswork about this one; if
1297 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1298 register save, and the register used to calculate the destination
1299 had better be the one we think we're using for this purpose.
1300 It's also assumed that a copy from a call-saved register to another
1301 register is saving that register if RTX_FRAME_RELATED_P is set on
1302 that instruction. If the copy is from a call-saved register to
1303 the *same* register, that means that the register is now the same
1304 value as in the caller.
1306 Except: If the register being saved is the CFA register, and the
1307 offset is nonzero, we are saving the CFA, so we assume we have to
1308 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1309 the intent is to save the value of SP from the previous frame.
1311 In addition, if a register has previously been saved to a different
1312 register,
1314 Invariants / Summaries of Rules
1316 cfa current rule for calculating the CFA. It usually
1317 consists of a register and an offset. This is
1318 actually stored in *cur_cfa, but abbreviated
1319 for the purposes of this documentation.
1320 cfa_store register used by prologue code to save things to the stack
1321 cfa_store.offset is the offset from the value of
1322 cfa_store.reg to the actual CFA
1323 cfa_temp register holding an integral value. cfa_temp.offset
1324 stores the value, which will be used to adjust the
1325 stack pointer. cfa_temp is also used like cfa_store,
1326 to track stores to the stack via fp or a temp reg.
1328 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1329 with cfa.reg as the first operand changes the cfa.reg and its
1330 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1331 cfa_temp.offset.
1333 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1334 expression yielding a constant. This sets cfa_temp.reg
1335 and cfa_temp.offset.
1337 Rule 5: Create a new register cfa_store used to save items to the
1338 stack.
1340 Rules 10-14: Save a register to the stack. Define offset as the
1341 difference of the original location and cfa_store's
1342 location (or cfa_temp's location if cfa_temp is used).
1344 Rules 16-20: If AND operation happens on sp in prologue, we assume
1345 stack is realigned. We will use a group of DW_OP_XXX
1346 expressions to represent the location of the stored
1347 register instead of CFA+offset.
1349 The Rules
1351 "{a,b}" indicates a choice of a xor b.
1352 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1354 Rule 1:
1355 (set <reg1> <reg2>:cfa.reg)
1356 effects: cfa.reg = <reg1>
1357 cfa.offset unchanged
1358 cfa_temp.reg = <reg1>
1359 cfa_temp.offset = cfa.offset
1361 Rule 2:
1362 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1363 {<const_int>,<reg>:cfa_temp.reg}))
1364 effects: cfa.reg = sp if fp used
1365 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1366 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1367 if cfa_store.reg==sp
1369 Rule 3:
1370 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1371 effects: cfa.reg = fp
1372 cfa_offset += +/- <const_int>
1374 Rule 4:
1375 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1376 constraints: <reg1> != fp
1377 <reg1> != sp
1378 effects: cfa.reg = <reg1>
1379 cfa_temp.reg = <reg1>
1380 cfa_temp.offset = cfa.offset
1382 Rule 5:
1383 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1384 constraints: <reg1> != fp
1385 <reg1> != sp
1386 effects: cfa_store.reg = <reg1>
1387 cfa_store.offset = cfa.offset - cfa_temp.offset
1389 Rule 6:
1390 (set <reg> <const_int>)
1391 effects: cfa_temp.reg = <reg>
1392 cfa_temp.offset = <const_int>
1394 Rule 7:
1395 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1396 effects: cfa_temp.reg = <reg1>
1397 cfa_temp.offset |= <const_int>
1399 Rule 8:
1400 (set <reg> (high <exp>))
1401 effects: none
1403 Rule 9:
1404 (set <reg> (lo_sum <exp> <const_int>))
1405 effects: cfa_temp.reg = <reg>
1406 cfa_temp.offset = <const_int>
1408 Rule 10:
1409 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1410 effects: cfa_store.offset -= <const_int>
1411 cfa.offset = cfa_store.offset if cfa.reg == sp
1412 cfa.reg = sp
1413 cfa.base_offset = -cfa_store.offset
1415 Rule 11:
1416 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1417 effects: cfa_store.offset += -/+ mode_size(mem)
1418 cfa.offset = cfa_store.offset if cfa.reg == sp
1419 cfa.reg = sp
1420 cfa.base_offset = -cfa_store.offset
1422 Rule 12:
1423 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1425 <reg2>)
1426 effects: cfa.reg = <reg1>
1427 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1429 Rule 13:
1430 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1431 effects: cfa.reg = <reg1>
1432 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1434 Rule 14:
1435 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1436 effects: cfa.reg = <reg1>
1437 cfa.base_offset = -cfa_temp.offset
1438 cfa_temp.offset -= mode_size(mem)
1440 Rule 15:
1441 (set <reg> {unspec, unspec_volatile})
1442 effects: target-dependent
1444 Rule 16:
1445 (set sp (and: sp <const_int>))
1446 constraints: cfa_store.reg == sp
1447 effects: cfun->fde.stack_realign = 1
1448 cfa_store.offset = 0
1449 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1451 Rule 17:
1452 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1453 effects: cfa_store.offset += -/+ mode_size(mem)
1455 Rule 18:
1456 (set (mem ({pre_inc, pre_dec} sp)) fp)
1457 constraints: fde->stack_realign == 1
1458 effects: cfa_store.offset = 0
1459 cfa.reg != HARD_FRAME_POINTER_REGNUM
1461 Rule 19:
1462 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1463 constraints: fde->stack_realign == 1
1464 && cfa.offset == 0
1465 && cfa.indirect == 0
1466 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1467 effects: Use DW_CFA_def_cfa_expression to define cfa
1468 cfa.reg == fde->drap_reg */
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a register save.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC was previously saved in another register, track the save
     through that register instead.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* Variable adjustment: the amount must have been
		     tracked through cfa_temp (Rules 6-9).  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Record call frame debugging information for INSN, which either sets
   SP or FP (adjusting how we calculate the frame address) or saves a
   register to the stack.  REG_CFA_* notes take precedence over
   interpreting the raw pattern; the pattern is only processed when no
   such note handled the insn.  */

static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	/* The note supplies a replacement pattern; process it exactly
	   as if it were PATTERN (insn).  */
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    /* An empty note means "use the insn's (first) SET".  */
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_expression (n);
	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    /* Default to the destination of the insn's (first) SET.  */
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_WINDOW_SAVE:
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
         We could probably check just once, here, but this is safer than
         removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */

static void
change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
{
  size_t i, n_old, n_new, n_max;
  dw_cfi_ref cfi;

  /* First, reconcile the CFA definition itself.  If the new row has a
     canned CFA cfi that differs, emit it; otherwise compute the minimal
     opcode transforming old CFA into new CFA.  */
  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
    add_cfi (new_row->cfa_cfi);
  else
    {
      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
      if (cfi)
	add_cfi (cfi);
    }

  /* Then reconcile the per-register save slots, indexed by DWARF
     register number.  */
  n_old = vec_safe_length (old_row->reg_save);
  n_new = vec_safe_length (new_row->reg_save);
  n_max = MAX (n_old, n_new);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_old = NULL, r_new = NULL;

      if (i < n_old)
	r_old = (*old_row->reg_save)[i];
      if (i < n_new)
	r_new = (*new_row->reg_save)[i];

      if (r_old == r_new)
	/* Identical (including both NULL) -- nothing to emit.  */
	;
      else if (r_new == NULL)
	add_cfi_restore (i);
      else if (!cfi_equal_p (r_old, r_new))
	add_cfi (r_new);
    }
}
2096 /* Examine CFI and return true if a cfi label and set_loc is needed
2097 beforehand. Even when generating CFI assembler instructions, we
2098 still have to add the cfi to the list so that lookup_cfa_1 works
2099 later on. When -g2 and above we even need to force emitting of
2100 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2101 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2102 and so don't use convert_cfa_to_fb_loc_list. */
2104 static bool
2105 cfi_label_required_p (dw_cfi_ref cfi)
2107 if (!dwarf2out_do_cfi_asm ())
2108 return true;
2110 if (dwarf_version == 2
2111 && debug_info_level > DINFO_LEVEL_TERSE
2112 && (write_symbols == DWARF2_DEBUG
2113 || write_symbols == VMS_AND_DWARF2_DEBUG))
2115 switch (cfi->dw_cfi_opc)
2117 case DW_CFA_def_cfa_offset:
2118 case DW_CFA_def_cfa_offset_sf:
2119 case DW_CFA_def_cfa_register:
2120 case DW_CFA_def_cfa:
2121 case DW_CFA_def_cfa_sf:
2122 case DW_CFA_def_cfa_expression:
2123 case DW_CFA_restore_state:
2124 return true;
2125 default:
2126 return false;
2129 return false;
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Look ahead over the whole run of CFI notes up to the next
	     active insn (or section switch); one label covers them all.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* It simplifies the logic to emit a DW_CFA_set_loc even
		 when the location is the first in its section.  */
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push the entire run of CFI notes gathered above.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "  saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {
      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      /* Nothing to undo on the abnormal edge; take the normal path.  */
      maybe_record_trace_start (start, origin);
      return;
    }

  /* The unwinder discards pushed arguments on abnormal edges, so the
     target trace sees args_size of zero.  */
  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the state we temporarily adjusted above.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL side below.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* Every entry of the jump table is a potential trace start.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may target any label whose address was taken.  */
	  for (rtx_insn_list *lab = forced_labels; lab; lab = lab->next ())
	    maybe_record_trace_start (lab->insn (), insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: each listed label is a possible destination.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary direct (conditional) jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* A SEQUENCE (delay slots): recurse into each member.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2368 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2370 static void
2371 scan_insn_after (rtx_insn *insn)
2373 if (RTX_FRAME_RELATED_P (insn))
2374 dwarf2out_frame_debug (insn);
2375 notice_args_size (insn);
2378 /* Scan the trace beginning at INSN and create the CFI notes for the
2379 instructions therein. */
2381 static void
2382 scan_trace (dw_trace_info *trace)
2384 rtx_insn *prev, *insn = trace->head;
2385 dw_cfa_location this_cfa;
2387 if (dump_file)
2388 fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2389 trace->id, rtx_name[(int) GET_CODE (insn)],
2390 INSN_UID (insn));
/* The end-of-trace state starts as a copy of the incoming state and
   is mutated in place as we scan forward.  */
2392 trace->end_row = copy_cfi_row (trace->beg_row);
2393 trace->end_true_args_size = trace->beg_true_args_size;
2395 cur_trace = trace;
2396 cur_row = trace->end_row;
2398 this_cfa = cur_row->cfa;
2399 cur_cfa = &this_cfa;
2401 for (prev = insn, insn = NEXT_INSN (insn);
2402 insn;
2403 prev = insn, insn = NEXT_INSN (insn))
2405 rtx_insn *control;
2407 /* Do everything that happens "before" the insn. */
2408 add_cfi_insn = prev;
2410 /* Notice the end of a trace. */
2411 if (BARRIER_P (insn))
2413 /* Don't bother saving the unneeded queued registers at all. */
2414 queued_reg_saves.truncate (0);
2415 break;
2417 if (save_point_p (insn))
2419 /* Propagate across fallthru edges. */
2420 dwarf2out_flush_queued_reg_saves ();
2421 maybe_record_trace_start (insn, NULL);
2422 break;
2425 if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2426 continue;
2428 /* Handle all changes to the row state. Sequences require special
2429 handling for the positioning of the notes. */
2430 if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
2432 rtx_insn *elt;
2433 int i, n = pat->len ();
2435 control = pat->insn (0);
2436 if (can_throw_internal (control))
2437 notice_eh_throw (control);
2438 dwarf2out_flush_queued_reg_saves ();
2440 if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2442 /* ??? Hopefully multiple delay slots are not annulled. */
2443 gcc_assert (n == 2);
2444 gcc_assert (!RTX_FRAME_RELATED_P (control));
2445 gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2447 elt = pat->insn (1);
2449 if (INSN_FROM_TARGET_P (elt))
2451 HOST_WIDE_INT restore_args_size;
2452 cfi_vec save_row_reg_save;
2454 /* If ELT is an instruction from target of an annulled
2455 branch, the effects are for the target only and so
2456 the args_size and CFA along the current path
2457 shouldn't change. */
2458 add_cfi_insn = NULL;
2459 restore_args_size = cur_trace->end_true_args_size;
2460 cur_cfa = &cur_row->cfa;
2461 save_row_reg_save = vec_safe_copy (cur_row->reg_save);
2463 scan_insn_after (elt);
2465 /* ??? Should we instead save the entire row state? */
2466 gcc_assert (!queued_reg_saves.length ());
/* Create edges while the row still reflects ELT's effects,
   so the branch target sees them; then undo them for the
   fall-through path.  */
2468 create_trace_edges (control);
2470 cur_trace->end_true_args_size = restore_args_size;
2471 cur_row->cfa = this_cfa;
2472 cur_row->reg_save = save_row_reg_save;
2473 cur_cfa = &this_cfa;
2475 else
2477 /* If ELT is a annulled branch-taken instruction (i.e.
2478 executed only when branch is not taken), the args_size
2479 and CFA should not change through the jump. */
2480 create_trace_edges (control);
2482 /* Update and continue with the trace. */
2483 add_cfi_insn = insn;
2484 scan_insn_after (elt);
2485 def_cfa_1 (&this_cfa);
2487 continue;
2490 /* The insns in the delay slot should all be considered to happen
2491 "before" a call insn. Consider a call with a stack pointer
2492 adjustment in the delay slot. The backtrace from the callee
2493 should include the sp adjustment. Unfortunately, that leaves
2494 us with an unavoidable unwinding error exactly at the call insn
2495 itself. For jump insns we'd prefer to avoid this error by
2496 placing the notes after the sequence. */
2497 if (JUMP_P (control))
2498 add_cfi_insn = insn;
2500 for (i = 1; i < n; ++i)
2502 elt = pat->insn (i);
2503 scan_insn_after (elt);
2506 /* Make sure any register saves are visible at the jump target. */
2507 dwarf2out_flush_queued_reg_saves ();
2508 any_cfis_emitted = false;
2510 /* However, if there is some adjustment on the call itself, e.g.
2511 a call_pop, that action should be considered to happen after
2512 the call returns. */
2513 add_cfi_insn = insn;
2514 scan_insn_after (control);
2516 else
2518 /* Flush data before calls and jumps, and of course if necessary. */
2519 if (can_throw_internal (insn))
2521 notice_eh_throw (insn);
2522 dwarf2out_flush_queued_reg_saves ();
2524 else if (!NONJUMP_INSN_P (insn)
2525 || clobbers_queued_reg_save (insn)
2526 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2527 dwarf2out_flush_queued_reg_saves ();
2528 any_cfis_emitted = false;
2530 add_cfi_insn = insn;
2531 scan_insn_after (insn);
2532 control = insn;
2535 /* Between frame-related-p and args_size we might have otherwise
2536 emitted two cfa adjustments. Do it now. */
2537 def_cfa_1 (&this_cfa);
2539 /* Minimize the number of advances by emitting the entire queue
2540 once anything is emitted. */
2541 if (any_cfis_emitted
2542 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2543 dwarf2out_flush_queued_reg_saves ();
2545 /* Note that a test for control_flow_insn_p does exactly the
2546 same tests as are done to actually create the edges. So
2547 always call the routine and let it not create edges for
2548 non-control-flow insns. */
2549 create_trace_edges (control);
/* Clear the per-trace globals now that this trace is complete.  */
2552 add_cfi_insn = NULL;
2553 cur_row = NULL;
2554 cur_trace = NULL;
2555 cur_cfa = NULL;
2558 /* Scan the function and create the initial set of CFI notes. */
2560 static void
2561 create_cfi_notes (void)
2563 dw_trace_info *ti;
2565 gcc_checking_assert (!queued_reg_saves.exists ());
2566 gcc_checking_assert (!trace_work_list.exists ());
2568 /* Always begin at the entry trace. */
2569 ti = &trace_info[0];
2570 scan_trace (ti);
2572 while (!trace_work_list.is_empty ())
2574 ti = trace_work_list.pop ();
2575 scan_trace (ti);
2578 queued_reg_saves.release ();
2579 trace_work_list.release ();
2582 /* Return the insn before the first NOTE_INSN_CFI after START. */
2584 static rtx_insn *
2585 before_next_cfi_note (rtx_insn *start)
2587 rtx_insn *prev = start;
2588 while (start)
2590 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2591 return prev;
2592 prev = start;
2593 start = NEXT_INSN (start);
2595 gcc_unreachable ();
2598 /* Insert CFI notes between traces to properly change state between them. */
2600 static void
2601 connect_traces (void)
2603 unsigned i, n = trace_info.length ();
2604 dw_trace_info *prev_ti, *ti;
2606 /* ??? Ideally, we should have both queued and processed every trace.
2607 However the current representation of constant pools on various targets
2608 is indistinguishable from unreachable code. Assume for the moment that
2609 we can simply skip over such traces. */
2610 /* ??? Consider creating a DATA_INSN rtx code to indicate that
2611 these are not "real" instructions, and should not be considered.
2612 This could be generically useful for tablejump data as well. */
2613 /* Remove all unprocessed traces from the list. */
2614 for (i = n - 1; i > 0; --i)
2616 ti = &trace_info[i];
2617 if (ti->beg_row == NULL)
2619 trace_info.ordered_remove (i);
2620 n -= 1;
2622 else
2623 gcc_assert (ti->end_row != NULL);
2626 /* Work from the end back to the beginning. This lets us easily insert
2627 remember/restore_state notes in the correct order wrt other notes. */
2628 prev_ti = &trace_info[n - 1];
2629 for (i = n - 1; i > 0; --i)
2631 dw_cfi_row *old_row;
2633 ti = prev_ti;
2634 prev_ti = &trace_info[i - 1];
/* New CFI notes for this fixup go just before the head of TI.  */
2636 add_cfi_insn = ti->head;
2638 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2639 for the portion of the function in the alternate text
2640 section. The row state at the very beginning of that
2641 new FDE will be exactly the row state from the CIE. */
2642 if (ti->switch_sections)
2643 old_row = cie_cfi_row;
2644 else
2646 old_row = prev_ti->end_row;
2647 /* If there's no change from the previous end state, fine. */
2648 if (cfi_row_equal_p (old_row, ti->beg_row))
2650 /* Otherwise check for the common case of sharing state with
2651 the beginning of an epilogue, but not the end. Insert
2652 remember/restore opcodes in that case. */
2653 else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2655 dw_cfi_ref cfi;
2657 /* Note that if we blindly insert the remember at the
2658 start of the trace, we can wind up increasing the
2659 size of the unwind info due to extra advance opcodes.
2660 Instead, put the remember immediately before the next
2661 state change. We know there must be one, because the
2662 state at the beginning and head of the trace differ. */
2663 add_cfi_insn = before_next_cfi_note (prev_ti->head)
2664 cfi = new_cfi ();
2665 cfi->dw_cfi_opc = DW_CFA_remember_state;
2666 add_cfi (cfi);
2668 add_cfi_insn = ti->head;
2669 cfi = new_cfi ();
2670 cfi->dw_cfi_opc = DW_CFA_restore_state;
2671 add_cfi (cfi);
/* After the restore, the effective state is PREV_TI's
   beginning; emit any remaining delta from there.  */
2673 old_row = prev_ti->beg_row;
2675 /* Otherwise, we'll simply change state from the previous end. */
2678 change_cfi_row (old_row, ti->beg_row);
2680 if (dump_file && add_cfi_insn != ti->head)
2682 rtx_insn *note;
2684 fprintf (dump_file, "Fixup between trace %u and %u:\n",
2685 prev_ti->id, ti->id);
2687 note = ti->head;
2690 note = NEXT_INSN (note);
2691 gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2692 output_cfi_directive (dump_file, NOTE_CFI (note));
2694 while (note != add_cfi_insn);
2698 /* Connect args_size between traces that have can_throw_internal insns. */
2699 if (cfun->eh->lp_array)
2701 HOST_WIDE_INT prev_args_size = 0;
2703 for (i = 0; i < n; ++i)
2705 ti = &trace_info[i];
/* A section switch starts a new FDE, which resets args_size.  */
2707 if (ti->switch_sections)
2708 prev_args_size = 0;
2709 if (ti->eh_head == NULL)
2710 continue;
2711 gcc_assert (!ti->args_size_undefined);
2713 if (ti->beg_delay_args_size != prev_args_size)
2715 /* ??? Search back to previous CFI note. */
2716 add_cfi_insn = PREV_INSN (ti->eh_head);
2717 add_cfi_args_size (ti->beg_delay_args_size);
2720 prev_args_size = ti->end_delay_args_size;
2725 /* Set up the pseudo-cfg of instruction traces, as described at the
2726 block comment at the top of the file. */
2728 static void
2729 create_pseudo_cfg (void)
2731 bool saw_barrier, switch_sections;
2732 dw_trace_info ti;
2733 rtx_insn *insn;
2734 unsigned i;
2736 /* The first trace begins at the start of the function,
2737 and begins with the CIE row state. */
2738 trace_info.create (16);
2739 memset (&ti, 0, sizeof (ti));
2740 ti.head = get_insns ();
2741 ti.beg_row = cie_cfi_row;
2742 ti.cfa_store = cie_cfi_row->cfa;
2743 ti.cfa_temp.reg = INVALID_REGNUM;
2744 trace_info.quick_push (ti);
/* NOTE(review): this safe_push mutates the local TI *after* TI was
   copied into trace_info, so the entry trace's copy of
   regs_saved_in_regs does not appear to receive cie_return_save
   (and the local vector is later clobbered by the memset below).
   Looks like the push should precede the quick_push — confirm
   against upstream before changing.  */
2746 if (cie_return_save)
2747 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2749 /* Walk all the insns, collecting start of trace locations. */
2750 saw_barrier = false;
2751 switch_sections = false;
2752 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2754 if (BARRIER_P (insn))
2755 saw_barrier = true;
2756 else if (NOTE_P (insn)
2757 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2759 /* We should have just seen a barrier. */
2760 gcc_assert (saw_barrier);
2761 switch_sections = true;
2763 /* Watch out for save_point notes between basic blocks.
2764 In particular, a note after a barrier. Do not record these,
2765 delaying trace creation until the label. */
2766 else if (save_point_p (insn)
2767 && (LABEL_P (insn) || !saw_barrier))
2769 memset (&ti, 0, sizeof (ti));
2770 ti.head = insn;
2771 ti.switch_sections = switch_sections;
2772 ti.id = trace_info.length ();
2773 trace_info.safe_push (ti);
2775 saw_barrier = false;
2776 switch_sections = false;
2780 /* Create the trace index after we've finished building trace_info,
2781 avoiding stale pointer problems due to reallocation. */
2782 trace_index
2783 = new hash_table<trace_info_hasher> (trace_info.length ());
2784 dw_trace_info *tp;
2785 FOR_EACH_VEC_ELT (trace_info, i, tp)
2787 dw_trace_info **slot;
2789 if (dump_file)
2790 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2791 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2792 tp->switch_sections ? " (section switch)" : "");
/* Traces are hashed by the UID of their head insn.  */
2794 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2795 gcc_assert (*slot == NULL);
2796 *slot = tp;
2800 /* Record the initial position of the return address. RTL is
2801 INCOMING_RETURN_ADDR_RTX. */
2803 static void
2804 initial_return_save (rtx rtl)
2806 unsigned int reg = INVALID_REGNUM;
2807 HOST_WIDE_INT offset = 0;
2809 switch (GET_CODE (rtl))
2811 case REG:
2812 /* RA is in a register. */
2813 reg = dwf_regno (rtl);
2814 break;
2816 case MEM:
2817 /* RA is on the stack. */
2818 rtl = XEXP (rtl, 0);
/* Decode the address: it must be SP, SP+const or SP-const.  */
2819 switch (GET_CODE (rtl))
2821 case REG:
2822 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2823 offset = 0;
2824 break;
2826 case PLUS:
2827 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2828 offset = INTVAL (XEXP (rtl, 1));
2829 break;
2831 case MINUS:
2832 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2833 offset = -INTVAL (XEXP (rtl, 1));
2834 break;
2836 default:
2837 gcc_unreachable ();
2840 break;
2842 case PLUS:
2843 /* The return address is at some offset from any value we can
2844 actually load. For instance, on the SPARC it is in %i7+8. Just
2845 ignore the offset for now; it doesn't matter for unwinding frames. */
2846 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2847 initial_return_save (XEXP (rtl, 0));
2848 return;
2850 default:
2851 gcc_unreachable ();
/* If the RA does not already live in its own return column, record
   the save; the offset is made relative to the incoming CFA.  */
2854 if (reg != DWARF_FRAME_RETURN_COLUMN)
2856 if (reg != INVALID_REGNUM)
2857 record_reg_saved_in_reg (rtl, pc_rtx);
2858 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
/* Compute the per-translation-unit CIE data: the CFI row state in
   effect on entry to every function (cie_cfi_row / cie_cfi_vec) and,
   when unwind info is wanted, the initial location of the return
   address.  Called once, lazily, from execute_dwarf2_frame.  */
2862 static void
2863 create_cie_data (void)
2865 dw_cfa_location loc;
2866 dw_trace_info cie_trace;
2868 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2869 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
/* Use a scratch trace so the reg_save machinery has somewhere to
   record state while building the CIE.  */
2871 memset (&cie_trace, 0, sizeof (cie_trace));
2872 cur_trace = &cie_trace;
2874 add_cfi_vec = &cie_cfi_vec;
2875 cie_cfi_row = cur_row = new_cfi_row ();
2877 /* On entry, the Canonical Frame Address is at SP. */
2878 memset (&loc, 0, sizeof (loc));
2879 loc.reg = dw_stack_pointer_regnum;
2880 loc.offset = INCOMING_FRAME_SP_OFFSET;
2881 def_cfa_1 (&loc);
2883 if (targetm.debug_unwind_info () == UI_DWARF2
2884 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2886 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2888 /* For a few targets, we have the return address incoming into a
2889 register, but choose a different return column. This will result
2890 in a DW_CFA_register for the return, and an entry in
2891 regs_saved_in_regs to match. If the target later stores that
2892 return address register to the stack, we want to be able to emit
2893 the DW_CFA_offset against the return column, not the intermediate
2894 save register. Save the contents of regs_saved_in_regs so that
2895 we can re-initialize it at the start of each function. */
2896 switch (cie_trace.regs_saved_in_regs.length ())
2898 case 0:
2899 break;
2900 case 1:
2901 cie_return_save = ggc_alloc<reg_saved_in_data> ();
2902 *cie_return_save = cie_trace.regs_saved_in_regs[0];
2903 cie_trace.regs_saved_in_regs.release ();
2904 break;
2905 default:
2906 gcc_unreachable ();
/* Done with the scratch globals.  */
2910 add_cfi_vec = NULL;
2911 cur_row = NULL;
2912 cur_trace = NULL;
2915 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2916 state at each location within the function. These notes will be
2917 emitted during pass_final. */
2919 static unsigned int
2920 execute_dwarf2_frame (void)
2922 /* The first time we're called, compute the incoming frame state. */
2923 if (cie_cfi_vec == NULL)
2924 create_cie_data ();
2926 dwarf2out_alloc_current_fde ();
2928 create_pseudo_cfg ();
2930 /* Do the work. */
2931 create_cfi_notes ();
2932 connect_traces ();
2933 add_cfis_to_fde ();
2935 /* Free all the data we allocated. */
2937 size_t i;
2938 dw_trace_info *ti;
/* Each trace owns a private copy of regs_saved_in_regs; release
   them before releasing the trace vector itself.  */
2940 FOR_EACH_VEC_ELT (trace_info, i, ti)
2941 ti->regs_saved_in_regs.release ();
2943 trace_info.release ();
2945 delete trace_index;
2946 trace_index = NULL;
2948 return 0;
/* Convert the DWARF call-frame opcode CFI_OPC to its string name, or
   a fixed placeholder when the opcode is unknown.  */

static const char *
dwarf_cfi_name (unsigned int cfi_opc)
{
  const char *name = get_DW_CFA_name (cfi_opc);
  return name ? name : "DW_CFA_<unknown>";
}
2964 /* This routine will generate the correct assembly data for a location
2965 description based on a cfi entry with a complex address. */
2967 static void
2968 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2970 dw_loc_descr_ref loc;
2971 unsigned long size;
2973 if (cfi->dw_cfi_opc == DW_CFA_expression)
2975 unsigned r =
2976 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2977 dw2_asm_output_data (1, r, NULL);
2978 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2980 else
2981 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2983 /* Output the size of the block. */
2984 size = size_of_locs (loc);
2985 dw2_asm_output_data_uleb128 (size, NULL);
2987 /* Now output the operations themselves. */
2988 output_loc_sequence (loc, for_eh);
/* Similar, but used for .cfi_escape: writes the raw comma-separated
   bytes directly to asm_out_file.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
    {
      /* DW_CFA_expression carries the register as a leading operand.  */
      unsigned r
	= DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block, then the operations themselves.  */
  dw2_asm_output_data_uleb128_raw (size_of_locs (loc));
  fputc (',', asm_out_file);
  output_loc_sequence_raw (loc);
}
3018 /* Output a Call Frame Information opcode and its operand(s). */
3020 void
3021 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3023 unsigned long r;
3024 HOST_WIDE_INT off;
/* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore are the
   "primary" opcodes: their first operand is packed into the low six
   bits of the opcode byte itself, so handle them before the generic
   one-byte-opcode path below.  */
3026 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3027 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3028 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3029 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3030 ((unsigned HOST_WIDE_INT)
3031 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3032 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3034 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3035 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3036 "DW_CFA_offset, column %#lx", r);
3037 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3038 dw2_asm_output_data_uleb128 (off, NULL);
3040 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3042 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3043 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3044 "DW_CFA_restore, column %#lx", r);
3046 else
/* Everything else: a full opcode byte followed by operands encoded
   per the DWARF spec.  */
3048 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3049 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3051 switch (cfi->dw_cfi_opc)
3053 case DW_CFA_set_loc:
3054 if (for_eh)
3055 dw2_asm_output_encoded_addr_rtx (
3056 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3057 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3058 false, NULL);
3059 else
3060 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3061 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3062 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3063 break;
/* The advance opcodes emit a delta (1/2/4/8 bytes) from the FDE's
   current label, then move that label forward.  */
3065 case DW_CFA_advance_loc1:
3066 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3067 fde->dw_fde_current_label, NULL);
3068 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3069 break;
3071 case DW_CFA_advance_loc2:
3072 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3073 fde->dw_fde_current_label, NULL);
3074 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3075 break;
3077 case DW_CFA_advance_loc4:
3078 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3079 fde->dw_fde_current_label, NULL);
3080 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3081 break;
3083 case DW_CFA_MIPS_advance_loc8:
3084 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3085 fde->dw_fde_current_label, NULL);
3086 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3087 break;
3089 case DW_CFA_offset_extended:
3090 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3091 dw2_asm_output_data_uleb128 (r, NULL);
3092 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3093 dw2_asm_output_data_uleb128 (off, NULL);
3094 break;
3096 case DW_CFA_def_cfa:
3097 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3098 dw2_asm_output_data_uleb128 (r, NULL);
3099 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3100 break;
/* The _sf variants take a factored signed offset.  */
3102 case DW_CFA_offset_extended_sf:
3103 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3104 dw2_asm_output_data_uleb128 (r, NULL);
3105 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3106 dw2_asm_output_data_sleb128 (off, NULL);
3107 break;
3109 case DW_CFA_def_cfa_sf:
3110 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3111 dw2_asm_output_data_uleb128 (r, NULL);
3112 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3113 dw2_asm_output_data_sleb128 (off, NULL);
3114 break;
3116 case DW_CFA_restore_extended:
3117 case DW_CFA_undefined:
3118 case DW_CFA_same_value:
3119 case DW_CFA_def_cfa_register:
3120 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3121 dw2_asm_output_data_uleb128 (r, NULL);
3122 break;
3124 case DW_CFA_register:
3125 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3126 dw2_asm_output_data_uleb128 (r, NULL);
3127 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3128 dw2_asm_output_data_uleb128 (r, NULL);
3129 break;
3131 case DW_CFA_def_cfa_offset:
3132 case DW_CFA_GNU_args_size:
3133 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3134 break;
3136 case DW_CFA_def_cfa_offset_sf:
3137 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3138 dw2_asm_output_data_sleb128 (off, NULL);
3139 break;
3141 case DW_CFA_GNU_window_save:
3142 break;
3144 case DW_CFA_def_cfa_expression:
3145 case DW_CFA_expression:
3146 output_cfa_loc (cfi, for_eh);
3147 break;
3149 case DW_CFA_GNU_negative_offset_extended:
3150 /* Obsoleted by DW_CFA_offset_extended_sf. */
3151 gcc_unreachable ();
3153 default:
3154 break;
3159 /* Similar, but do it via assembler directives instead. */
3161 void
3162 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
3164 unsigned long r, r2;
3166 switch (cfi->dw_cfi_opc)
3168 case DW_CFA_advance_loc:
3169 case DW_CFA_advance_loc1:
3170 case DW_CFA_advance_loc2:
3171 case DW_CFA_advance_loc4:
3172 case DW_CFA_MIPS_advance_loc8:
3173 case DW_CFA_set_loc:
3174 /* Should only be created in a code path not followed when emitting
3175 via directives. The assembler is going to take care of this for
3176 us. But this routines is also used for debugging dumps, so
3177 print something. */
3178 gcc_assert (f != asm_out_file);
3179 fprintf (f, "\t.cfi_advance_loc\n");
3180 break;
3182 case DW_CFA_offset:
3183 case DW_CFA_offset_extended:
3184 case DW_CFA_offset_extended_sf:
3185 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3186 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3187 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3188 break;
3190 case DW_CFA_restore:
3191 case DW_CFA_restore_extended:
3192 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3193 fprintf (f, "\t.cfi_restore %lu\n", r);
3194 break;
3196 case DW_CFA_undefined:
3197 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3198 fprintf (f, "\t.cfi_undefined %lu\n", r);
3199 break;
3201 case DW_CFA_same_value:
3202 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3203 fprintf (f, "\t.cfi_same_value %lu\n", r);
3204 break;
3206 case DW_CFA_def_cfa:
3207 case DW_CFA_def_cfa_sf:
3208 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3209 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
3210 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
3211 break;
3213 case DW_CFA_def_cfa_register:
3214 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3215 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
3216 break;
3218 case DW_CFA_register:
3219 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3220 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
3221 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
3222 break;
3224 case DW_CFA_def_cfa_offset:
3225 case DW_CFA_def_cfa_offset_sf:
3226 fprintf (f, "\t.cfi_def_cfa_offset "
3227 HOST_WIDE_INT_PRINT_DEC"\n",
3228 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3229 break;
3231 case DW_CFA_remember_state:
3232 fprintf (f, "\t.cfi_remember_state\n");
3233 break;
3234 case DW_CFA_restore_state:
3235 fprintf (f, "\t.cfi_restore_state\n");
3236 break;
/* gas has no first-class args_size directive; when writing real
   assembly the opcode is emitted via .cfi_escape with raw bytes.
   For debug dumps a readable pseudo-directive is printed instead.  */
3238 case DW_CFA_GNU_args_size:
3239 if (f == asm_out_file)
3241 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3242 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3243 if (flag_debug_asm)
3244 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3245 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3246 fputc ('\n', f);
3248 else
3250 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3251 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3253 break;
3255 case DW_CFA_GNU_window_save:
3256 fprintf (f, "\t.cfi_window_save\n");
3257 break;
/* Both expression opcodes: debug dumps get an abbreviated line; real
   assembly output falls through to the shared .cfi_escape path.  */
3259 case DW_CFA_def_cfa_expression:
3260 if (f != asm_out_file)
3262 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3263 break;
3265 /* FALLTHRU */
3266 case DW_CFA_expression:
3267 if (f != asm_out_file)
3269 fprintf (f, "\t.cfi_cfa_expression ...\n");
3270 break;
3272 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3273 output_cfa_loc_raw (cfi);
3274 fputc ('\n', f);
3275 break;
3277 default:
3278 gcc_unreachable ();
3282 void
3283 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3285 if (dwarf2out_do_cfi_asm ())
3286 output_cfi_directive (asm_out_file, cfi);
/* Dump ROW to F as a sequence of CFI directives: first the CFA
   definition, then every recorded register-save CFI.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  unsigned ix;
  dw_cfi_ref cfi = row->cfa_cfi;

  if (cfi == NULL)
    {
      /* No pre-built CFA cfi: synthesize one from the row's CFA
	 location, starting from a dummy "unset" previous location.  */
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, ix, cfi)
    if (cfi != NULL)
      output_cfi_directive (f, cfi);
}
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called from the debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3319 /* Save the result of dwarf2out_do_frame across PCH.
3320 This variable is tri-state, with 0 unset, >0 true, <0 false.
3321 Cached and maintained by dwarf2out_do_cfi_asm. */
3321 static GTY(()) signed char saved_do_cfi_asm = 0;
3323 /* Decide whether we want to emit frame unwind information for the current
3324 translation unit. */
3326 bool
3327 dwarf2out_do_frame (void)
3329 /* We want to emit correct CFA location expressions or lists, so we
3330 have to return true if we're going to output debug info, even if
3331 we're not going to output frame or unwind info. */
3332 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3333 return true;
3335 if (saved_do_cfi_asm > 0)
3336 return true;
3338 if (targetm.debug_unwind_info () == UI_DWARF2)
3339 return true;
3341 if ((flag_unwind_tables || flag_exceptions)
3342 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3343 return true;
3345 return false;
3348 /* Decide whether to emit frame unwind via assembler directives. */
3350 bool
3351 dwarf2out_do_cfi_asm (void)
3353 int enc;
/* Return the cached tri-state answer if one has been computed.  */
3355 if (saved_do_cfi_asm != 0)
3356 return saved_do_cfi_asm > 0;
3358 /* Assume failure for a moment. */
/* Setting the cache negative up front lets every early return below
   leave a consistent "no" cached; only full success flips it.  */
3359 saved_do_cfi_asm = -1;
3361 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3362 return false;
3363 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3364 return false;
3366 /* Make sure the personality encoding is one the assembler can support.
3367 In particular, aligned addresses can't be handled. */
3368 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3369 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3370 return false;
3371 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3372 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3373 return false;
3375 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3376 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3377 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3378 && !flag_unwind_tables && !flag_exceptions
3379 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3380 return false;
3382 /* Success! */
3383 saved_do_cfi_asm = 1;
3384 return true;
3387 namespace {
/* Pass metadata: the dwarf2 CFI pass runs under the TV_FINAL timer
   and requires/provides/destroys no IL properties.  */
3389 const pass_data pass_data_dwarf2_frame =
3391 RTL_PASS, /* type */
3392 "dwarf2", /* name */
3393 OPTGROUP_NONE, /* optinfo_flags */
3394 TV_FINAL, /* tv_id */
3395 0, /* properties_required */
3396 0, /* properties_provided */
3397 0, /* properties_destroyed */
3398 0, /* todo_flags_start */
3399 0, /* todo_flags_finish */
/* RTL pass wrapper around execute_dwarf2_frame.  */
3402 class pass_dwarf2_frame : public rtl_opt_pass
3404 public:
3405 pass_dwarf2_frame (gcc::context *ctxt)
3406 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3409 /* opt_pass methods: */
3410 virtual bool gate (function *);
3411 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3413 }; // class pass_dwarf2_frame
3415 bool
3416 pass_dwarf2_frame::gate (function *)
3418 #ifndef HAVE_prologue
3419 /* Targets which still implement the prologue in assembler text
3420 cannot use the generic dwarf2 unwinding. */
3421 return false;
3422 #endif
3424 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3425 from the optimized shrink-wrapping annotations that we will compute.
3426 For now, only produce the CFI notes for dwarf2. */
3427 return dwarf2out_do_frame ();
3430 } // anon namespace
/* Factory entry point used by the pass manager to instantiate the
   dwarf2 frame pass.  */
3432 rtl_opt_pass *
3433 make_pass_dwarf2_frame (gcc::context *ctxt)
3435 return new pass_dwarf2_frame (ctxt);
3438 #include "gt-dwarf2cfi.h"