/*
 * Copyright (C) 1999, 2005 Alexandre Julliard
 * Copyright (C) 2009, 2011 Eric Pouech.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#define NONAMELESSUNION
#define NONAMELESSSTRUCT
#define WIN32_NO_STATUS

#include "dbghelp_private.h"
#include "wine/debug.h"
WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);
/* x86-64 unwind information, for PE modules, as described on MSDN */
typedef enum _UNWIND_OP_CODES
{
    UWOP_PUSH_NONVOL = 0,
    UWOP_ALLOC_LARGE,
    UWOP_ALLOC_SMALL,
    UWOP_SET_FPREG,
    UWOP_SAVE_NONVOL,
    UWOP_SAVE_NONVOL_FAR,
    UWOP_EPILOG,
    UWOP_SAVE_XMM128 = 8,
    UWOP_SAVE_XMM128_FAR,
    UWOP_PUSH_MACHFRAME,
} UNWIND_OP_CODES, *PUNWIND_OP_CODES;

typedef union _UNWIND_CODE
{
    struct
    {
        BYTE CodeOffset;
        BYTE UnwindOp : 4;
        BYTE OpInfo   : 4;
    } u;
    USHORT FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO
{
    BYTE Version       : 3;
    BYTE Flags         : 5;
    BYTE SizeOfProlog;
    BYTE CountOfCodes;
    BYTE FrameRegister : 4;
    BYTE FrameOffset   : 4;
    UNWIND_CODE UnwindCode[1]; /* actually CountOfCodes (aligned) */
/*
 *  union
 *  {
 *      OPTIONAL ULONG ExceptionHandler;
 *      OPTIONAL ULONG FunctionEntry;
 *  };
 *  OPTIONAL ULONG ExceptionData[];
 */
} UNWIND_INFO, *PUNWIND_INFO;
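
/* Note: UNWIND_INFO is variable-sized: CountOfCodes UNWIND_CODE slots follow the
 * fixed header (padded to an even count), and the optional exception handler or
 * chained RUNTIME_FUNCTION data comes after that padded array. */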
static BOOL x86_64_get_addr(HANDLE hThread, const CONTEXT* ctx,
                            enum cpu_addr ca, ADDRESS64* addr)
{
    addr->Mode = AddrModeFlat;
    switch (ca)
    {
    case cpu_addr_pc:    addr->Segment = ctx->SegCs; addr->Offset = ctx->Rip; return TRUE;
    case cpu_addr_stack: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rsp; return TRUE;
    case cpu_addr_frame: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rbp; return TRUE;
    default: addr->Mode = -1;
        return FALSE;
    }
}
enum st_mode {stm_start, stm_64bit, stm_done};
/* indexes in Reserved array */
#define __CurrentMode     0
#define __CurrentCount    1
/* #define __               2 (unused) */

#define curr_mode   (frame->Reserved[__CurrentMode])
#define curr_count  (frame->Reserved[__CurrentCount])
/* #define ???      (frame->Reserved[__]) (unused) */
union handler_data
{
    RUNTIME_FUNCTION chain;
    ULONG handler;
};
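
/* Dump (via TRACE) the unwind data attached to a RUNTIME_FUNCTION entry, following
 * chained entries; debug output only, no context is modified. */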
static void dump_unwind_info(struct cpu_stack_walk* csw, ULONG64 base, RUNTIME_FUNCTION* function)
{
    static const char * const reg_names[16] =
        { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
          "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15" };
    union handler_data handler_data;
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned int i, count;
    RUNTIME_FUNCTION snext;
    ULONG64 addr;
128 TRACE("**** func %lx-%lx\n", function
->BeginAddress
, function
->EndAddress
);
        if (function->UnwindData & 1)
        {
            if (!sw_read_mem(csw, base + function->UnwindData, &snext, sizeof(snext)))
            {
                TRACE("Couldn't unwind RUNTIME_INFO at %Ix\n", base + function->UnwindData);
                return;
            }
            TRACE("unwind info for function %p-%p chained to function %p-%p\n",
                  (char*)base + function->BeginAddress, (char*)base + function->EndAddress,
                  (char*)base + snext.BeginAddress, (char*)base + snext.EndAddress);
            function = &snext;
            continue;
        }
        addr = base + function->UnwindData;
        if (!sw_read_mem(csw, addr, info, FIELD_OFFSET(UNWIND_INFO, UnwindCode)) ||
            !sw_read_mem(csw, addr + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                         info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
        {
            FIXME("couldn't read memory for UNWIND_INFO at %Ix\n", addr);
            return;
        }
152 TRACE("unwind info at %p flags %x prolog 0x%x bytes function %p-%p\n",
153 (char*)addr
, info
->Flags
, info
->SizeOfProlog
,
154 (char*)base
+ function
->BeginAddress
, (char*)base
+ function
->EndAddress
);
        if (info->FrameRegister)
            TRACE(" frame register %s offset 0x%x(%%rsp)\n",
                  reg_names[info->FrameRegister], info->FrameOffset * 16);
        for (i = 0; i < info->CountOfCodes; i++)
        {
            TRACE(" 0x%x: ", info->UnwindCode[i].u.CodeOffset);
            switch (info->UnwindCode[i].u.UnwindOp)
            {
            case UWOP_PUSH_NONVOL:
                TRACE("pushq %%%s\n", reg_names[info->UnwindCode[i].u.OpInfo]);
                break;
            case UWOP_ALLOC_LARGE:
                if (info->UnwindCode[i].u.OpInfo)
                {
                    count = *(DWORD*)&info->UnwindCode[i+1];
                    i += 2;
                }
                else
                {
                    count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                    i++;
                }
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_ALLOC_SMALL:
                count = (info->UnwindCode[i].u.OpInfo + 1) * 8;
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_SET_FPREG:
                TRACE("leaq 0x%x(%%rsp),%s\n",
                      info->FrameOffset * 16, reg_names[info->FrameRegister]);
                break;
            case UWOP_SAVE_NONVOL:
                count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
                i++;
                break;
            case UWOP_SAVE_NONVOL_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
                i += 2;
                break;
            case UWOP_SAVE_XMM128:
                count = *(USHORT*)&info->UnwindCode[i+1] * 16;
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
                i++;
                break;
            case UWOP_SAVE_XMM128_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
                i += 2;
                break;
            case UWOP_PUSH_MACHFRAME:
                TRACE("PUSH_MACHFRAME %u\n", info->UnwindCode[i].u.OpInfo);
                break;
            case UWOP_EPILOG:
                if (info->Version == 2)
                {
                    unsigned int offset;
                    if (info->UnwindCode[i].u.OpInfo)
                        offset = info->UnwindCode[i].u.CodeOffset;
                    else
                        offset = (info->UnwindCode[i+1].u.OpInfo << 8) + info->UnwindCode[i+1].u.CodeOffset;
                    TRACE("UWOP_EPILOG %u offset %u\n", info->UnwindCode[i].u.OpInfo, offset);
                    i += 1;
                    break;
                }
                /* fall through */
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
                break;
            }
        }
        addr += FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
            ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE);
        if (info->Flags & UNW_FLAG_CHAININFO)
        {
            if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.chain)))
            {
                FIXME("couldn't read memory for handler_data.chain\n");
                return;
            }
            TRACE(" chained to function %p-%p\n",
                  (char*)base + handler_data.chain.BeginAddress,
                  (char*)base + handler_data.chain.EndAddress);
            function = &handler_data.chain;
            continue;
        }
        if (info->Flags & (UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER))
        {
            if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.handler)))
            {
                FIXME("couldn't read memory for handler_data.handler\n");
                return;
            }
            TRACE(" handler %p data at %p\n",
                  (char*)base + handler_data.handler, (char*)addr + sizeof(handler_data.handler));
        }
        break;
    }
}
/* highly derived from dlls/ntdll/signal_x86_64.c */
static ULONG64 get_int_reg(CONTEXT *context, int reg)
{
    return *(&context->Rax + reg);
}
static void set_int_reg(CONTEXT *context, int reg, ULONG64 val)
{
    *(&context->Rax + reg) = val;
}
static void set_float_reg(CONTEXT *context, int reg, M128A val)
{
    *(&context->u.s.Xmm0 + reg) = val;
}
static int get_opcode_size(UNWIND_CODE op)
{
    switch (op.u.UnwindOp)
    {
    case UWOP_ALLOC_LARGE:
        return 2 + (op.u.OpInfo != 0);
    case UWOP_SAVE_NONVOL:
    case UWOP_SAVE_XMM128:
        return 2;
    case UWOP_SAVE_NONVOL_FAR:
    case UWOP_SAVE_XMM128_FAR:
        return 3;
    default:
        return 1;
    }
}
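
/* Decide whether pc lies inside a function epilog.  Per the x86-64 unwind
 * convention an epilog may only consist of an optional add $n,%rsp or
 * lea n(reg),%rsp that undoes the frame, pops of non-volatile registers,
 * and a final ret (or a jmp tail-call leaving the function). */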
static BOOL is_inside_epilog(struct cpu_stack_walk* csw, DWORD64 pc,
                             DWORD64 base, const RUNTIME_FUNCTION *function)
{
    BYTE op0, op1, op2;
    LONG val32;

    if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;

    /* add or lea must be the first instruction, and it must have a rex.W prefix */
    if ((op0 & 0xf8) == 0x48)
    {
        if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
        switch (op1)
        {
        case 0x81: /* add $nnnn,%rsp */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 7;
                break;
            }
            return FALSE;
        case 0x83: /* add $n,%rsp */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 4;
                break;
            }
            return FALSE;
        case 0x8d: /* lea n(reg),%rsp */
            if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
            if (op0 & 0x06) return FALSE;            /* rex.RX must be cleared */
            if (((op2 >> 3) & 7) != 4) return FALSE; /* dest reg must be %rsp */
            if ((op2 & 7) == 4) return FALSE;        /* no SIB byte allowed */
            if ((op2 >> 6) == 1)                     /* 8-bit offset */
            {
                pc += 4;
                break;
            }
            if ((op2 >> 6) == 2)                     /* 32-bit offset */
            {
                pc += 7;
                break;
            }
            return FALSE;
        default:
            return FALSE;
        }
    }
    /* now check for various pop instructions */
    for (;;)
    {
        if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;
        if ((op0 & 0xf0) == 0x40) /* rex prefix */
        {
            if (!sw_read_mem(csw, ++pc, &op0, 1)) return FALSE;
        }

        switch (op0)
        {
        case 0x58: /* pop %rax/%r8 */
        case 0x59: /* pop %rcx/%r9 */
        case 0x5a: /* pop %rdx/%r10 */
        case 0x5b: /* pop %rbx/%r11 */
        case 0x5c: /* pop %rsp/%r12 */
        case 0x5d: /* pop %rbp/%r13 */
        case 0x5e: /* pop %rsi/%r14 */
        case 0x5f: /* pop %rdi/%r15 */
            pc++;
            continue;
        case 0xc2: /* ret $nn */
        case 0xc3: /* ret */
            return TRUE;
        case 0xe9: /* jmp nnnn */
            if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
            pc += 5 + val32;
            if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
                continue;
            break;
        case 0xeb: /* jmp n */
            if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
            pc += 2 + (signed char)op1;
            if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
                continue;
            break;
        case 0xf3: /* rep; ret (for amd64 prediction bug) */
            if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
            return op1 == 0xc3;
        }
        return FALSE;
    }
}
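
/* Virtually execute the remainder of the epilog: decode the pop/add/lea/ret/jmp
 * instructions at pc and apply their effect to the context until the return
 * address has been loaded into Rip. */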
static BOOL interpret_epilog(struct cpu_stack_walk* csw, ULONG64 pc, CONTEXT *context)
{
    BYTE    insn, val8;
    WORD    val16;
    LONG    val32;
    DWORD64 val64;

    for (;;)
    {
        BYTE rex = 0;

        if (!sw_read_mem(csw, pc, &insn, 1)) return FALSE;
        if ((insn & 0xf0) == 0x40)
        {
            rex = insn & 0x0f; /* rex prefix */
            if (!sw_read_mem(csw, ++pc, &insn, 1)) return FALSE;
        }

        switch (insn)
        {
        case 0x58: /* pop %rax/r8 */
        case 0x59: /* pop %rcx/r9 */
        case 0x5a: /* pop %rdx/r10 */
        case 0x5b: /* pop %rbx/r11 */
        case 0x5c: /* pop %rsp/r12 */
        case 0x5d: /* pop %rbp/r13 */
        case 0x5e: /* pop %rsi/r14 */
        case 0x5f: /* pop %rdi/r15 */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            set_int_reg(context, insn - 0x58 + (rex & 1) * 8, val64);
            context->Rsp += sizeof(ULONG64);
            pc++;
            continue;
        case 0x81: /* add $nnnn,%rsp */
            if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
            context->Rsp += val32;
            pc += 2 + sizeof(LONG);
            continue;
        case 0x83: /* add $n,%rsp */
            if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
            context->Rsp += (signed char)val8;
            pc += 3;
            continue;
        case 0x8d: /* lea */
            if (!sw_read_mem(csw, pc + 1, &insn, sizeof(BYTE))) return FALSE;
            if ((insn >> 6) == 1) /* lea n(reg),%rsp */
            {
                if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
                context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + (signed char)val8;
                pc += 3;
            }
            else /* lea nnnn(reg),%rsp */
            {
                if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
                context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + val32;
                pc += 2 + sizeof(LONG);
            }
            continue;
        case 0xc2: /* ret $nn */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            if (!sw_read_mem(csw, pc + 1, &val16, sizeof(WORD))) return FALSE;
            context->Rip = val64;
            context->Rsp += sizeof(ULONG64) + val16;
            return TRUE;
        case 0xc3: /* ret */
        case 0xf3: /* rep; ret */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            context->Rip = val64;
            context->Rsp += sizeof(ULONG64);
            return TRUE;
        case 0xe9: /* jmp nnnn */
            if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
            pc += 5 + val32;
            continue;
        case 0xeb: /* jmp n */
            if (!sw_read_mem(csw, pc + 1, &val8, sizeof(BYTE))) return FALSE;
            pc += 2 + (signed char)val8;
            continue;
        default:
            FIXME("unsupported insn %x\n", insn);
            return FALSE;
        }
    }
}
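
/* Fallback unwind when no unwind information is available: assume the return
 * address sits at the top of the stack. */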
static BOOL default_unwind(struct cpu_stack_walk* csw, CONTEXT* context)
{
    if (!sw_read_mem(csw, context->Rsp, &context->Rip, sizeof(DWORD64)))
    {
        WARN("Cannot read new frame offset %s\n", wine_dbgstr_longlong(context->Rsp));
        return FALSE;
    }
    context->Rsp += sizeof(DWORD64);
    return TRUE;
}
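
/* Unwind one frame from PE unwind data: undo the prolog operations recorded in
 * UNWIND_INFO (or virtually run the epilog when Rip is inside one), then follow
 * chained unwind info until the caller's Rip/Rsp are recovered. */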
static BOOL interpret_function_table_entry(struct cpu_stack_walk* csw,
                                           CONTEXT* context, RUNTIME_FUNCTION* function, DWORD64 base)
{
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned i;
    DWORD64 newframe, prolog_offset, off, value;
    M128A floatvalue;
    union handler_data handler_data;
    BOOL mach_frame = FALSE;

    /* FIXME: we have some assumptions here */
    dump_unwind_info(csw, sw_module_base(csw, context->Rip), function);
    newframe = context->Rsp;
    for (;;)
    {
        if (!sw_read_mem(csw, base + function->UnwindData, info, sizeof(*info)) ||
            !sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                         info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
        {
            WARN("Couldn't read unwind_code at %Ix\n", base + function->UnwindData);
            return FALSE;
        }

        if (info->Version != 1 && info->Version != 2)
        {
            WARN("unknown unwind info version %u at %Ix\n", info->Version, base + function->UnwindData);
            return FALSE;
        }
        if (info->FrameRegister)
            newframe = get_int_reg(context, info->FrameRegister) - info->FrameOffset * 16;

        /* check if in prolog */
        if (context->Rip >= base + function->BeginAddress &&
            context->Rip < base + function->BeginAddress + info->SizeOfProlog)
        {
            prolog_offset = context->Rip - base - function->BeginAddress;
        }
        else
        {
            prolog_offset = ~0;
            if (is_inside_epilog(csw, context->Rip, base, function))
            {
                interpret_epilog(csw, context->Rip, context);
                return TRUE;
            }
        }
        for (i = 0; i < info->CountOfCodes; i += get_opcode_size(info->UnwindCode[i]))
        {
            if (prolog_offset < info->UnwindCode[i].u.CodeOffset) continue; /* skip it */

            switch (info->UnwindCode[i].u.UnwindOp)
            {
            case UWOP_PUSH_NONVOL:  /* pushq %reg */
                if (!sw_read_mem(csw, context->Rsp, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                context->Rsp += sizeof(ULONG64);
                break;
            case UWOP_ALLOC_LARGE:  /* subq $nn,%rsp */
                if (info->UnwindCode[i].u.OpInfo) context->Rsp += *(DWORD*)&info->UnwindCode[i+1];
                else context->Rsp += *(USHORT*)&info->UnwindCode[i+1] * 8;
                break;
            case UWOP_ALLOC_SMALL:  /* subq $n,%rsp */
                context->Rsp += (info->UnwindCode[i].u.OpInfo + 1) * 8;
                break;
            case UWOP_SET_FPREG:  /* leaq nn(%rsp),%framereg */
                context->Rsp = newframe;
                break;
            case UWOP_SAVE_NONVOL:  /* movq %reg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 8;
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                break;
            case UWOP_SAVE_NONVOL_FAR:  /* movq %reg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                break;
            case UWOP_SAVE_XMM128:  /* movaps %xmmreg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 16;
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
                break;
            case UWOP_SAVE_XMM128_FAR:  /* movaps %xmmreg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
                break;
            case UWOP_PUSH_MACHFRAME:
                if (info->Flags & UNW_FLAG_CHAININFO)
                {
                    FIXME("PUSH_MACHFRAME with chained unwind info.\n");
                    break;
                }
                if (i + get_opcode_size(info->UnwindCode[i]) < info->CountOfCodes)
                {
                    FIXME("PUSH_MACHFRAME is not the last opcode.\n");
                    break;
                }

                if (info->UnwindCode[i].u.OpInfo)
                    context->Rsp += 0x8; /* skip the pushed error code */

                if (!sw_read_mem(csw, context->Rsp, &context->Rip, sizeof(DWORD64))) return FALSE;
                if (!sw_read_mem(csw, context->Rsp + 24, &context->Rsp, sizeof(DWORD64))) return FALSE;
                mach_frame = TRUE;
                break;
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
                break;
            }
        }
        if (!(info->Flags & UNW_FLAG_CHAININFO)) break;
        if (!sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
                         ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE),
                         &handler_data, sizeof(handler_data))) return FALSE;
        function = &handler_data.chain; /* restart with the chained info */
    }
    return mach_frame ? TRUE : default_unwind(csw, context);
}
/* fetch_next_frame()
 *
 * modify (at least) context.{rip, rsp, rbp} using unwind information
 * either out of PE exception handlers, debug info (dwarf), or simple stack unwind
 */
static BOOL fetch_next_frame(struct cpu_stack_walk *csw, union ctx *pcontext,
                             DWORD_PTR curr_pc, void** prtf)
{
    DWORD64 cfa;
    RUNTIME_FUNCTION* rtf;
    DWORD64 base;
    CONTEXT *context = &pcontext->ctx;

    if (!curr_pc || !(base = sw_module_base(csw, curr_pc))) return FALSE;
    rtf = sw_table_access(csw, curr_pc);
    if (prtf) *prtf = rtf;
    if (rtf)
    {
        return interpret_function_table_entry(csw, context, rtf, base);
    }
    else if (dwarf2_virtual_unwind(csw, curr_pc, pcontext, &cfa))
    {
        context->Rsp = cfa;
        TRACE("next function rip=%016Ix\n", context->Rip);
        TRACE(" rax=%016Ix rbx=%016Ix rcx=%016Ix rdx=%016Ix\n",
              context->Rax, context->Rbx, context->Rcx, context->Rdx);
        TRACE(" rsi=%016Ix rdi=%016Ix rbp=%016Ix rsp=%016Ix\n",
              context->Rsi, context->Rdi, context->Rbp, context->Rsp);
        TRACE(" r8=%016Ix r9=%016Ix r10=%016Ix r11=%016Ix\n",
              context->R8, context->R9, context->R10, context->R11);
        TRACE(" r12=%016Ix r13=%016Ix r14=%016Ix r15=%016Ix\n",
              context->R12, context->R13, context->R14, context->R15);
        return TRUE;
    }
    else
        return default_unwind(csw, context);
}
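
/* One iteration of the stack walk: the STACKFRAME64 Reserved[] slots (see
 * curr_mode/curr_count above) keep the walker state between calls. */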
#ifdef __x86_64__
static BOOL x86_64_stack_walk(struct cpu_stack_walk *csw, STACKFRAME64 *frame,
                              union ctx *context)
{
    unsigned deltapc = curr_count <= 1 ? 0 : 1;

    if (curr_mode >= stm_done) return FALSE;
652 TRACE("Enter: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s\n",
653 wine_dbgstr_addr(&frame
->AddrPC
),
654 wine_dbgstr_addr(&frame
->AddrFrame
),
655 wine_dbgstr_addr(&frame
->AddrReturn
),
656 wine_dbgstr_addr(&frame
->AddrStack
),
657 curr_mode
== stm_start
? "start" : "64bit",
658 wine_dbgstr_longlong(curr_count
));
    if (curr_mode == stm_start)
    {
        if ((frame->AddrPC.Mode == AddrModeFlat) &&
            (frame->AddrFrame.Mode != AddrModeFlat))
        {
            WARN("Bad AddrPC.Mode / AddrFrame.Mode combination\n");
            goto done_err;
        }

        curr_mode = stm_64bit;
        frame->AddrReturn.Mode = frame->AddrStack.Mode = AddrModeFlat;
        /* don't set up AddrStack on first call. Either the caller has set it up, or
         * we will get it in the next frame
         */
        memset(&frame->AddrBStore, 0, sizeof(frame->AddrBStore));
    }
    else
    {
        if (context->ctx.Rsp != frame->AddrStack.Offset) FIXME("inconsistent Stack Pointer\n");
        if (context->ctx.Rip != frame->AddrPC.Offset) FIXME("inconsistent Instruction Pointer\n");

        if (frame->AddrReturn.Offset == 0) goto done_err;
        if (!fetch_next_frame(csw, context, frame->AddrPC.Offset - deltapc, &frame->FuncTableEntry))
            goto done_err;
    }

    memset(&frame->Params, 0, sizeof(frame->Params));
    /* set frame information */
    frame->AddrStack.Offset = context->ctx.Rsp;
    frame->AddrFrame.Offset = context->ctx.Rbp;
    frame->AddrPC.Offset    = context->ctx.Rip;
    {
        union ctx newctx = *context;

        if (!fetch_next_frame(csw, &newctx, frame->AddrPC.Offset - deltapc, NULL))
            newctx.ctx.Rip = 0;
        frame->AddrReturn.Mode   = AddrModeFlat;
        frame->AddrReturn.Offset = newctx.ctx.Rip;
    }

    frame->Virtual = TRUE;
    curr_count++;
708 TRACE("Leave: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s FuncTable=%p\n",
709 wine_dbgstr_addr(&frame
->AddrPC
),
710 wine_dbgstr_addr(&frame
->AddrFrame
),
711 wine_dbgstr_addr(&frame
->AddrReturn
),
712 wine_dbgstr_addr(&frame
->AddrStack
),
713 curr_mode
== stm_start
? "start" : "64bit",
714 wine_dbgstr_longlong(curr_count
),
715 frame
->FuncTableEntry
);
    return TRUE;

done_err:
    curr_mode = stm_done;
    return FALSE;
}
#else
static BOOL x86_64_stack_walk(struct cpu_stack_walk *csw, STACKFRAME64 *frame,
                              union ctx *context)
{
    return FALSE;
}
#endif
static void* x86_64_find_runtime_function(struct module* module, DWORD64 addr)
{
    RUNTIME_FUNCTION* rtf;
    ULONG size;
    int min, max;

    rtf = (RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
    if (rtf) for (min = 0, max = size / sizeof(*rtf); min <= max; )
    {
        int pos = (min + max) / 2;
        if (addr < module->module.BaseOfImage + rtf[pos].BeginAddress) max = pos - 1;
        else if (addr >= module->module.BaseOfImage + rtf[pos].EndAddress) min = pos + 1;
        else
        {
            rtf += pos;
            while (rtf->UnwindData & 1) /* follow chained entry */
            {
                FIXME("RunTime_Function outside IMAGE_DIRECTORY_ENTRY_EXCEPTION unimplemented yet!\n");
                return NULL;
                /* we need to read into the other process */
                /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
            }
            return rtf;
        }
    }
    return NULL;
}
static unsigned x86_64_map_dwarf_register(unsigned regno, const struct module* module, BOOL eh_frame)
{
    unsigned reg;

    if (regno >= 17 && regno <= 24)
        reg = CV_AMD64_XMM0 + regno - 17;
    else if (regno >= 25 && regno <= 32)
        reg = CV_AMD64_XMM8 + regno - 25;
    else if (regno >= 33 && regno <= 40)
        reg = CV_AMD64_ST0 + regno - 33;
    else switch (regno)
    {
    case  0: reg = CV_AMD64_RAX;    break;
    case  1: reg = CV_AMD64_RDX;    break;
    case  2: reg = CV_AMD64_RCX;    break;
    case  3: reg = CV_AMD64_RBX;    break;
    case  4: reg = CV_AMD64_RSI;    break;
    case  5: reg = CV_AMD64_RDI;    break;
    case  6: reg = CV_AMD64_RBP;    break;
    case  7: reg = CV_AMD64_RSP;    break;
    case  8: reg = CV_AMD64_R8;     break;
    case  9: reg = CV_AMD64_R9;     break;
    case 10: reg = CV_AMD64_R10;    break;
    case 11: reg = CV_AMD64_R11;    break;
    case 12: reg = CV_AMD64_R12;    break;
    case 13: reg = CV_AMD64_R13;    break;
    case 14: reg = CV_AMD64_R14;    break;
    case 15: reg = CV_AMD64_R15;    break;
    case 16: reg = CV_AMD64_RIP;    break;
    case 49: reg = CV_AMD64_EFLAGS; break;
    case 50: reg = CV_AMD64_ES;     break;
    case 51: reg = CV_AMD64_CS;     break;
    case 52: reg = CV_AMD64_SS;     break;
    case 53: reg = CV_AMD64_DS;     break;
    case 54: reg = CV_AMD64_FS;     break;
    case 55: reg = CV_AMD64_GS;     break;
    case 62: reg = CV_AMD64_TR;     break;
    case 63: reg = CV_AMD64_LDTR;   break;
    case 64: reg = CV_AMD64_MXCSR;  break;
    case 65: reg = CV_AMD64_CTRL;   break;
    case 66: reg = CV_AMD64_STAT;   break;
    default:
        FIXME("Don't know how to map register %d\n", regno);
        return 0;
    }
    return reg;
}
static void *x86_64_fetch_context_reg(union ctx *pctx, unsigned regno, unsigned *size)
{
    CONTEXT *ctx = &pctx->ctx;

    switch (regno)
    {
    case CV_AMD64_RAX: *size = sizeof(ctx->Rax); return &ctx->Rax;
    case CV_AMD64_RDX: *size = sizeof(ctx->Rdx); return &ctx->Rdx;
    case CV_AMD64_RCX: *size = sizeof(ctx->Rcx); return &ctx->Rcx;
    case CV_AMD64_RBX: *size = sizeof(ctx->Rbx); return &ctx->Rbx;
    case CV_AMD64_RSI: *size = sizeof(ctx->Rsi); return &ctx->Rsi;
    case CV_AMD64_RDI: *size = sizeof(ctx->Rdi); return &ctx->Rdi;
    case CV_AMD64_RBP: *size = sizeof(ctx->Rbp); return &ctx->Rbp;
    case CV_AMD64_RSP: *size = sizeof(ctx->Rsp); return &ctx->Rsp;
    case CV_AMD64_R8:  *size = sizeof(ctx->R8);  return &ctx->R8;
    case CV_AMD64_R9:  *size = sizeof(ctx->R9);  return &ctx->R9;
    case CV_AMD64_R10: *size = sizeof(ctx->R10); return &ctx->R10;
    case CV_AMD64_R11: *size = sizeof(ctx->R11); return &ctx->R11;
    case CV_AMD64_R12: *size = sizeof(ctx->R12); return &ctx->R12;
    case CV_AMD64_R13: *size = sizeof(ctx->R13); return &ctx->R13;
    case CV_AMD64_R14: *size = sizeof(ctx->R14); return &ctx->R14;
    case CV_AMD64_R15: *size = sizeof(ctx->R15); return &ctx->R15;
    case CV_AMD64_RIP: *size = sizeof(ctx->Rip); return &ctx->Rip;

    case CV_AMD64_XMM0 + 0: *size = sizeof(ctx->u.s.Xmm0);  return &ctx->u.s.Xmm0;
    case CV_AMD64_XMM0 + 1: *size = sizeof(ctx->u.s.Xmm1);  return &ctx->u.s.Xmm1;
    case CV_AMD64_XMM0 + 2: *size = sizeof(ctx->u.s.Xmm2);  return &ctx->u.s.Xmm2;
    case CV_AMD64_XMM0 + 3: *size = sizeof(ctx->u.s.Xmm3);  return &ctx->u.s.Xmm3;
    case CV_AMD64_XMM0 + 4: *size = sizeof(ctx->u.s.Xmm4);  return &ctx->u.s.Xmm4;
    case CV_AMD64_XMM0 + 5: *size = sizeof(ctx->u.s.Xmm5);  return &ctx->u.s.Xmm5;
    case CV_AMD64_XMM0 + 6: *size = sizeof(ctx->u.s.Xmm6);  return &ctx->u.s.Xmm6;
    case CV_AMD64_XMM0 + 7: *size = sizeof(ctx->u.s.Xmm7);  return &ctx->u.s.Xmm7;
    case CV_AMD64_XMM8 + 0: *size = sizeof(ctx->u.s.Xmm8);  return &ctx->u.s.Xmm8;
    case CV_AMD64_XMM8 + 1: *size = sizeof(ctx->u.s.Xmm9);  return &ctx->u.s.Xmm9;
    case CV_AMD64_XMM8 + 2: *size = sizeof(ctx->u.s.Xmm10); return &ctx->u.s.Xmm10;
    case CV_AMD64_XMM8 + 3: *size = sizeof(ctx->u.s.Xmm11); return &ctx->u.s.Xmm11;
    case CV_AMD64_XMM8 + 4: *size = sizeof(ctx->u.s.Xmm12); return &ctx->u.s.Xmm12;
    case CV_AMD64_XMM8 + 5: *size = sizeof(ctx->u.s.Xmm13); return &ctx->u.s.Xmm13;
    case CV_AMD64_XMM8 + 6: *size = sizeof(ctx->u.s.Xmm14); return &ctx->u.s.Xmm14;
    case CV_AMD64_XMM8 + 7: *size = sizeof(ctx->u.s.Xmm15); return &ctx->u.s.Xmm15;

    case CV_AMD64_ST0 + 0: *size = sizeof(ctx->u.s.Legacy[0]); return &ctx->u.s.Legacy[0];
    case CV_AMD64_ST0 + 1: *size = sizeof(ctx->u.s.Legacy[1]); return &ctx->u.s.Legacy[1];
    case CV_AMD64_ST0 + 2: *size = sizeof(ctx->u.s.Legacy[2]); return &ctx->u.s.Legacy[2];
    case CV_AMD64_ST0 + 3: *size = sizeof(ctx->u.s.Legacy[3]); return &ctx->u.s.Legacy[3];
    case CV_AMD64_ST0 + 4: *size = sizeof(ctx->u.s.Legacy[4]); return &ctx->u.s.Legacy[4];
    case CV_AMD64_ST0 + 5: *size = sizeof(ctx->u.s.Legacy[5]); return &ctx->u.s.Legacy[5];
    case CV_AMD64_ST0 + 6: *size = sizeof(ctx->u.s.Legacy[6]); return &ctx->u.s.Legacy[6];
    case CV_AMD64_ST0 + 7: *size = sizeof(ctx->u.s.Legacy[7]); return &ctx->u.s.Legacy[7];

    case CV_AMD64_EFLAGS: *size = sizeof(ctx->EFlags); return &ctx->EFlags;
    case CV_AMD64_ES: *size = sizeof(ctx->SegEs); return &ctx->SegEs;
    case CV_AMD64_CS: *size = sizeof(ctx->SegCs); return &ctx->SegCs;
    case CV_AMD64_SS: *size = sizeof(ctx->SegSs); return &ctx->SegSs;
    case CV_AMD64_DS: *size = sizeof(ctx->SegDs); return &ctx->SegDs;
    case CV_AMD64_FS: *size = sizeof(ctx->SegFs); return &ctx->SegFs;
    case CV_AMD64_GS: *size = sizeof(ctx->SegGs); return &ctx->SegGs;
    }

    FIXME("Unknown register %x\n", regno);
    return NULL;
}
static const char* x86_64_fetch_regname(unsigned regno)
{
    switch (regno)
    {
    case CV_AMD64_RAX: return "rax";
    case CV_AMD64_RDX: return "rdx";
    case CV_AMD64_RCX: return "rcx";
    case CV_AMD64_RBX: return "rbx";
    case CV_AMD64_RSI: return "rsi";
    case CV_AMD64_RDI: return "rdi";
    case CV_AMD64_RBP: return "rbp";
    case CV_AMD64_RSP: return "rsp";
    case CV_AMD64_R8:  return "r8";
    case CV_AMD64_R9:  return "r9";
    case CV_AMD64_R10: return "r10";
    case CV_AMD64_R11: return "r11";
    case CV_AMD64_R12: return "r12";
    case CV_AMD64_R13: return "r13";
    case CV_AMD64_R14: return "r14";
    case CV_AMD64_R15: return "r15";
    case CV_AMD64_RIP: return "rip";

    case CV_AMD64_XMM0 + 0: return "xmm0";
    case CV_AMD64_XMM0 + 1: return "xmm1";
    case CV_AMD64_XMM0 + 2: return "xmm2";
    case CV_AMD64_XMM0 + 3: return "xmm3";
    case CV_AMD64_XMM0 + 4: return "xmm4";
    case CV_AMD64_XMM0 + 5: return "xmm5";
    case CV_AMD64_XMM0 + 6: return "xmm6";
    case CV_AMD64_XMM0 + 7: return "xmm7";
    case CV_AMD64_XMM8 + 0: return "xmm8";
    case CV_AMD64_XMM8 + 1: return "xmm9";
    case CV_AMD64_XMM8 + 2: return "xmm10";
    case CV_AMD64_XMM8 + 3: return "xmm11";
    case CV_AMD64_XMM8 + 4: return "xmm12";
    case CV_AMD64_XMM8 + 5: return "xmm13";
    case CV_AMD64_XMM8 + 6: return "xmm14";
    case CV_AMD64_XMM8 + 7: return "xmm15";

    case CV_AMD64_ST0 + 0: return "st0";
    case CV_AMD64_ST0 + 1: return "st1";
    case CV_AMD64_ST0 + 2: return "st2";
    case CV_AMD64_ST0 + 3: return "st3";
    case CV_AMD64_ST0 + 4: return "st4";
    case CV_AMD64_ST0 + 5: return "st5";
    case CV_AMD64_ST0 + 6: return "st6";
    case CV_AMD64_ST0 + 7: return "st7";

    case CV_AMD64_EFLAGS: return "eflags";
    case CV_AMD64_ES: return "es";
    case CV_AMD64_CS: return "cs";
    case CV_AMD64_SS: return "ss";
    case CV_AMD64_DS: return "ds";
    case CV_AMD64_FS: return "fs";
    case CV_AMD64_GS: return "gs";
    }
    FIXME("Unknown register %x\n", regno);
    return NULL;
}
static BOOL x86_64_fetch_minidump_thread(struct dump_context* dc, unsigned index, unsigned flags, const CONTEXT* ctx)
{
    if (ctx->ContextFlags && (flags & ThreadWriteInstructionWindow))
    {
        /* FIXME: crop values across module boundaries, */
        ULONG64 base = ctx->Rip <= 0x80 ? 0 : ctx->Rip - 0x80;
        minidump_add_memory_block(dc, base, ctx->Rip + 0x80 - base, 0);
    }

    return TRUE;
}
static BOOL x86_64_fetch_minidump_module(struct dump_context* dc, unsigned index, unsigned flags)
{
    /* FIXME: not sure about the flags... */
    if (1)
    {
        /* FIXME: crop values across module boundaries, */
        struct process* pcs;
        struct module* module;
        const RUNTIME_FUNCTION* rtf;
        ULONG size;

        if (!(pcs = process_find_by_handle(dc->process->handle)) ||
            !(module = module_find_by_addr(pcs, dc->modules[index].base, DMT_UNKNOWN)))
            return FALSE;
        rtf = (const RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
        if (rtf)
        {
            const RUNTIME_FUNCTION* end = (const RUNTIME_FUNCTION*)((const char*)rtf + size);
            UNWIND_INFO ui;

            while (rtf + 1 < end)
            {
                while (rtf->UnwindData & 1) /* follow chained entry */
                {
                    FIXME("RunTime_Function outside IMAGE_DIRECTORY_ENTRY_EXCEPTION unimplemented yet!\n");
                    return FALSE;
                    /* we need to read into the other process */
                    /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
                }
                if (read_process_memory(dc->process, dc->modules[index].base + rtf->UnwindData, &ui, sizeof(ui)))
                    minidump_add_memory_block(dc, dc->modules[index].base + rtf->UnwindData,
                                              FIELD_OFFSET(UNWIND_INFO, UnwindCode) + ui.CountOfCodes * sizeof(UNWIND_CODE), 0);
                rtf++;
            }
        }
    }

    return TRUE;
}
DECLSPEC_HIDDEN struct cpu cpu_x86_64 = {
    IMAGE_FILE_MACHINE_AMD64,
    8,
    CV_AMD64_RSP,
    x86_64_get_addr,
    x86_64_stack_walk,
    x86_64_find_runtime_function,
    x86_64_map_dwarf_register,
    x86_64_fetch_context_reg,
    x86_64_fetch_regname,
    x86_64_fetch_minidump_thread,
    x86_64_fetch_minidump_module,
};
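
/* Illustrative sketch only (not part of the Wine source): a client drives this
 * backend through the public dbghelp API rather than calling cpu_x86_64 directly.
 * hProcess/hThread and symbol initialization are assumed to be set up elsewhere.
 *
 *   STACKFRAME64 frame = {0};
 *   CONTEXT ctx = {0};
 *   ctx.ContextFlags = CONTEXT_FULL;
 *   GetThreadContext(hThread, &ctx);
 *   frame.AddrPC.Offset    = ctx.Rip; frame.AddrPC.Mode    = AddrModeFlat;
 *   frame.AddrFrame.Offset = ctx.Rbp; frame.AddrFrame.Mode = AddrModeFlat;
 *   frame.AddrStack.Offset = ctx.Rsp; frame.AddrStack.Mode = AddrModeFlat;
 *   while (StackWalk64(IMAGE_FILE_MACHINE_AMD64, hProcess, hThread, &frame, &ctx,
 *                      NULL, SymFunctionTableAccess64, SymGetModuleBase64, NULL))
 *       printf("%#I64x\n", frame.AddrPC.Offset);
 */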