/*
 * x86-64 signal handling routines
 *
 * Copyright 1999, 2005 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
28 #include "wine/port.h"
36 #include <sys/types.h>
40 #ifdef HAVE_MACHINE_SYSARCH_H
41 # include <machine/sysarch.h>
43 #ifdef HAVE_SYS_PARAM_H
44 # include <sys/param.h>
49 # ifdef HAVE_SYS_SYSCALL_H
50 # include <sys/syscall.h>
53 #ifdef HAVE_SYS_SIGNAL_H
54 # include <sys/signal.h>
56 #ifdef HAVE_SYS_UCONTEXT_H
57 # include <sys/ucontext.h>
60 # define UNW_LOCAL_ONLY
61 # include <libunwind.h>
64 # include <mach/mach.h>
67 #define NONAMELESSUNION
68 #define NONAMELESSSTRUCT
70 #define WIN32_NO_STATUS
74 #include "wine/exception.h"
75 #include "wine/list.h"
77 #include "unix_private.h"
78 #include "wine/debug.h"
80 WINE_DEFAULT_DEBUG_CHANNEL(seh
);
82 /***********************************************************************
83 * signal context platform-specific definitions
87 #include <asm/prctl.h>
/* Thin wrapper for the Linux arch_prctl(2) system call, used on x86-64
 * to get/set the fs/gs segment bases.  Returns the raw syscall result
 * (-1 with errno set on failure). */
static inline int arch_prctl( int code, void *addr )
{
    return syscall( __NR_arch_prctl, code, addr );
}
90 #ifndef FP_XSTATE_MAGIC1
91 #define FP_XSTATE_MAGIC1 0x46505853
94 #define RAX_sig(context) ((context)->uc_mcontext.gregs[REG_RAX])
95 #define RBX_sig(context) ((context)->uc_mcontext.gregs[REG_RBX])
96 #define RCX_sig(context) ((context)->uc_mcontext.gregs[REG_RCX])
97 #define RDX_sig(context) ((context)->uc_mcontext.gregs[REG_RDX])
98 #define RSI_sig(context) ((context)->uc_mcontext.gregs[REG_RSI])
99 #define RDI_sig(context) ((context)->uc_mcontext.gregs[REG_RDI])
100 #define RBP_sig(context) ((context)->uc_mcontext.gregs[REG_RBP])
101 #define R8_sig(context) ((context)->uc_mcontext.gregs[REG_R8])
102 #define R9_sig(context) ((context)->uc_mcontext.gregs[REG_R9])
103 #define R10_sig(context) ((context)->uc_mcontext.gregs[REG_R10])
104 #define R11_sig(context) ((context)->uc_mcontext.gregs[REG_R11])
105 #define R12_sig(context) ((context)->uc_mcontext.gregs[REG_R12])
106 #define R13_sig(context) ((context)->uc_mcontext.gregs[REG_R13])
107 #define R14_sig(context) ((context)->uc_mcontext.gregs[REG_R14])
108 #define R15_sig(context) ((context)->uc_mcontext.gregs[REG_R15])
109 #define CS_sig(context) (*((WORD *)&(context)->uc_mcontext.gregs[REG_CSGSFS] + 0))
110 #define GS_sig(context) (*((WORD *)&(context)->uc_mcontext.gregs[REG_CSGSFS] + 1))
111 #define FS_sig(context) (*((WORD *)&(context)->uc_mcontext.gregs[REG_CSGSFS] + 2))
112 #define RSP_sig(context) ((context)->uc_mcontext.gregs[REG_RSP])
113 #define RIP_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
114 #define EFL_sig(context) ((context)->uc_mcontext.gregs[REG_EFL])
115 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
116 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
117 #define FPU_sig(context) ((XMM_SAVE_AREA32 *)((context)->uc_mcontext.fpregs))
118 #define XState_sig(fpu) (((unsigned int *)fpu->Reserved4)[12] == FP_XSTATE_MAGIC1 ? (XSTATE *)(fpu + 1) : NULL)
120 #elif defined(__FreeBSD__) || defined (__FreeBSD_kernel__)
122 #define RAX_sig(context) ((context)->uc_mcontext.mc_rax)
123 #define RBX_sig(context) ((context)->uc_mcontext.mc_rbx)
124 #define RCX_sig(context) ((context)->uc_mcontext.mc_rcx)
125 #define RDX_sig(context) ((context)->uc_mcontext.mc_rdx)
126 #define RSI_sig(context) ((context)->uc_mcontext.mc_rsi)
127 #define RDI_sig(context) ((context)->uc_mcontext.mc_rdi)
128 #define RBP_sig(context) ((context)->uc_mcontext.mc_rbp)
129 #define R8_sig(context) ((context)->uc_mcontext.mc_r8)
130 #define R9_sig(context) ((context)->uc_mcontext.mc_r9)
131 #define R10_sig(context) ((context)->uc_mcontext.mc_r10)
132 #define R11_sig(context) ((context)->uc_mcontext.mc_r11)
133 #define R12_sig(context) ((context)->uc_mcontext.mc_r12)
134 #define R13_sig(context) ((context)->uc_mcontext.mc_r13)
135 #define R14_sig(context) ((context)->uc_mcontext.mc_r14)
136 #define R15_sig(context) ((context)->uc_mcontext.mc_r15)
137 #define CS_sig(context) ((context)->uc_mcontext.mc_cs)
138 #define DS_sig(context) ((context)->uc_mcontext.mc_ds)
139 #define ES_sig(context) ((context)->uc_mcontext.mc_es)
140 #define FS_sig(context) ((context)->uc_mcontext.mc_fs)
141 #define GS_sig(context) ((context)->uc_mcontext.mc_gs)
142 #define SS_sig(context) ((context)->uc_mcontext.mc_ss)
143 #define EFL_sig(context) ((context)->uc_mcontext.mc_rflags)
144 #define RIP_sig(context) ((context)->uc_mcontext.mc_rip)
145 #define RSP_sig(context) ((context)->uc_mcontext.mc_rsp)
146 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
147 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
148 #define FPU_sig(context) ((XMM_SAVE_AREA32 *)((context)->uc_mcontext.mc_fpstate))
149 #define XState_sig(context) NULL
151 #elif defined(__NetBSD__)
153 #define RAX_sig(context) ((context)->uc_mcontext.__gregs[_REG_RAX])
154 #define RBX_sig(context) ((context)->uc_mcontext.__gregs[_REG_RBX])
155 #define RCX_sig(context) ((context)->uc_mcontext.__gregs[_REG_RCX])
156 #define RDX_sig(context) ((context)->uc_mcontext.__gregs[_REG_RDX])
157 #define RSI_sig(context) ((context)->uc_mcontext.__gregs[_REG_RSI])
158 #define RDI_sig(context) ((context)->uc_mcontext.__gregs[_REG_RDI])
159 #define RBP_sig(context) ((context)->uc_mcontext.__gregs[_REG_RBP])
160 #define R8_sig(context) ((context)->uc_mcontext.__gregs[_REG_R8])
161 #define R9_sig(context) ((context)->uc_mcontext.__gregs[_REG_R9])
162 #define R10_sig(context) ((context)->uc_mcontext.__gregs[_REG_R10])
163 #define R11_sig(context) ((context)->uc_mcontext.__gregs[_REG_R11])
164 #define R12_sig(context) ((context)->uc_mcontext.__gregs[_REG_R12])
165 #define R13_sig(context) ((context)->uc_mcontext.__gregs[_REG_R13])
166 #define R14_sig(context) ((context)->uc_mcontext.__gregs[_REG_R14])
167 #define R15_sig(context) ((context)->uc_mcontext.__gregs[_REG_R15])
168 #define CS_sig(context) ((context)->uc_mcontext.__gregs[_REG_CS])
169 #define DS_sig(context) ((context)->uc_mcontext.__gregs[_REG_DS])
170 #define ES_sig(context) ((context)->uc_mcontext.__gregs[_REG_ES])
171 #define FS_sig(context) ((context)->uc_mcontext.__gregs[_REG_FS])
172 #define GS_sig(context) ((context)->uc_mcontext.__gregs[_REG_GS])
173 #define SS_sig(context) ((context)->uc_mcontext.__gregs[_REG_SS])
174 #define EFL_sig(context) ((context)->uc_mcontext.__gregs[_REG_RFL])
175 #define RIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.__gregs[_REG_RIP]))
176 #define RSP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.__gregs[_REG_URSP]))
177 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
178 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
179 #define FPU_sig(context) ((XMM_SAVE_AREA32 *)((context)->uc_mcontext.__fpregs))
180 #define XState_sig(context) NULL
182 #elif defined (__APPLE__)
184 #define RAX_sig(context) ((context)->uc_mcontext->__ss.__rax)
185 #define RBX_sig(context) ((context)->uc_mcontext->__ss.__rbx)
186 #define RCX_sig(context) ((context)->uc_mcontext->__ss.__rcx)
187 #define RDX_sig(context) ((context)->uc_mcontext->__ss.__rdx)
188 #define RSI_sig(context) ((context)->uc_mcontext->__ss.__rsi)
189 #define RDI_sig(context) ((context)->uc_mcontext->__ss.__rdi)
190 #define RBP_sig(context) ((context)->uc_mcontext->__ss.__rbp)
191 #define R8_sig(context) ((context)->uc_mcontext->__ss.__r8)
192 #define R9_sig(context) ((context)->uc_mcontext->__ss.__r9)
193 #define R10_sig(context) ((context)->uc_mcontext->__ss.__r10)
194 #define R11_sig(context) ((context)->uc_mcontext->__ss.__r11)
195 #define R12_sig(context) ((context)->uc_mcontext->__ss.__r12)
196 #define R13_sig(context) ((context)->uc_mcontext->__ss.__r13)
197 #define R14_sig(context) ((context)->uc_mcontext->__ss.__r14)
198 #define R15_sig(context) ((context)->uc_mcontext->__ss.__r15)
199 #define CS_sig(context) ((context)->uc_mcontext->__ss.__cs)
200 #define FS_sig(context) ((context)->uc_mcontext->__ss.__fs)
201 #define GS_sig(context) ((context)->uc_mcontext->__ss.__gs)
202 #define EFL_sig(context) ((context)->uc_mcontext->__ss.__rflags)
203 #define RIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->__ss.__rip))
204 #define RSP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->__ss.__rsp))
205 #define TRAP_sig(context) ((context)->uc_mcontext->__es.__trapno)
206 #define ERROR_sig(context) ((context)->uc_mcontext->__es.__err)
207 #define FPU_sig(context) ((XMM_SAVE_AREA32 *)&(context)->uc_mcontext->__fs.__fpu_fcw)
208 #define XState_sig(context) NULL
211 #error You must define the signal context functions for your platform
216 TRAP_x86_DIVIDE
= 0, /* Division by zero exception */
217 TRAP_x86_TRCTRAP
= 1, /* Single-step exception */
218 TRAP_x86_NMI
= 2, /* NMI interrupt */
219 TRAP_x86_BPTFLT
= 3, /* Breakpoint exception */
220 TRAP_x86_OFLOW
= 4, /* Overflow exception */
221 TRAP_x86_BOUND
= 5, /* Bound range exception */
222 TRAP_x86_PRIVINFLT
= 6, /* Invalid opcode exception */
223 TRAP_x86_DNA
= 7, /* Device not available exception */
224 TRAP_x86_DOUBLEFLT
= 8, /* Double fault exception */
225 TRAP_x86_FPOPFLT
= 9, /* Coprocessor segment overrun */
226 TRAP_x86_TSSFLT
= 10, /* Invalid TSS exception */
227 TRAP_x86_SEGNPFLT
= 11, /* Segment not present exception */
228 TRAP_x86_STKFLT
= 12, /* Stack fault */
229 TRAP_x86_PROTFLT
= 13, /* General protection fault */
230 TRAP_x86_PAGEFLT
= 14, /* Page fault */
231 TRAP_x86_ARITHTRAP
= 16, /* Floating point exception */
232 TRAP_x86_ALIGNFLT
= 17, /* Alignment check exception */
233 TRAP_x86_MCHK
= 18, /* Machine check exception */
234 TRAP_x86_CACHEFLT
= 19 /* Cache flush exception */
237 /* stack layout when calling an exception raise function */
241 CONTEXT_EX context_ex
;
242 EXCEPTION_RECORD rec
;
244 char xstate
[0]; /* If xstate is present it is allocated
245 * dynamically to provide 64 byte alignment. */
248 C_ASSERT((offsetof(struct stack_layout
, xstate
) == sizeof(struct stack_layout
)));
250 C_ASSERT( sizeof(XSTATE
) == 0x140 );
251 C_ASSERT( sizeof(struct stack_layout
) == 0x590 ); /* Should match the size in call_user_exception_dispatcher(). */
253 /* stack layout when calling an user apc function.
254 * FIXME: match Windows ABI. */
255 struct apc_stack_layout
257 ULONG64 save_regs
[4];
265 /* Should match size and offset in call_user_apc_dispatcher(). */
266 C_ASSERT( offsetof(struct apc_stack_layout
, context
) == 0x30 );
267 C_ASSERT( sizeof(struct apc_stack_layout
) == 0x510 );
271 ULONG64 xmm
[10 * 2]; /* xmm6-xmm15 */
285 /* Should match the offset in call_user_apc_dispatcher(). */
286 C_ASSERT( offsetof( struct syscall_frame
, ret_addr
) == 0xf0);
288 struct amd64_thread_data
290 DWORD_PTR dr0
; /* 02f0 debug registers */
291 DWORD_PTR dr1
; /* 02f8 */
292 DWORD_PTR dr2
; /* 0300 */
293 DWORD_PTR dr3
; /* 0308 */
294 DWORD_PTR dr6
; /* 0310 */
295 DWORD_PTR dr7
; /* 0318 */
296 void *exit_frame
; /* 0320 exit frame pointer */
297 struct syscall_frame
*syscall_frame
; /* 0328 syscall frame pointer */
300 C_ASSERT( sizeof(struct amd64_thread_data
) <= sizeof(((struct ntdll_thread_data
*)0)->cpu_data
) );
301 C_ASSERT( offsetof( TEB
, GdiTebBatch
) + offsetof( struct amd64_thread_data
, exit_frame
) == 0x320 );
302 C_ASSERT( offsetof( TEB
, GdiTebBatch
) + offsetof( struct amd64_thread_data
, syscall_frame
) == 0x328 );
304 static inline struct amd64_thread_data
*amd64_thread_data(void)
306 return (struct amd64_thread_data
*)ntdll_get_thread_data()->cpu_data
;
309 void *get_syscall_frame(void)
311 return amd64_thread_data()->syscall_frame
;
314 void set_syscall_frame(void *frame
)
316 amd64_thread_data()->syscall_frame
= frame
;
319 /***********************************************************************
320 * Definitions for Dwarf unwind tables
323 enum dwarf_call_frame_info
325 DW_CFA_advance_loc
= 0x40,
326 DW_CFA_offset
= 0x80,
327 DW_CFA_restore
= 0xc0,
329 DW_CFA_set_loc
= 0x01,
330 DW_CFA_advance_loc1
= 0x02,
331 DW_CFA_advance_loc2
= 0x03,
332 DW_CFA_advance_loc4
= 0x04,
333 DW_CFA_offset_extended
= 0x05,
334 DW_CFA_restore_extended
= 0x06,
335 DW_CFA_undefined
= 0x07,
336 DW_CFA_same_value
= 0x08,
337 DW_CFA_register
= 0x09,
338 DW_CFA_remember_state
= 0x0a,
339 DW_CFA_restore_state
= 0x0b,
340 DW_CFA_def_cfa
= 0x0c,
341 DW_CFA_def_cfa_register
= 0x0d,
342 DW_CFA_def_cfa_offset
= 0x0e,
343 DW_CFA_def_cfa_expression
= 0x0f,
344 DW_CFA_expression
= 0x10,
345 DW_CFA_offset_extended_sf
= 0x11,
346 DW_CFA_def_cfa_sf
= 0x12,
347 DW_CFA_def_cfa_offset_sf
= 0x13,
348 DW_CFA_val_offset
= 0x14,
349 DW_CFA_val_offset_sf
= 0x15,
350 DW_CFA_val_expression
= 0x16,
357 DW_OP_const1u
= 0x08,
358 DW_OP_const1s
= 0x09,
359 DW_OP_const2u
= 0x0a,
360 DW_OP_const2s
= 0x0b,
361 DW_OP_const4u
= 0x0c,
362 DW_OP_const4s
= 0x0d,
363 DW_OP_const8u
= 0x0e,
364 DW_OP_const8s
= 0x0f,
384 DW_OP_plus_uconst
= 0x23,
497 DW_OP_deref_size
= 0x94,
498 DW_OP_xderef_size
= 0x95,
500 DW_OP_push_object_address
= 0x97,
503 DW_OP_call_ref
= 0x9a,
504 DW_OP_form_tls_address
= 0x9b,
505 DW_OP_call_frame_cfa
= 0x9c,
506 DW_OP_bit_piece
= 0x9d,
507 DW_OP_lo_user
= 0xe0,
508 DW_OP_hi_user
= 0xff,
509 DW_OP_GNU_push_tls_address
= 0xe0,
510 DW_OP_GNU_uninit
= 0xf0,
511 DW_OP_GNU_encoded_addr
= 0xf1,
514 #define DW_EH_PE_native 0x00
515 #define DW_EH_PE_leb128 0x01
516 #define DW_EH_PE_data2 0x02
517 #define DW_EH_PE_data4 0x03
518 #define DW_EH_PE_data8 0x04
519 #define DW_EH_PE_signed 0x08
520 #define DW_EH_PE_abs 0x00
521 #define DW_EH_PE_pcrel 0x10
522 #define DW_EH_PE_textrel 0x20
523 #define DW_EH_PE_datarel 0x30
524 #define DW_EH_PE_funcrel 0x40
525 #define DW_EH_PE_aligned 0x50
526 #define DW_EH_PE_indirect 0x80
527 #define DW_EH_PE_omit 0xff
529 struct dwarf_eh_bases
540 unsigned char version
;
541 unsigned char augmentation
[1];
547 unsigned int cie_offset
;
550 extern const struct dwarf_fde
*_Unwind_Find_FDE (void *, struct dwarf_eh_bases
*);
/* Read a single byte from the dwarf byte stream at *p.
 * NOTE(review): the function body (pointer advance and return) is
 * missing from this extracted view — confirm against the original. */
552 static unsigned char dwarf_get_u1( const unsigned char **p
)
/* Decode a little-endian 16-bit value from the dwarf byte stream at *p.
 * NOTE(review): the pointer advance and return statement are missing
 * from this extracted view — confirm against the original. */
557 static unsigned short dwarf_get_u2( const unsigned char **p
)
559 unsigned int ret
= (*p
)[0] | ((*p
)[1] << 8);
/* Decode a little-endian 32-bit value from the dwarf byte stream at *p.
 * NOTE(review): the pointer advance and return statement are missing
 * from this extracted view — confirm against the original. */
564 static unsigned int dwarf_get_u4( const unsigned char **p
)
566 unsigned int ret
= (*p
)[0] | ((*p
)[1] << 8) | ((*p
)[2] << 16) | ((*p
)[3] << 24);
571 static ULONG64
dwarf_get_u8( const unsigned char **p
)
573 ULONG64 low
= dwarf_get_u4( p
);
574 ULONG64 high
= dwarf_get_u4( p
);
575 return low
| (high
<< 32);
/* Decode an unsigned LEB128 variable-length integer: 7 payload bits
 * per byte, least-significant group first; the high bit (0x80) of each
 * byte marks continuation.
 * NOTE(review): the declarations of ret/byte, the byte fetch and the
 * return are missing from this extracted view — confirm against the
 * original. */
578 static ULONG_PTR
dwarf_get_uleb128( const unsigned char **p
)
581 unsigned int shift
= 0;
587 ret
|= (ULONG_PTR
)(byte
& 0x7f) << shift
;
590 } while (byte
& 0x80);
/* Decode a signed LEB128 variable-length integer: same byte layout as
 * the unsigned form, but the value is sign-extended when the sign bit
 * (0x40) of the final byte is set.
 * NOTE(review): the declarations of ret/byte, the byte fetch and the
 * return are missing from this extracted view — confirm against the
 * original. */
594 static LONG_PTR
dwarf_get_sleb128( const unsigned char **p
)
597 unsigned int shift
= 0;
603 ret
|= (ULONG_PTR
)(byte
& 0x7f) << shift
;
606 } while (byte
& 0x80);
/* sign-extend if the last byte's sign bit was set and the shift has
 * not already covered the full width of ret */
608 if ((shift
< 8 * sizeof(ret
)) && (byte
& 0x40)) ret
|= -((ULONG_PTR
)1 << shift
);
/* Decode a pointer value according to a DW_EH_PE_* encoding byte: the
 * high nibble selects the base the value is relative to (absolute,
 * pc-relative, ...) and the low nibble the representation (leb128,
 * 2/4/8-byte, optionally signed).
 * NOTE(review): several lines (parts of the base computation, some
 * case labels, the final return) are missing from this extracted
 * view — confirm against the original. */
612 static ULONG_PTR
dwarf_get_ptr( const unsigned char **p
, unsigned char encoding
)
616 if (encoding
== DW_EH_PE_omit
) return 0;
/* high nibble: compute the base address the encoded value is relative to */
618 switch (encoding
& 0xf0)
624 base
= (ULONG_PTR
)*p
;
627 FIXME( "unsupported encoding %02x\n", encoding
);
/* low nibble: decode the value itself and add it to the base */
631 switch (encoding
& 0x0f)
633 case DW_EH_PE_native
:
634 return base
+ dwarf_get_u8( p
);
635 case DW_EH_PE_leb128
:
636 return base
+ dwarf_get_uleb128( p
);
638 return base
+ dwarf_get_u2( p
);
640 return base
+ dwarf_get_u4( p
);
642 return base
+ dwarf_get_u8( p
);
643 case DW_EH_PE_signed
|DW_EH_PE_leb128
:
644 return base
+ dwarf_get_sleb128( p
);
645 case DW_EH_PE_signed
|DW_EH_PE_data2
:
646 return base
+ (signed short)dwarf_get_u2( p
);
647 case DW_EH_PE_signed
|DW_EH_PE_data4
:
648 return base
+ (signed int)dwarf_get_u4( p
);
649 case DW_EH_PE_signed
|DW_EH_PE_data8
:
650 return base
+ (LONG64
)dwarf_get_u8( p
);
652 FIXME( "unsupported encoding %02x\n", encoding
);
659 RULE_UNSET
, /* not set at all */
660 RULE_UNDEFINED
, /* undefined value */
661 RULE_SAME
, /* same value as previous frame */
662 RULE_CFA_OFFSET
, /* stored at cfa offset */
663 RULE_OTHER_REG
, /* stored in other register */
664 RULE_EXPRESSION
, /* address specified by expression */
665 RULE_VAL_EXPRESSION
/* value specified by expression */
668 #define NB_FRAME_REGS 41
669 #define MAX_SAVED_STATES 16
673 ULONG_PTR cfa_offset
;
674 unsigned char cfa_reg
;
675 enum reg_rule cfa_rule
;
676 enum reg_rule rules
[NB_FRAME_REGS
];
677 ULONG64 regs
[NB_FRAME_REGS
];
683 ULONG_PTR code_align
;
685 unsigned char retaddr_reg
;
686 unsigned char fde_encoding
;
687 unsigned char signal_frame
;
688 unsigned char state_sp
;
689 struct frame_state state
;
690 struct frame_state
*state_stack
;
693 static const char *dwarf_reg_names
[NB_FRAME_REGS
] =
695 /* 0-7 */ "%rax", "%rdx", "%rcx", "%rbx", "%rsi", "%rdi", "%rbp", "%rsp",
696 /* 8-16 */ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "%rip",
697 /* 17-24 */ "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
698 /* 25-32 */ "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15",
699 /* 33-40 */ "%st0", "%st1", "%st2", "%st3", "%st4", "%st5", "%st6", "%st7"
702 static BOOL
valid_reg( ULONG_PTR reg
)
704 if (reg
>= NB_FRAME_REGS
) FIXME( "unsupported reg %lx\n", reg
);
705 return (reg
< NB_FRAME_REGS
);
708 static void execute_cfa_instructions( const unsigned char *ptr
, const unsigned char *end
,
709 ULONG_PTR last_ip
, struct frame_info
*info
)
711 while (ptr
< end
&& info
->ip
< last_ip
+ info
->signal_frame
)
713 enum dwarf_call_frame_info op
= *ptr
++;
719 case DW_CFA_advance_loc
:
721 ULONG_PTR offset
= (op
& 0x3f) * info
->code_align
;
722 TRACE( "%lx: DW_CFA_advance_loc %lu\n", info
->ip
, offset
);
728 ULONG_PTR reg
= op
& 0x3f;
729 LONG_PTR offset
= dwarf_get_uleb128( &ptr
) * info
->data_align
;
730 if (!valid_reg( reg
)) break;
731 TRACE( "%lx: DW_CFA_offset %s, %ld\n", info
->ip
, dwarf_reg_names
[reg
], offset
);
732 info
->state
.regs
[reg
] = offset
;
733 info
->state
.rules
[reg
] = RULE_CFA_OFFSET
;
738 ULONG_PTR reg
= op
& 0x3f;
739 if (!valid_reg( reg
)) break;
740 TRACE( "%lx: DW_CFA_restore %s\n", info
->ip
, dwarf_reg_names
[reg
] );
741 info
->state
.rules
[reg
] = RULE_UNSET
;
752 ULONG_PTR loc
= dwarf_get_ptr( &ptr
, info
->fde_encoding
);
753 TRACE( "%lx: DW_CFA_set_loc %lx\n", info
->ip
, loc
);
757 case DW_CFA_advance_loc1
:
759 ULONG_PTR offset
= *ptr
++ * info
->code_align
;
760 TRACE( "%lx: DW_CFA_advance_loc1 %lu\n", info
->ip
, offset
);
764 case DW_CFA_advance_loc2
:
766 ULONG_PTR offset
= dwarf_get_u2( &ptr
) * info
->code_align
;
767 TRACE( "%lx: DW_CFA_advance_loc2 %lu\n", info
->ip
, offset
);
771 case DW_CFA_advance_loc4
:
773 ULONG_PTR offset
= dwarf_get_u4( &ptr
) * info
->code_align
;
774 TRACE( "%lx: DW_CFA_advance_loc4 %lu\n", info
->ip
, offset
);
778 case DW_CFA_offset_extended
:
779 case DW_CFA_offset_extended_sf
:
781 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
782 LONG_PTR offset
= (op
== DW_CFA_offset_extended
) ? dwarf_get_uleb128( &ptr
) * info
->data_align
783 : dwarf_get_sleb128( &ptr
) * info
->data_align
;
784 if (!valid_reg( reg
)) break;
785 TRACE( "%lx: DW_CFA_offset_extended %s, %ld\n", info
->ip
, dwarf_reg_names
[reg
], offset
);
786 info
->state
.regs
[reg
] = offset
;
787 info
->state
.rules
[reg
] = RULE_CFA_OFFSET
;
790 case DW_CFA_restore_extended
:
792 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
793 if (!valid_reg( reg
)) break;
794 TRACE( "%lx: DW_CFA_restore_extended %s\n", info
->ip
, dwarf_reg_names
[reg
] );
795 info
->state
.rules
[reg
] = RULE_UNSET
;
798 case DW_CFA_undefined
:
800 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
801 if (!valid_reg( reg
)) break;
802 TRACE( "%lx: DW_CFA_undefined %s\n", info
->ip
, dwarf_reg_names
[reg
] );
803 info
->state
.rules
[reg
] = RULE_UNDEFINED
;
806 case DW_CFA_same_value
:
808 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
809 if (!valid_reg( reg
)) break;
810 TRACE( "%lx: DW_CFA_same_value %s\n", info
->ip
, dwarf_reg_names
[reg
] );
811 info
->state
.regs
[reg
] = reg
;
812 info
->state
.rules
[reg
] = RULE_SAME
;
815 case DW_CFA_register
:
817 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
818 ULONG_PTR reg2
= dwarf_get_uleb128( &ptr
);
819 if (!valid_reg( reg
) || !valid_reg( reg2
)) break;
820 TRACE( "%lx: DW_CFA_register %s == %s\n", info
->ip
, dwarf_reg_names
[reg
], dwarf_reg_names
[reg2
] );
821 info
->state
.regs
[reg
] = reg2
;
822 info
->state
.rules
[reg
] = RULE_OTHER_REG
;
825 case DW_CFA_remember_state
:
826 TRACE( "%lx: DW_CFA_remember_state\n", info
->ip
);
827 if (info
->state_sp
>= MAX_SAVED_STATES
)
828 FIXME( "%lx: DW_CFA_remember_state too many nested saves\n", info
->ip
);
830 info
->state_stack
[info
->state_sp
++] = info
->state
;
832 case DW_CFA_restore_state
:
833 TRACE( "%lx: DW_CFA_restore_state\n", info
->ip
);
835 FIXME( "%lx: DW_CFA_restore_state without corresponding save\n", info
->ip
);
837 info
->state
= info
->state_stack
[--info
->state_sp
];
840 case DW_CFA_def_cfa_sf
:
842 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
843 ULONG_PTR offset
= (op
== DW_CFA_def_cfa
) ? dwarf_get_uleb128( &ptr
)
844 : dwarf_get_sleb128( &ptr
) * info
->data_align
;
845 if (!valid_reg( reg
)) break;
846 TRACE( "%lx: DW_CFA_def_cfa %s, %lu\n", info
->ip
, dwarf_reg_names
[reg
], offset
);
847 info
->state
.cfa_reg
= reg
;
848 info
->state
.cfa_offset
= offset
;
849 info
->state
.cfa_rule
= RULE_CFA_OFFSET
;
852 case DW_CFA_def_cfa_register
:
854 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
855 if (!valid_reg( reg
)) break;
856 TRACE( "%lx: DW_CFA_def_cfa_register %s\n", info
->ip
, dwarf_reg_names
[reg
] );
857 info
->state
.cfa_reg
= reg
;
858 info
->state
.cfa_rule
= RULE_CFA_OFFSET
;
861 case DW_CFA_def_cfa_offset
:
862 case DW_CFA_def_cfa_offset_sf
:
864 ULONG_PTR offset
= (op
== DW_CFA_def_cfa_offset
) ? dwarf_get_uleb128( &ptr
)
865 : dwarf_get_sleb128( &ptr
) * info
->data_align
;
866 TRACE( "%lx: DW_CFA_def_cfa_offset %lu\n", info
->ip
, offset
);
867 info
->state
.cfa_offset
= offset
;
868 info
->state
.cfa_rule
= RULE_CFA_OFFSET
;
871 case DW_CFA_def_cfa_expression
:
873 ULONG_PTR expr
= (ULONG_PTR
)ptr
;
874 ULONG_PTR len
= dwarf_get_uleb128( &ptr
);
875 TRACE( "%lx: DW_CFA_def_cfa_expression %lx-%lx\n", info
->ip
, expr
, expr
+len
);
876 info
->state
.cfa_offset
= expr
;
877 info
->state
.cfa_rule
= RULE_VAL_EXPRESSION
;
881 case DW_CFA_expression
:
882 case DW_CFA_val_expression
:
884 ULONG_PTR reg
= dwarf_get_uleb128( &ptr
);
885 ULONG_PTR expr
= (ULONG_PTR
)ptr
;
886 ULONG_PTR len
= dwarf_get_uleb128( &ptr
);
887 if (!valid_reg( reg
)) break;
888 TRACE( "%lx: DW_CFA_%sexpression %s %lx-%lx\n",
889 info
->ip
, (op
== DW_CFA_expression
) ? "" : "val_", dwarf_reg_names
[reg
], expr
, expr
+len
);
890 info
->state
.regs
[reg
] = expr
;
891 info
->state
.rules
[reg
] = (op
== DW_CFA_expression
) ? RULE_EXPRESSION
: RULE_VAL_EXPRESSION
;
896 FIXME( "%lx: unknown CFA opcode %02x\n", info
->ip
, op
);
902 /* retrieve a context register from its dwarf number */
903 static void *get_context_reg( CONTEXT
*context
, ULONG_PTR dw_reg
)
907 case 0: return &context
->Rax
;
908 case 1: return &context
->Rdx
;
909 case 2: return &context
->Rcx
;
910 case 3: return &context
->Rbx
;
911 case 4: return &context
->Rsi
;
912 case 5: return &context
->Rdi
;
913 case 6: return &context
->Rbp
;
914 case 7: return &context
->Rsp
;
915 case 8: return &context
->R8
;
916 case 9: return &context
->R9
;
917 case 10: return &context
->R10
;
918 case 11: return &context
->R11
;
919 case 12: return &context
->R12
;
920 case 13: return &context
->R13
;
921 case 14: return &context
->R14
;
922 case 15: return &context
->R15
;
923 case 16: return &context
->Rip
;
924 case 17: return &context
->u
.s
.Xmm0
;
925 case 18: return &context
->u
.s
.Xmm1
;
926 case 19: return &context
->u
.s
.Xmm2
;
927 case 20: return &context
->u
.s
.Xmm3
;
928 case 21: return &context
->u
.s
.Xmm4
;
929 case 22: return &context
->u
.s
.Xmm5
;
930 case 23: return &context
->u
.s
.Xmm6
;
931 case 24: return &context
->u
.s
.Xmm7
;
932 case 25: return &context
->u
.s
.Xmm8
;
933 case 26: return &context
->u
.s
.Xmm9
;
934 case 27: return &context
->u
.s
.Xmm10
;
935 case 28: return &context
->u
.s
.Xmm11
;
936 case 29: return &context
->u
.s
.Xmm12
;
937 case 30: return &context
->u
.s
.Xmm13
;
938 case 31: return &context
->u
.s
.Xmm14
;
939 case 32: return &context
->u
.s
.Xmm15
;
940 case 33: return &context
->u
.s
.Legacy
[0];
941 case 34: return &context
->u
.s
.Legacy
[1];
942 case 35: return &context
->u
.s
.Legacy
[2];
943 case 36: return &context
->u
.s
.Legacy
[3];
944 case 37: return &context
->u
.s
.Legacy
[4];
945 case 38: return &context
->u
.s
.Legacy
[5];
946 case 39: return &context
->u
.s
.Legacy
[6];
947 case 40: return &context
->u
.s
.Legacy
[7];
948 default: return NULL
;
952 /* set a context register from its dwarf number */
953 static void set_context_reg( CONTEXT
*context
, ULONG_PTR dw_reg
, void *val
)
957 case 0: context
->Rax
= *(ULONG64
*)val
; break;
958 case 1: context
->Rdx
= *(ULONG64
*)val
; break;
959 case 2: context
->Rcx
= *(ULONG64
*)val
; break;
960 case 3: context
->Rbx
= *(ULONG64
*)val
; break;
961 case 4: context
->Rsi
= *(ULONG64
*)val
; break;
962 case 5: context
->Rdi
= *(ULONG64
*)val
; break;
963 case 6: context
->Rbp
= *(ULONG64
*)val
; break;
964 case 7: context
->Rsp
= *(ULONG64
*)val
; break;
965 case 8: context
->R8
= *(ULONG64
*)val
; break;
966 case 9: context
->R9
= *(ULONG64
*)val
; break;
967 case 10: context
->R10
= *(ULONG64
*)val
; break;
968 case 11: context
->R11
= *(ULONG64
*)val
; break;
969 case 12: context
->R12
= *(ULONG64
*)val
; break;
970 case 13: context
->R13
= *(ULONG64
*)val
; break;
971 case 14: context
->R14
= *(ULONG64
*)val
; break;
972 case 15: context
->R15
= *(ULONG64
*)val
; break;
973 case 16: context
->Rip
= *(ULONG64
*)val
; break;
974 case 17: memcpy( &context
->u
.s
.Xmm0
, val
, sizeof(M128A
) ); break;
975 case 18: memcpy( &context
->u
.s
.Xmm1
, val
, sizeof(M128A
) ); break;
976 case 19: memcpy( &context
->u
.s
.Xmm2
, val
, sizeof(M128A
) ); break;
977 case 20: memcpy( &context
->u
.s
.Xmm3
, val
, sizeof(M128A
) ); break;
978 case 21: memcpy( &context
->u
.s
.Xmm4
, val
, sizeof(M128A
) ); break;
979 case 22: memcpy( &context
->u
.s
.Xmm5
, val
, sizeof(M128A
) ); break;
980 case 23: memcpy( &context
->u
.s
.Xmm6
, val
, sizeof(M128A
) ); break;
981 case 24: memcpy( &context
->u
.s
.Xmm7
, val
, sizeof(M128A
) ); break;
982 case 25: memcpy( &context
->u
.s
.Xmm8
, val
, sizeof(M128A
) ); break;
983 case 26: memcpy( &context
->u
.s
.Xmm9
, val
, sizeof(M128A
) ); break;
984 case 27: memcpy( &context
->u
.s
.Xmm10
, val
, sizeof(M128A
) ); break;
985 case 28: memcpy( &context
->u
.s
.Xmm11
, val
, sizeof(M128A
) ); break;
986 case 29: memcpy( &context
->u
.s
.Xmm12
, val
, sizeof(M128A
) ); break;
987 case 30: memcpy( &context
->u
.s
.Xmm13
, val
, sizeof(M128A
) ); break;
988 case 31: memcpy( &context
->u
.s
.Xmm14
, val
, sizeof(M128A
) ); break;
989 case 32: memcpy( &context
->u
.s
.Xmm15
, val
, sizeof(M128A
) ); break;
990 case 33: memcpy( &context
->u
.s
.Legacy
[0], val
, sizeof(M128A
) ); break;
991 case 34: memcpy( &context
->u
.s
.Legacy
[1], val
, sizeof(M128A
) ); break;
992 case 35: memcpy( &context
->u
.s
.Legacy
[2], val
, sizeof(M128A
) ); break;
993 case 36: memcpy( &context
->u
.s
.Legacy
[3], val
, sizeof(M128A
) ); break;
994 case 37: memcpy( &context
->u
.s
.Legacy
[4], val
, sizeof(M128A
) ); break;
995 case 38: memcpy( &context
->u
.s
.Legacy
[5], val
, sizeof(M128A
) ); break;
996 case 39: memcpy( &context
->u
.s
.Legacy
[6], val
, sizeof(M128A
) ); break;
997 case 40: memcpy( &context
->u
.s
.Legacy
[7], val
, sizeof(M128A
) ); break;
1001 static ULONG_PTR
eval_expression( const unsigned char *p
, CONTEXT
*context
)
1003 ULONG_PTR reg
, tmp
, stack
[64];
1005 ULONG_PTR len
= dwarf_get_uleb128(&p
);
1006 const unsigned char *end
= p
+ len
;
1010 unsigned char opcode
= dwarf_get_u1(&p
);
1012 if (opcode
>= DW_OP_lit0
&& opcode
<= DW_OP_lit31
)
1013 stack
[++sp
] = opcode
- DW_OP_lit0
;
1014 else if (opcode
>= DW_OP_reg0
&& opcode
<= DW_OP_reg31
)
1015 stack
[++sp
] = *(ULONG_PTR
*)get_context_reg( context
, opcode
- DW_OP_reg0
);
1016 else if (opcode
>= DW_OP_breg0
&& opcode
<= DW_OP_breg31
)
1017 stack
[++sp
] = *(ULONG_PTR
*)get_context_reg( context
, opcode
- DW_OP_breg0
) + dwarf_get_sleb128(&p
);
1018 else switch (opcode
)
1020 case DW_OP_nop
: break;
1021 case DW_OP_addr
: stack
[++sp
] = dwarf_get_u8(&p
); break;
1022 case DW_OP_const1u
: stack
[++sp
] = dwarf_get_u1(&p
); break;
1023 case DW_OP_const1s
: stack
[++sp
] = (signed char)dwarf_get_u1(&p
); break;
1024 case DW_OP_const2u
: stack
[++sp
] = dwarf_get_u2(&p
); break;
1025 case DW_OP_const2s
: stack
[++sp
] = (short)dwarf_get_u2(&p
); break;
1026 case DW_OP_const4u
: stack
[++sp
] = dwarf_get_u4(&p
); break;
1027 case DW_OP_const4s
: stack
[++sp
] = (signed int)dwarf_get_u4(&p
); break;
1028 case DW_OP_const8u
: stack
[++sp
] = dwarf_get_u8(&p
); break;
1029 case DW_OP_const8s
: stack
[++sp
] = (LONG_PTR
)dwarf_get_u8(&p
); break;
1030 case DW_OP_constu
: stack
[++sp
] = dwarf_get_uleb128(&p
); break;
1031 case DW_OP_consts
: stack
[++sp
] = dwarf_get_sleb128(&p
); break;
1032 case DW_OP_deref
: stack
[sp
] = *(ULONG_PTR
*)stack
[sp
]; break;
1033 case DW_OP_dup
: stack
[sp
+ 1] = stack
[sp
]; sp
++; break;
1034 case DW_OP_drop
: sp
--; break;
1035 case DW_OP_over
: stack
[sp
+ 1] = stack
[sp
- 1]; sp
++; break;
1036 case DW_OP_pick
: stack
[sp
+ 1] = stack
[sp
- dwarf_get_u1(&p
)]; sp
++; break;
1037 case DW_OP_swap
: tmp
= stack
[sp
]; stack
[sp
] = stack
[sp
-1]; stack
[sp
-1] = tmp
; break;
1038 case DW_OP_rot
: tmp
= stack
[sp
]; stack
[sp
] = stack
[sp
-1]; stack
[sp
-1] = stack
[sp
-2]; stack
[sp
-2] = tmp
; break;
1039 case DW_OP_abs
: stack
[sp
] = labs(stack
[sp
]); break;
1040 case DW_OP_neg
: stack
[sp
] = -stack
[sp
]; break;
1041 case DW_OP_not
: stack
[sp
] = ~stack
[sp
]; break;
1042 case DW_OP_and
: stack
[sp
-1] &= stack
[sp
]; sp
--; break;
1043 case DW_OP_or
: stack
[sp
-1] |= stack
[sp
]; sp
--; break;
1044 case DW_OP_minus
: stack
[sp
-1] -= stack
[sp
]; sp
--; break;
1045 case DW_OP_mul
: stack
[sp
-1] *= stack
[sp
]; sp
--; break;
1046 case DW_OP_plus
: stack
[sp
-1] += stack
[sp
]; sp
--; break;
1047 case DW_OP_xor
: stack
[sp
-1] ^= stack
[sp
]; sp
--; break;
1048 case DW_OP_shl
: stack
[sp
-1] <<= stack
[sp
]; sp
--; break;
1049 case DW_OP_shr
: stack
[sp
-1] >>= stack
[sp
]; sp
--; break;
1050 case DW_OP_plus_uconst
: stack
[sp
] += dwarf_get_uleb128(&p
); break;
1051 case DW_OP_shra
: stack
[sp
-1] = (LONG_PTR
)stack
[sp
-1] / (1 << stack
[sp
]); sp
--; break;
1052 case DW_OP_div
: stack
[sp
-1] = (LONG_PTR
)stack
[sp
-1] / (LONG_PTR
)stack
[sp
]; sp
--; break;
1053 case DW_OP_mod
: stack
[sp
-1] = (LONG_PTR
)stack
[sp
-1] % (LONG_PTR
)stack
[sp
]; sp
--; break;
1054 case DW_OP_ge
: stack
[sp
-1] = ((LONG_PTR
)stack
[sp
-1] >= (LONG_PTR
)stack
[sp
]); sp
--; break;
1055 case DW_OP_gt
: stack
[sp
-1] = ((LONG_PTR
)stack
[sp
-1] > (LONG_PTR
)stack
[sp
]); sp
--; break;
1056 case DW_OP_le
: stack
[sp
-1] = ((LONG_PTR
)stack
[sp
-1] <= (LONG_PTR
)stack
[sp
]); sp
--; break;
1057 case DW_OP_lt
: stack
[sp
-1] = ((LONG_PTR
)stack
[sp
-1] < (LONG_PTR
)stack
[sp
]); sp
--; break;
1058 case DW_OP_eq
: stack
[sp
-1] = (stack
[sp
-1] == stack
[sp
]); sp
--; break;
1059 case DW_OP_ne
: stack
[sp
-1] = (stack
[sp
-1] != stack
[sp
]); sp
--; break;
1060 case DW_OP_skip
: tmp
= (short)dwarf_get_u2(&p
); p
+= tmp
; break;
1061 case DW_OP_bra
: tmp
= (short)dwarf_get_u2(&p
); if (!stack
[sp
--]) p
+= tmp
; break;
1062 case DW_OP_GNU_encoded_addr
: tmp
= *p
++; stack
[++sp
] = dwarf_get_ptr( &p
, tmp
); break;
1063 case DW_OP_regx
: stack
[++sp
] = *(ULONG_PTR
*)get_context_reg( context
, dwarf_get_uleb128(&p
) ); break;
1065 reg
= dwarf_get_uleb128(&p
);
1066 tmp
= dwarf_get_sleb128(&p
);
1067 stack
[++sp
] = *(ULONG_PTR
*)get_context_reg( context
, reg
) + tmp
;
1069 case DW_OP_deref_size
:
1072 case 1: stack
[sp
] = *(unsigned char *)stack
[sp
]; break;
1073 case 2: stack
[sp
] = *(unsigned short *)stack
[sp
]; break;
1074 case 4: stack
[sp
] = *(unsigned int *)stack
[sp
]; break;
1075 case 8: stack
[sp
] = *(ULONG_PTR
*)stack
[sp
]; break;
1079 FIXME( "unhandled opcode %02x\n", opcode
);
/* apply the computed frame info to the actual context */
static void apply_frame_state( CONTEXT *context, struct frame_state *state )
{
    unsigned int i;
    ULONG_PTR cfa, value;
    /* work on a copy so that RULE_OTHER_REG / expressions always read the
     * *old* register values, even for registers updated earlier in the loop */
    CONTEXT new_context = *context;

    /* first compute the canonical frame address (CFA) */
    switch (state->cfa_rule)
    {
    case RULE_EXPRESSION:
        /* expression yields the address where the CFA is stored */
        cfa = *(ULONG_PTR *)eval_expression( (const unsigned char *)state->cfa_offset, context );
        break;
    case RULE_VAL_EXPRESSION:
        /* expression yields the CFA value itself */
        cfa = eval_expression( (const unsigned char *)state->cfa_offset, context );
        break;
    default:
        /* register + offset form */
        cfa = *(ULONG_PTR *)get_context_reg( context, state->cfa_reg ) + state->cfa_offset;
        break;
    }
    if (!cfa) return;

    /* then restore every register according to its rule */
    for (i = 0; i < NB_FRAME_REGS; i++)
    {
        switch (state->rules[i])
        {
        case RULE_UNSET:
        case RULE_UNDEFINED:
        case RULE_SAME:
            break;  /* register keeps its current value */
        case RULE_CFA_OFFSET:
            /* saved at CFA + offset */
            set_context_reg( &new_context, i, (char *)cfa + state->regs[i] );
            break;
        case RULE_OTHER_REG:
            /* value lives in another register of the old context */
            set_context_reg( &new_context, i, get_context_reg( context, state->regs[i] ));
            break;
        case RULE_EXPRESSION:
            /* expression yields the address of the saved value */
            value = eval_expression( (const unsigned char *)state->regs[i], context );
            set_context_reg( &new_context, i, (void *)value );
            break;
        case RULE_VAL_EXPRESSION:
            /* expression yields the value directly */
            value = eval_expression( (const unsigned char *)state->regs[i], context );
            set_context_reg( &new_context, i, &value );
            break;
        }
    }
    new_context.Rsp = cfa;  /* stack pointer of the caller frame is the CFA */
    *context = new_context;
}
/***********************************************************************
 *           dwarf_virtual_unwind
 *
 * Equivalent of RtlVirtualUnwind for builtin modules.
 *
 * Parses the CIE referenced by 'fde', then the FDE itself, executing the
 * DWARF call-frame instructions up to 'ip', and applies the resulting
 * frame state to *context.  On return *frame holds the establisher frame
 * (the Rsp before unwinding), *handler the personality routine from the
 * 'P' augmentation (if any), and *handler_data the LSDA pointer.
 */
static NTSTATUS dwarf_virtual_unwind( ULONG64 ip, ULONG64 *frame, CONTEXT *context,
                                      const struct dwarf_fde *fde, const struct dwarf_eh_bases *bases,
                                      PEXCEPTION_ROUTINE *handler, void **handler_data )
{
    const struct dwarf_cie *cie;
    const unsigned char *ptr, *augmentation, *end;
    ULONG_PTR len, code_end;
    struct frame_info info;
    struct frame_state state_stack[MAX_SAVED_STATES];
    int aug_z_format = 0;
    unsigned char lsda_encoding = DW_EH_PE_omit;

    memset( &info, 0, sizeof(info) );
    info.state_stack = state_stack;
    info.ip = (ULONG_PTR)bases->func;
    *handler = NULL;

    /* cie_offset is self-relative: it counts back from its own field */
    cie = (const struct dwarf_cie *)((const char *)&fde->cie_offset - fde->cie_offset);

    /* parse the CIE first */

    if (cie->version != 1 && cie->version != 3)
    {
        FIXME( "unknown CIE version %u at %p\n", cie->version, cie );
        return STATUS_INVALID_DISPOSITION;
    }
    ptr = cie->augmentation + strlen((const char *)cie->augmentation) + 1;

    info.code_align = dwarf_get_uleb128( &ptr );
    info.data_align = dwarf_get_sleb128( &ptr );
    if (cie->version == 1)
        info.retaddr_reg = *ptr++;          /* v1: single byte register number */
    else
        info.retaddr_reg = dwarf_get_uleb128( &ptr );
    info.state.cfa_rule = RULE_CFA_OFFSET;

    TRACE( "function %lx base %p cie %p len %x id %x version %x aug '%s' code_align %lu data_align %ld retaddr %s\n",
           ip, bases->func, cie, cie->length, cie->id, cie->version, cie->augmentation,
           info.code_align, info.data_align, dwarf_reg_names[info.retaddr_reg] );

    end = NULL;
    for (augmentation = cie->augmentation; *augmentation; augmentation++)
    {
        switch (*augmentation)
        {
        case 'z':
            /* augmentation data is length-prefixed; remember where it ends */
            len = dwarf_get_uleb128( &ptr );
            end = ptr + len;
            aug_z_format = 1;
            continue;
        case 'L':
            lsda_encoding = *ptr++;
            continue;
        case 'P':
        {
            /* personality routine pointer, with its own encoding byte */
            unsigned char encoding = *ptr++;
            *handler = (void *)dwarf_get_ptr( &ptr, encoding );
            continue;
        }
        case 'R':
            info.fde_encoding = *ptr++;
            continue;
        case 'S':
            info.signal_frame = 1;
            continue;
        default:
            FIXME( "unknown augmentation '%c'\n", *augmentation );
            if (!end) return STATUS_INVALID_DISPOSITION;  /* cannot continue */
            break;
        }
        break;
    }
    if (end) ptr = end;  /* skip any remaining augmentation data */

    end = (const unsigned char *)(&cie->length + 1) + cie->length;
    execute_cfa_instructions( ptr, end, ip, &info );  /* CIE initial instructions */

    ptr = (const unsigned char *)(fde + 1);
    info.ip = dwarf_get_ptr( &ptr, info.fde_encoding );                      /* fde code start */
    code_end = info.ip + dwarf_get_ptr( &ptr, info.fde_encoding & 0x0f );    /* fde code length */

    if (aug_z_format)  /* get length of augmentation data */
    {
        len = dwarf_get_uleb128( &ptr );
        end = ptr + len;
    }
    else end = NULL;

    if (lsda_encoding != DW_EH_PE_omit)
        *handler_data = (void *)dwarf_get_ptr( &ptr, lsda_encoding );
    if (end) ptr = end;

    end = (const unsigned char *)(&fde->length + 1) + fde->length;
    TRACE( "fde %p len %x personality %p lsda %p code %lx-%lx\n",
           fde, fde->length, *handler, *handler_data, info.ip, code_end );
    execute_cfa_instructions( ptr, end, ip, &info );  /* FDE instructions up to ip */
    *frame = context->Rsp;
    apply_frame_state( context, &info.state );

    TRACE( "next function rip=%016lx\n", context->Rip );
    TRACE( "  rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
           context->Rax, context->Rbx, context->Rcx, context->Rdx );
    TRACE( "  rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
           context->Rsi, context->Rdi, context->Rbp, context->Rsp );
    TRACE( "   r8=%016lx  r9=%016lx r10=%016lx r11=%016lx\n",
           context->R8, context->R9, context->R10, context->R11 );
    TRACE( "  r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
           context->R12, context->R13, context->R14, context->R15 );

    return STATUS_SUCCESS;
}
#ifdef HAVE_LIBUNWIND
/***********************************************************************
 *           libunwind_virtual_unwind
 *
 * Equivalent of RtlVirtualUnwind for builtin modules.
 *
 * Seeds a libunwind cursor from *context, steps one frame, and writes the
 * caller's registers back into *context.  *handler / *handler_data receive
 * the personality routine and LSDA reported by libunwind.
 */
static NTSTATUS libunwind_virtual_unwind( ULONG64 ip, ULONG64 *frame, CONTEXT *context,
                                          PEXCEPTION_ROUTINE *handler, void **handler_data )
{
    unw_context_t unw_context;
    unw_cursor_t cursor;
    unw_proc_info_t info;
    int rc;

#ifdef __APPLE__
    /* On Mac, the unw_context can't be filled directly; initialize a cursor
     * from the current context, then overwrite each register explicitly. */
    rc = unw_getcontext( &unw_context );
    if (rc == UNW_ESUCCESS)
        rc = unw_init_local( &cursor, &unw_context );
    if (rc == UNW_ESUCCESS)
    {
        unw_set_reg( &cursor, UNW_REG_IP,     context->Rip );
        unw_set_reg( &cursor, UNW_REG_SP,     context->Rsp );
        unw_set_reg( &cursor, UNW_X86_64_RAX, context->Rax );
        unw_set_reg( &cursor, UNW_X86_64_RDX, context->Rdx );
        unw_set_reg( &cursor, UNW_X86_64_RCX, context->Rcx );
        unw_set_reg( &cursor, UNW_X86_64_RBX, context->Rbx );
        unw_set_reg( &cursor, UNW_X86_64_RSI, context->Rsi );
        unw_set_reg( &cursor, UNW_X86_64_RDI, context->Rdi );
        unw_set_reg( &cursor, UNW_X86_64_RBP, context->Rbp );
        unw_set_reg( &cursor, UNW_X86_64_R8,  context->R8 );
        unw_set_reg( &cursor, UNW_X86_64_R9,  context->R9 );
        unw_set_reg( &cursor, UNW_X86_64_R10, context->R10 );
        unw_set_reg( &cursor, UNW_X86_64_R11, context->R11 );
        unw_set_reg( &cursor, UNW_X86_64_R12, context->R12 );
        unw_set_reg( &cursor, UNW_X86_64_R13, context->R13 );
        unw_set_reg( &cursor, UNW_X86_64_R14, context->R14 );
        unw_set_reg( &cursor, UNW_X86_64_R15, context->R15 );
    }
#else
    /* elsewhere unw_context_t is a ucontext_t; fill it via the sig macros */
    RAX_sig(&unw_context) = context->Rax;
    RCX_sig(&unw_context) = context->Rcx;
    RDX_sig(&unw_context) = context->Rdx;
    RBX_sig(&unw_context) = context->Rbx;
    RSP_sig(&unw_context) = context->Rsp;
    RBP_sig(&unw_context) = context->Rbp;
    RSI_sig(&unw_context) = context->Rsi;
    RDI_sig(&unw_context) = context->Rdi;
    R8_sig(&unw_context)  = context->R8;
    R9_sig(&unw_context)  = context->R9;
    R10_sig(&unw_context) = context->R10;
    R11_sig(&unw_context) = context->R11;
    R12_sig(&unw_context) = context->R12;
    R13_sig(&unw_context) = context->R13;
    R14_sig(&unw_context) = context->R14;
    R15_sig(&unw_context) = context->R15;
    RIP_sig(&unw_context) = context->Rip;
    CS_sig(&unw_context)  = context->SegCs;
    FS_sig(&unw_context)  = context->SegFs;
    GS_sig(&unw_context)  = context->SegGs;
    EFL_sig(&unw_context) = context->EFlags;
    rc = unw_init_local( &cursor, &unw_context );
#endif
    if (rc != UNW_ESUCCESS)
    {
        WARN( "setup failed: %d\n", rc );
        return STATUS_INVALID_DISPOSITION;
    }

    *frame = context->Rsp;

    rc = unw_get_proc_info(&cursor, &info);
    if (rc != UNW_ESUCCESS && rc != UNW_ENOINFO)
    {
        WARN( "failed to get info: %d\n", rc );
        return STATUS_INVALID_DISPOSITION;
    }
    /* end_ip == start_ip + 1 marks a dummy one-byte entry — treat as no info */
    if (rc == UNW_ENOINFO || ip < info.start_ip || ip > info.end_ip || info.end_ip == info.start_ip + 1)
        return STATUS_UNSUCCESSFUL;

    TRACE( "ip %#lx function %#lx-%#lx personality %#lx lsda %#lx fde %#lx\n",
           ip, (unsigned long)info.start_ip, (unsigned long)info.end_ip, (unsigned long)info.handler,
           (unsigned long)info.lsda, (unsigned long)info.unwind_info );

    if (!(rc = unw_step( &cursor )))
    {
        WARN( "last frame\n" );
        return STATUS_UNSUCCESSFUL;
    }
    if (rc < 0)
    {
        WARN( "failed to unwind: %d\n", rc );
        return STATUS_INVALID_DISPOSITION;
    }

    /* copy the caller's registers back into the Win32 context */
    unw_get_reg( &cursor, UNW_REG_IP,     (unw_word_t *)&context->Rip );
    unw_get_reg( &cursor, UNW_REG_SP,     (unw_word_t *)&context->Rsp );
    unw_get_reg( &cursor, UNW_X86_64_RAX, (unw_word_t *)&context->Rax );
    unw_get_reg( &cursor, UNW_X86_64_RDX, (unw_word_t *)&context->Rdx );
    unw_get_reg( &cursor, UNW_X86_64_RCX, (unw_word_t *)&context->Rcx );
    unw_get_reg( &cursor, UNW_X86_64_RBX, (unw_word_t *)&context->Rbx );
    unw_get_reg( &cursor, UNW_X86_64_RSI, (unw_word_t *)&context->Rsi );
    unw_get_reg( &cursor, UNW_X86_64_RDI, (unw_word_t *)&context->Rdi );
    unw_get_reg( &cursor, UNW_X86_64_RBP, (unw_word_t *)&context->Rbp );
    unw_get_reg( &cursor, UNW_X86_64_R8,  (unw_word_t *)&context->R8 );
    unw_get_reg( &cursor, UNW_X86_64_R9,  (unw_word_t *)&context->R9 );
    unw_get_reg( &cursor, UNW_X86_64_R10, (unw_word_t *)&context->R10 );
    unw_get_reg( &cursor, UNW_X86_64_R11, (unw_word_t *)&context->R11 );
    unw_get_reg( &cursor, UNW_X86_64_R12, (unw_word_t *)&context->R12 );
    unw_get_reg( &cursor, UNW_X86_64_R13, (unw_word_t *)&context->R13 );
    unw_get_reg( &cursor, UNW_X86_64_R14, (unw_word_t *)&context->R14 );
    unw_get_reg( &cursor, UNW_X86_64_R15, (unw_word_t *)&context->R15 );
    *handler = (void*)info.handler;
    *handler_data = (void*)info.lsda;

    TRACE( "next function rip=%016lx\n", context->Rip );
    TRACE( "  rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
           context->Rax, context->Rbx, context->Rcx, context->Rdx );
    TRACE( "  rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
           context->Rsi, context->Rdi, context->Rbp, context->Rsp );
    TRACE( "   r8=%016lx  r9=%016lx r10=%016lx r11=%016lx\n",
           context->R8, context->R9, context->R10, context->R11 );
    TRACE( "  r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
           context->R12, context->R13, context->R14, context->R15 );

    return STATUS_SUCCESS;
}
#endif  /* HAVE_LIBUNWIND */
/***********************************************************************
 *           unwind_builtin_dll
 *
 * Unwind one frame of a builtin (ELF) module: prefer the in-process
 * DWARF unwinder when an FDE is found, fall back to libunwind if built
 * with it, otherwise report failure.
 */
NTSTATUS CDECL unwind_builtin_dll( ULONG type, DISPATCHER_CONTEXT *dispatch, CONTEXT *context )
{
    struct dwarf_eh_bases bases;
    /* Rip - 1: look up the call site, not the return address */
    const struct dwarf_fde *fde = _Unwind_Find_FDE( (void *)(context->Rip - 1), &bases );

    if (fde)
        return dwarf_virtual_unwind( context->Rip, &dispatch->EstablisherFrame, context, fde,
                                     &bases, &dispatch->LanguageHandler, &dispatch->HandlerData );
#ifdef HAVE_LIBUNWIND
    return libunwind_virtual_unwind( context->Rip, &dispatch->EstablisherFrame, context,
                                     &dispatch->LanguageHandler, &dispatch->HandlerData );
#else
    return STATUS_UNSUCCESSFUL;
#endif
}
/* Copy a Win32 CONTEXT's general-purpose and segment registers into a
 * host signal context.  Segment registers beyond cs/fs/gs are only stored
 * when the platform's ucontext exposes a slot for them. */
static inline void set_sigcontext( const CONTEXT *context, ucontext_t *sigcontext )
{
    RAX_sig(sigcontext) = context->Rax;
    RCX_sig(sigcontext) = context->Rcx;
    RDX_sig(sigcontext) = context->Rdx;
    RBX_sig(sigcontext) = context->Rbx;
    RSP_sig(sigcontext) = context->Rsp;
    RBP_sig(sigcontext) = context->Rbp;
    RSI_sig(sigcontext) = context->Rsi;
    RDI_sig(sigcontext) = context->Rdi;
    R8_sig(sigcontext)  = context->R8;
    R9_sig(sigcontext)  = context->R9;
    R10_sig(sigcontext) = context->R10;
    R11_sig(sigcontext) = context->R11;
    R12_sig(sigcontext) = context->R12;
    R13_sig(sigcontext) = context->R13;
    R14_sig(sigcontext) = context->R14;
    R15_sig(sigcontext) = context->R15;
    RIP_sig(sigcontext) = context->Rip;
    CS_sig(sigcontext)  = context->SegCs;
    FS_sig(sigcontext)  = context->SegFs;
    GS_sig(sigcontext)  = context->SegGs;
    EFL_sig(sigcontext) = context->EFlags;
#ifdef DS_sig
    DS_sig(sigcontext) = context->SegDs;
#endif
#ifdef ES_sig
    ES_sig(sigcontext) = context->SegEs;
#endif
#ifdef SS_sig
    SS_sig(sigcontext) = context->SegSs;
#endif
}
/***********************************************************************
 *           save_context
 *
 * Set the register values from a sigcontext.
 *
 * Fills xcontext->c from the host signal context: integer/control/segment
 * registers, the cached debug registers, and — when the kernel saved an
 * FPU area — the floating point and (if enabled) extended XState.
 */
static void save_context( struct xcontext *xcontext, const ucontext_t *sigcontext )
{
    CONTEXT *context = &xcontext->c;

    context->ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_DEBUG_REGISTERS;
    context->Rax    = RAX_sig(sigcontext);
    context->Rcx    = RCX_sig(sigcontext);
    context->Rdx    = RDX_sig(sigcontext);
    context->Rbx    = RBX_sig(sigcontext);
    context->Rsp    = RSP_sig(sigcontext);
    context->Rbp    = RBP_sig(sigcontext);
    context->Rsi    = RSI_sig(sigcontext);
    context->Rdi    = RDI_sig(sigcontext);
    context->R8     = R8_sig(sigcontext);
    context->R9     = R9_sig(sigcontext);
    context->R10    = R10_sig(sigcontext);
    context->R11    = R11_sig(sigcontext);
    context->R12    = R12_sig(sigcontext);
    context->R13    = R13_sig(sigcontext);
    context->R14    = R14_sig(sigcontext);
    context->R15    = R15_sig(sigcontext);
    context->Rip    = RIP_sig(sigcontext);
    context->SegCs  = CS_sig(sigcontext);
    context->SegFs  = FS_sig(sigcontext);
    context->SegGs  = GS_sig(sigcontext);
    context->EFlags = EFL_sig(sigcontext);
    /* ds/es/ss may not be present in the host sigcontext; if not, read the
     * live registers (the signal handler runs with the same selectors) */
#ifdef DS_sig
    context->SegDs  = DS_sig(sigcontext);
#else
    __asm__("movw %%ds,%0" : "=m" (context->SegDs));
#endif
#ifdef ES_sig
    context->SegEs  = ES_sig(sigcontext);
#else
    __asm__("movw %%es,%0" : "=m" (context->SegEs));
#endif
#ifdef SS_sig
    context->SegSs  = SS_sig(sigcontext);
#else
    __asm__("movw %%ss,%0" : "=m" (context->SegSs));
#endif
    /* debug registers come from the per-thread cache, not the sigcontext */
    context->Dr0    = amd64_thread_data()->dr0;
    context->Dr1    = amd64_thread_data()->dr1;
    context->Dr2    = amd64_thread_data()->dr2;
    context->Dr3    = amd64_thread_data()->dr3;
    context->Dr6    = amd64_thread_data()->dr6;
    context->Dr7    = amd64_thread_data()->dr7;
    if (FPU_sig(sigcontext))
    {
        XSTATE *xs;

        context->ContextFlags |= CONTEXT_FLOATING_POINT;
        context->u.FltSave = *FPU_sig(sigcontext);
        context->MxCsr = context->u.FltSave.MxCsr;
        if (user_shared_data->XState.EnabledFeatures && (xs = XState_sig(FPU_sig(sigcontext))))
        {
            /* xcontext and sigcontext are both on the signal stack, so we can
             * just reference sigcontext without overflowing 32 bit XState.Offset */
            context_init_xstate( context, xs );
            assert( xcontext->c_ex.XState.Offset == (BYTE *)xs - (BYTE *)&xcontext->c_ex );
            xcontext->host_compaction_mask = xs->CompactionMask;
        }
    }
}
/***********************************************************************
 *           save_xstate
 *
 * Save the XState context
 *
 * Executes xsavec (compacted) or xsave into an aligned scratch area with
 * a mask restricted to the AVX component, then copies the header and, if
 * AVX state was saved, the YmmContext into the CONTEXT's XSTATE area.
 * Returns STATUS_INVALID_PARAMETER / STATUS_BUFFER_OVERFLOW on bad or
 * too-small XState buffers.
 */
static inline NTSTATUS save_xstate( CONTEXT *context )
{
    CONTEXT_EX *context_ex = (CONTEXT_EX *)(context + 1);
    DECLSPEC_ALIGN(64) struct
    {
        XSAVE_FORMAT xsave;   /* legacy area required in front of the XSTATE header */
        XSTATE xstate;
    }
    xsave_area;
    XSTATE *xs;

    if (!(user_shared_data->XState.EnabledFeatures && (xs = xstate_from_context( context ))))
        return STATUS_SUCCESS;   /* nothing to save */

    if (context_ex->XState.Length < offsetof(XSTATE, YmmContext)
        || context_ex->XState.Length > sizeof(XSTATE))
        return STATUS_INVALID_PARAMETER;

    if (user_shared_data->XState.CompactionEnabled)
    {
        /* xsavec doesn't use anything from the save area. */
        __asm__ volatile( "xsavec %0" : "=m"(xsave_area)
                          : "a" ((unsigned int)(xs->CompactionMask & (1 << XSTATE_AVX))), "d" (0) );
    }
    else
    {
        /* xsave preserves those bits in the mask which are not in EDX:EAX, so zero it. */
        xsave_area.xstate.Mask = xsave_area.xstate.CompactionMask = 0;
        __asm__ volatile( "xsave %0" : "=m"(xsave_area)
                          : "a" ((unsigned int)(xs->Mask & (1 << XSTATE_AVX))), "d" (0) );
    }

    /* copy the XSTATE header, then the AVX payload only if it was saved */
    memcpy(xs, &xsave_area.xstate, offsetof(XSTATE, YmmContext));
    if (xs->Mask & (1 << XSTATE_AVX))
    {
        if (context_ex->XState.Length < sizeof(XSTATE))
            return STATUS_BUFFER_OVERFLOW;

        memcpy(&xs->YmmContext, &xsave_area.xstate.YmmContext, sizeof(xs->YmmContext));
    }
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           restore_context
 *
 * Build a sigcontext from the register values.
 *
 * Writes the CONTEXT back into the host signal context so that returning
 * from the signal handler resumes with these registers; also refreshes the
 * per-thread debug register cache and the host xsave compaction mask.
 */
static void restore_context( const struct xcontext *xcontext, ucontext_t *sigcontext )
{
    const CONTEXT *context = &xcontext->c;
    XSTATE *xs;

    amd64_thread_data()->dr0 = context->Dr0;
    amd64_thread_data()->dr1 = context->Dr1;
    amd64_thread_data()->dr2 = context->Dr2;
    amd64_thread_data()->dr3 = context->Dr3;
    amd64_thread_data()->dr6 = context->Dr6;
    amd64_thread_data()->dr7 = context->Dr7;
    set_sigcontext( context, sigcontext );
    if (FPU_sig(sigcontext)) *FPU_sig(sigcontext) = context->u.FltSave;
    /* restore the host's compaction mask, which save_context stashed away */
    if (user_shared_data->XState.EnabledFeatures && (xs = XState_sig(FPU_sig(sigcontext))))
        xs->CompactionMask = xcontext->host_compaction_mask;
}
/***********************************************************************
 *           set_full_cpu_context
 *
 * Set the new CPU context.
 *
 * Builds an iretq frame on the stack (rip/cs/eflags/rsp/ss taken from the
 * CONTEXT), clears the per-thread syscall frame, loads every GPR and the
 * fxsave area from the CONTEXT, and jumps to the new context via iretq.
 * NOTE(review): the first ("subq $40,%rsp") and final ("iretq") asm lines
 * were reconstructed — confirm against the original file.
 */
extern void set_full_cpu_context( const CONTEXT *context );
__ASM_GLOBAL_FUNC( set_full_cpu_context,
                   "subq $40,%rsp\n\t"              /* room for the 5-slot iretq frame */
                   __ASM_SEH(".seh_stackalloc 0x40\n\t")
                   __ASM_SEH(".seh_endprologue\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset 40\n\t")
                   "movq %gs:0x30,%rdx\n\t"         /* TEB */
                   "movw 0x38(%rdi),%ax\n\t"        /* context->SegCs */
                   "movq %rax,8(%rsp)\n\t"
                   "movw 0x42(%rdi),%ax\n\t"        /* context->SegSs */
                   "movq %rax,32(%rsp)\n\t"
                   "movq 0x44(%rdi),%rax\n\t"       /* context->Eflags */
                   "movq %rax,16(%rsp)\n\t"
                   "movq $0,0x328(%rdx)\n\t"        /* amd64_thread_data()->syscall_frame */
                   "movq 0x80(%rdi),%rcx\n\t"       /* context->Rcx */
                   "movq 0x88(%rdi),%rdx\n\t"       /* context->Rdx */
                   "movq 0x90(%rdi),%rbx\n\t"       /* context->Rbx */
                   "movq 0x98(%rdi),%rax\n\t"       /* context->Rsp */
                   "movq %rax,24(%rsp)\n\t"
                   "movq 0xa0(%rdi),%rbp\n\t"       /* context->Rbp */
                   "movq 0xa8(%rdi),%rsi\n\t"       /* context->Rsi */
                   "movq 0xb8(%rdi),%r8\n\t"        /* context->R8 */
                   "movq 0xc0(%rdi),%r9\n\t"        /* context->R9 */
                   "movq 0xc8(%rdi),%r10\n\t"       /* context->R10 */
                   "movq 0xd0(%rdi),%r11\n\t"       /* context->R11 */
                   "movq 0xd8(%rdi),%r12\n\t"       /* context->R12 */
                   "movq 0xe0(%rdi),%r13\n\t"       /* context->R13 */
                   "movq 0xe8(%rdi),%r14\n\t"       /* context->R14 */
                   "movq 0xf0(%rdi),%r15\n\t"       /* context->R15 */
                   "movq 0xf8(%rdi),%rax\n\t"       /* context->Rip */
                   "movq %rax,(%rsp)\n\t"
                   "fxrstor 0x100(%rdi)\n\t"        /* context->FltSave */
                   "movq 0x78(%rdi),%rax\n\t"       /* context->Rax */
                   "movq 0xb0(%rdi),%rdi\n\t"       /* context->Rdi */
                   "iretq" )
/***********************************************************************
 *           restore_xstate
 *
 * Restore the XState context.
 *
 * Runs xrstor64 on the XSTATE area embedded in the CONTEXT (mask = AVX
 * component only, eax=4/edx=0).  For the non-compacted form, Mxcsr must be
 * pre-seeded at the fixed legacy-area offset in front of the XSTATE.
 */
static void restore_xstate( const CONTEXT *context )
{
    XSAVE_FORMAT *xrstor_base;
    XSTATE *xs;

    if (!(user_shared_data->XState.EnabledFeatures && (xs = xstate_from_context( context ))))
        return;

    /* xrstor expects a legacy XSAVE_FORMAT area directly before the XSTATE header */
    xrstor_base = (XSAVE_FORMAT *)xs - 1;

    if (!(xs->CompactionMask & ((ULONG64)1 << 63)))   /* bit 63 = compacted form */
    {
        /* Non-compacted xrstor will load Mxcsr regardless of the specified mask. Loading garbage there
         * may lead to fault. We have only padding, no more used EXCEPTION_RECORD or unused context fields
         * at the MxCsr restore location, so just put it there. */
        assert( (void *)&xrstor_base->MxCsr > (void *)context->VectorRegister );
        xrstor_base->MxCsr      = context->u.FltSave.MxCsr;
        xrstor_base->MxCsr_Mask = context->u.FltSave.MxCsr_Mask;
    }
    __asm__ volatile( "xrstor64 %0" : : "m"(*xrstor_base), "a" (4), "d" (0) );
}
1648 /***********************************************************************
1649 * get_server_context_flags
1651 * Convert CPU-specific flags to generic server flags
1653 static unsigned int get_server_context_flags( DWORD flags
)
1655 unsigned int ret
= 0;
1657 flags
&= ~CONTEXT_AMD64
; /* get rid of CPU id */
1658 if (flags
& CONTEXT_CONTROL
) ret
|= SERVER_CTX_CONTROL
;
1659 if (flags
& CONTEXT_INTEGER
) ret
|= SERVER_CTX_INTEGER
;
1660 if (flags
& CONTEXT_SEGMENTS
) ret
|= SERVER_CTX_SEGMENTS
;
1661 if (flags
& CONTEXT_FLOATING_POINT
) ret
|= SERVER_CTX_FLOATING_POINT
;
1662 if (flags
& CONTEXT_DEBUG_REGISTERS
) ret
|= SERVER_CTX_DEBUG_REGISTERS
;
1663 if (flags
& CONTEXT_XSTATE
) ret
|= SERVER_CTX_YMM_REGISTERS
;
/***********************************************************************
 *           context_to_server
 *
 * Convert a register context to the server format.
 *
 * Copies only the register groups selected by from->ContextFlags; the
 * xstate (YMM) part is delegated to xstate_to_server().
 */
NTSTATUS context_to_server( context_t *to, const CONTEXT *from )
{
    DWORD flags = from->ContextFlags & ~CONTEXT_AMD64;  /* get rid of CPU id */

    memset( to, 0, sizeof(*to) );
    to->cpu = CPU_x86_64;

    if (flags & CONTEXT_CONTROL)
    {
        to->flags |= SERVER_CTX_CONTROL;
        to->ctl.x86_64_regs.rbp   = from->Rbp;
        to->ctl.x86_64_regs.rip   = from->Rip;
        to->ctl.x86_64_regs.rsp   = from->Rsp;
        to->ctl.x86_64_regs.cs    = from->SegCs;
        to->ctl.x86_64_regs.ss    = from->SegSs;
        to->ctl.x86_64_regs.flags = from->EFlags;
    }
    if (flags & CONTEXT_INTEGER)
    {
        to->flags |= SERVER_CTX_INTEGER;
        to->integer.x86_64_regs.rax = from->Rax;
        to->integer.x86_64_regs.rcx = from->Rcx;
        to->integer.x86_64_regs.rdx = from->Rdx;
        to->integer.x86_64_regs.rbx = from->Rbx;
        to->integer.x86_64_regs.rsi = from->Rsi;
        to->integer.x86_64_regs.rdi = from->Rdi;
        to->integer.x86_64_regs.r8  = from->R8;
        to->integer.x86_64_regs.r9  = from->R9;
        to->integer.x86_64_regs.r10 = from->R10;
        to->integer.x86_64_regs.r11 = from->R11;
        to->integer.x86_64_regs.r12 = from->R12;
        to->integer.x86_64_regs.r13 = from->R13;
        to->integer.x86_64_regs.r14 = from->R14;
        to->integer.x86_64_regs.r15 = from->R15;
    }
    if (flags & CONTEXT_SEGMENTS)
    {
        to->flags |= SERVER_CTX_SEGMENTS;
        to->seg.x86_64_regs.ds = from->SegDs;
        to->seg.x86_64_regs.es = from->SegEs;
        to->seg.x86_64_regs.fs = from->SegFs;
        to->seg.x86_64_regs.gs = from->SegGs;
    }
    if (flags & CONTEXT_FLOATING_POINT)
    {
        to->flags |= SERVER_CTX_FLOATING_POINT;
        memcpy( to->fp.x86_64_regs.fpregs, &from->u.FltSave, sizeof(to->fp.x86_64_regs.fpregs) );
    }
    if (flags & CONTEXT_DEBUG_REGISTERS)
    {
        to->flags |= SERVER_CTX_DEBUG_REGISTERS;
        to->debug.x86_64_regs.dr0 = from->Dr0;
        to->debug.x86_64_regs.dr1 = from->Dr1;
        to->debug.x86_64_regs.dr2 = from->Dr2;
        to->debug.x86_64_regs.dr3 = from->Dr3;
        to->debug.x86_64_regs.dr6 = from->Dr6;
        to->debug.x86_64_regs.dr7 = from->Dr7;
    }
    /* xstate_to_server handles a NULL xstate pointer when CONTEXT_XSTATE is
     * not set — presumably a no-op in that case; confirm in its definition */
    xstate_to_server( to, xstate_from_context( from ) );
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           context_from_server
 *
 * Convert a register context from the server format.
 *
 * Sets the bits in to->ContextFlags for each register group the server
 * actually supplied; rejects contexts for a different CPU.
 */
NTSTATUS context_from_server( CONTEXT *to, const context_t *from )
{
    if (from->cpu != CPU_x86_64) return STATUS_INVALID_PARAMETER;

    /* preserve the caller's 0x40 bit (presumably the CONTEXT_XSTATE request
     * bit — confirm against the CONTEXT_XSTATE definition) */
    to->ContextFlags = CONTEXT_AMD64 | (to->ContextFlags & 0x40);
    if (from->flags & SERVER_CTX_CONTROL)
    {
        to->ContextFlags |= CONTEXT_CONTROL;
        to->Rbp    = from->ctl.x86_64_regs.rbp;
        to->Rip    = from->ctl.x86_64_regs.rip;
        to->Rsp    = from->ctl.x86_64_regs.rsp;
        to->SegCs  = from->ctl.x86_64_regs.cs;
        to->SegSs  = from->ctl.x86_64_regs.ss;
        to->EFlags = from->ctl.x86_64_regs.flags;
    }

    if (from->flags & SERVER_CTX_INTEGER)
    {
        to->ContextFlags |= CONTEXT_INTEGER;
        to->Rax = from->integer.x86_64_regs.rax;
        to->Rcx = from->integer.x86_64_regs.rcx;
        to->Rdx = from->integer.x86_64_regs.rdx;
        to->Rbx = from->integer.x86_64_regs.rbx;
        to->Rsi = from->integer.x86_64_regs.rsi;
        to->Rdi = from->integer.x86_64_regs.rdi;
        to->R8  = from->integer.x86_64_regs.r8;
        to->R9  = from->integer.x86_64_regs.r9;
        to->R10 = from->integer.x86_64_regs.r10;
        to->R11 = from->integer.x86_64_regs.r11;
        to->R12 = from->integer.x86_64_regs.r12;
        to->R13 = from->integer.x86_64_regs.r13;
        to->R14 = from->integer.x86_64_regs.r14;
        to->R15 = from->integer.x86_64_regs.r15;
    }
    if (from->flags & SERVER_CTX_SEGMENTS)
    {
        to->ContextFlags |= CONTEXT_SEGMENTS;
        to->SegDs = from->seg.x86_64_regs.ds;
        to->SegEs = from->seg.x86_64_regs.es;
        to->SegFs = from->seg.x86_64_regs.fs;
        to->SegGs = from->seg.x86_64_regs.gs;
    }
    if (from->flags & SERVER_CTX_FLOATING_POINT)
    {
        to->ContextFlags |= CONTEXT_FLOATING_POINT;
        memcpy( &to->u.FltSave, from->fp.x86_64_regs.fpregs, sizeof(from->fp.x86_64_regs.fpregs) );
        to->MxCsr = to->u.FltSave.MxCsr;
    }
    if (from->flags & SERVER_CTX_DEBUG_REGISTERS)
    {
        to->ContextFlags |= CONTEXT_DEBUG_REGISTERS;
        to->Dr0 = from->debug.x86_64_regs.dr0;
        to->Dr1 = from->debug.x86_64_regs.dr1;
        to->Dr2 = from->debug.x86_64_regs.dr2;
        to->Dr3 = from->debug.x86_64_regs.dr3;
        to->Dr6 = from->debug.x86_64_regs.dr6;
        to->Dr7 = from->debug.x86_64_regs.dr7;
    }
    xstate_from_server( xstate_from_context( to ), from );
    return STATUS_SUCCESS;
}
/***********************************************************************
 *              NtSetContextThread  (NTDLL.@)
 *              ZwSetContextThread  (NTDLL.@)
 *
 * Sets a thread's register context.  For the current thread most groups
 * are applied in-process; debug registers (unless unchanged from the
 * cached values) and other threads go through a server call.
 */
NTSTATUS WINAPI NtSetContextThread( HANDLE handle, const CONTEXT *context )
{
    NTSTATUS ret = STATUS_SUCCESS;
    DWORD flags = context->ContextFlags & ~CONTEXT_AMD64;
    BOOL self = (handle == GetCurrentThread());

    /* debug registers require a server call */
    if (self && (flags & CONTEXT_DEBUG_REGISTERS))
        /* only stay in-process if the requested values match the cache */
        self = (amd64_thread_data()->dr0 == context->Dr0 &&
                amd64_thread_data()->dr1 == context->Dr1 &&
                amd64_thread_data()->dr2 == context->Dr2 &&
                amd64_thread_data()->dr3 == context->Dr3 &&
                amd64_thread_data()->dr6 == context->Dr6 &&
                amd64_thread_data()->dr7 == context->Dr7);

    if (!self)
    {
        context_t server_context;

        context_to_server( &server_context, context );
        ret = set_thread_context( handle, &server_context, &self );
        if (ret || !self) return ret;
        if (flags & CONTEXT_DEBUG_REGISTERS)
        {
            /* the server applied them to us; refresh the local cache */
            amd64_thread_data()->dr0 = context->Dr0;
            amd64_thread_data()->dr1 = context->Dr1;
            amd64_thread_data()->dr2 = context->Dr2;
            amd64_thread_data()->dr3 = context->Dr3;
            amd64_thread_data()->dr6 = context->Dr6;
            amd64_thread_data()->dr7 = context->Dr7;
        }
    }

    /* NOTE(review): the guard for this call was lost in extraction —
     * presumably `if (flags & CONTEXT_XSTATE)`; confirm against the original */
    if (flags & CONTEXT_XSTATE)
        restore_xstate( context );

    if (flags & CONTEXT_FULL)
    {
        if (!(flags & CONTEXT_CONTROL))
            FIXME( "setting partial context (%x) not supported\n", flags );
        else
            set_full_cpu_context( context );  /* does not return normally */
    }
    return ret;
}
/***********************************************************************
 *              NtGetContextThread  (NTDLL.@)
 *              ZwGetContextThread  (NTDLL.@)
 *
 * Retrieves a thread's register context.  For the current thread the
 * registers are reconstructed from the syscall frame; debug registers and
 * other threads are fetched from the server.  The return value is the
 * status of the early save_xstate() call.
 */
NTSTATUS WINAPI NtGetContextThread( HANDLE handle, CONTEXT *context )
{
    NTSTATUS ret, xsave_status;
    DWORD needed_flags;
    struct syscall_frame *frame = amd64_thread_data()->syscall_frame;
    BOOL self = (handle == GetCurrentThread());

    if (!context) return STATUS_INVALID_PARAMETER;

    /* Save xstate before any calls which can potentially change volatile ymm registers.
     * E. g., debug output will clobber ymm registers. */
    xsave_status = self ? save_xstate( context ) : STATUS_SUCCESS;

    needed_flags = context->ContextFlags & ~CONTEXT_AMD64;

    /* debug registers require a server call */
    if (context->ContextFlags & (CONTEXT_DEBUG_REGISTERS & ~CONTEXT_AMD64)) self = FALSE;

    if (!self)
    {
        context_t server_context;
        unsigned int server_flags = get_server_context_flags( context->ContextFlags );

        if ((ret = get_thread_context( handle, &server_context, server_flags, &self ))) return ret;
        if ((ret = context_from_server( context, &server_context ))) return ret;
        needed_flags &= ~context->ContextFlags;  /* groups the server already filled */
    }

    if (self)
    {
        if (needed_flags & CONTEXT_INTEGER)
        {
            /* volatile registers are not preserved across the syscall;
             * report them as zero (reconstructed — confirm exact zeroed set) */
            context->Rax = 0;
            context->Rbx = frame->rbx;
            context->Rcx = 0;
            context->Rdx = 0;
            context->Rsi = frame->rsi;
            context->Rdi = frame->rdi;
            context->R8  = 0;
            context->R9  = 0;
            context->R10 = 0;
            context->R11 = 0;
            context->R12 = frame->r12;
            context->R13 = frame->r13;
            context->R14 = frame->r14;
            context->R15 = frame->r15;
            context->ContextFlags |= CONTEXT_INTEGER;
        }
        if (needed_flags & CONTEXT_CONTROL)
        {
            context->Rsp    = (ULONG64)&frame->ret_addr;
            context->Rbp    = frame->rbp;
            context->Rip    = frame->thunk_addr;
            context->EFlags = 0x202;   /* IF | reserved bit 1 */
            __asm__( "movw %%cs,%0" : "=g" (context->SegCs) );
            __asm__( "movw %%ss,%0" : "=g" (context->SegSs) );
            context->ContextFlags |= CONTEXT_CONTROL;
        }
        if (needed_flags & CONTEXT_SEGMENTS)
        {
            __asm__( "movw %%ds,%0" : "=g" (context->SegDs) );
            __asm__( "movw %%es,%0" : "=g" (context->SegEs) );
            __asm__( "movw %%fs,%0" : "=g" (context->SegFs) );
            __asm__( "movw %%gs,%0" : "=g" (context->SegGs) );
            context->ContextFlags |= CONTEXT_SEGMENTS;
        }
        if (needed_flags & CONTEXT_FLOATING_POINT)
        {
            __asm__( "fxsave %0" : "=m" (context->u.FltSave) );
            context->MxCsr = frame->mxcsr;
            /* xmm0-5 are volatile and unknown; xmm6-15 come from the frame */
            memset( &context->u.s.Xmm0, 0, 6 * sizeof(context->u.s.Xmm0) );
            memcpy( &context->u.s.Xmm6, frame->xmm, 10 * sizeof(context->u.s.Xmm0) );
            context->ContextFlags |= CONTEXT_FLOATING_POINT;
        }
    }
    /* update the cached version of the debug registers */
    if (context->ContextFlags & (CONTEXT_DEBUG_REGISTERS & ~CONTEXT_AMD64))
    {
        amd64_thread_data()->dr0 = context->Dr0;
        amd64_thread_data()->dr1 = context->Dr1;
        amd64_thread_data()->dr2 = context->Dr2;
        amd64_thread_data()->dr3 = context->Dr3;
        amd64_thread_data()->dr6 = context->Dr6;
        amd64_thread_data()->dr7 = context->Dr7;
    }

    return xsave_status;
}
/* Trampoline that setup_raise_exception points the faulting thread at:
 * it transfers control to the dispatcher whose address was placed in %r8
 * (pKiUserExceptionDispatcher).
 * NOTE(review): the asm body was lost in extraction; reconstructed —
 * confirm against the original file. */
extern void CDECL raise_func_trampoline( void *dispatcher );

__ASM_GLOBAL_FUNC( raise_func_trampoline,
                   __ASM_CFI(".cfi_signal_frame\n\t")
                   "jmpq *%r8" )
/***********************************************************************
 *           setup_raise_exception
 *
 * Prepare the signal context so that returning from the signal handler
 * dispatches the exception: sends the debug event first, then copies the
 * record/context (and optional XSTATE) onto an aligned exception stack and
 * redirects the signal return to raise_func_trampoline, which enters
 * KiUserExceptionDispatcher (address passed in %r8).
 */
static void setup_raise_exception( ucontext_t *sigcontext, EXCEPTION_RECORD *rec, struct xcontext *xcontext )
{
    void *stack_ptr = (void *)(RSP_sig(sigcontext) & ~15);  /* 16-byte align */
    CONTEXT *context = &xcontext->c;
    struct stack_layout *stack;
    size_t stack_size;
    NTSTATUS status;
    XSTATE *src_xs;

    if (rec->ExceptionCode == EXCEPTION_SINGLE_STEP)
    {
        /* when single stepping can't tell whether this is a hw bp or a
         * single step interrupt. try to avoid as much overhead as possible
         * and only do a server call if there is any hw bp enabled. */

        if (!(context->EFlags & 0x100) || (context->Dr7 & 0xff))
        {
            /* (possible) hardware breakpoint, fetch the debug registers */
            DWORD saved_flags = context->ContextFlags;
            context->ContextFlags = CONTEXT_DEBUG_REGISTERS;
            NtGetContextThread(GetCurrentThread(), context);
            context->ContextFlags |= saved_flags;  /* restore flags */
        }
        context->EFlags &= ~0x100;  /* clear single-step flag */
    }

    status = send_debug_event( rec, context, TRUE );
    if (status == DBG_CONTINUE || status == DBG_EXCEPTION_HANDLED)
    {
        /* debugger handled it: resume at the (possibly modified) context */
        restore_context( xcontext, sigcontext );
        return;
    }

    /* fix up instruction pointer in context for EXCEPTION_BREAKPOINT */
    if (rec->ExceptionCode == EXCEPTION_BREAKPOINT) context->Rip--;

    stack_size = sizeof(*stack) + 0x20;
    if ((src_xs = xstate_from_context( context )))
    {
        /* extra room so the XSTATE copy can be 64-byte aligned below stack_ptr */
        stack_size += (ULONG_PTR)stack_ptr - 0x20 - (((ULONG_PTR)stack_ptr - 0x20
                      - sizeof(XSTATE)) & ~(ULONG_PTR)63);
    }

    stack = virtual_setup_exception( stack_ptr, stack_size, rec );
    /* NOTE(review): a line copying *rec into the stack layout was lost in
     * extraction — presumably `stack->rec = *rec;`; confirm */
    stack->rec = *rec;
    stack->context = *context;
    if (src_xs)
    {
        XSTATE *dst_xs = (XSTATE *)stack->xstate;

        assert( !((ULONG_PTR)dst_xs & 63) );  /* xsave area must be 64-byte aligned */
        context_init_xstate( &stack->context, stack->xstate );
        memset( dst_xs, 0, offsetof(XSTATE, YmmContext) );
        dst_xs->CompactionMask = user_shared_data->XState.CompactionEnabled ? 0x8000000000000004 : 0;
        if (src_xs->Mask & 4)   /* AVX component present */
        {
            dst_xs->Mask = 4;
            memcpy( &dst_xs->YmmContext, &src_xs->YmmContext, sizeof(dst_xs->YmmContext) );
        }
    }

    /* resume in the trampoline, which jumps to the dispatcher in %r8 */
    RIP_sig(sigcontext) = (ULONG_PTR)raise_func_trampoline;
    R8_sig(sigcontext)  = (ULONG_PTR)pKiUserExceptionDispatcher;
    RSP_sig(sigcontext) = (ULONG_PTR)stack;
    /* clear single-step, direction, and align check flag */
    EFL_sig(sigcontext) &= ~(0x100|0x400|0x40000);
}
2024 /***********************************************************************
2027 * Setup a proper stack frame for the raise function, and modify the
2028 * sigcontext so that the return from the signal handler will call
2029 * the raise function.
2031 static void setup_exception( ucontext_t
*sigcontext
, EXCEPTION_RECORD
*rec
)
2033 struct xcontext context
;
2035 rec
->ExceptionAddress
= (void *)RIP_sig(sigcontext
);
2036 save_context( &context
, sigcontext
);
2037 setup_raise_exception( sigcontext
, rec
, &context
);
2041 /***********************************************************************
2042 * call_user_apc_dispatcher
/* C helper called from the call_user_apc_dispatcher asm below: copies the
 * APC context into the freshly carved apc_stack_layout. The branch below
 * captures the current full context (with Rax preset to STATUS_USER_APC)
 * when no explicit context was supplied.
 * NOTE(review): the declaration of local `c`, the `if (!context)` guard and
 * the trailing `return stack;` appear lost in extraction — confirm. */
2044 struct apc_stack_layout
* WINAPI
setup_user_apc_dispatcher_stack( CONTEXT
*context
, struct apc_stack_layout
*stack
)
2050 c
.ContextFlags
= CONTEXT_FULL
;
2051 NtGetContextThread( GetCurrentThread(), &c
);
2052 c
.Rax
= STATUS_USER_APC
;
2055 memmove( &stack
->context
, context
, sizeof(stack
->context
) );
/* Asm entry: carves an apc_stack_layout on the target stack (either below
 * context->Rsp, below the syscall frame's return address, or below the
 * current rsp, whichever is lower), fills it via
 * setup_user_apc_dispatcher_stack, then restores the nonvolatile registers
 * from the saved CONTEXT and enters the user-mode APC dispatcher.
 * Hard-coded offsets (0x328 = syscall_frame in TEB-relative thread data,
 * 0x98 = CONTEXT.Rsp, 0x510 = sizeof(struct apc_stack_layout)) must stay in
 * sync with the structures they mirror. */
2059 __ASM_GLOBAL_FUNC( call_user_apc_dispatcher
,
2060 "movq 0x28(%rsp),%rsi\n\t" /* func */
2061 "movq 0x30(%rsp),%rdi\n\t" /* dispatcher */
2062 "movq %gs:0x30,%rbx\n\t"
2063 "movq %rdx,%r12\n\t" /* ctx */
2064 "movq %r8,%r13\n\t" /* arg1 */
2065 "movq %r9,%r14\n\t" /* arg2 */
2067 "movq 0x98(%rcx),%rdx\n\t" /* context->Rsp */
2069 "1:\tmovq 0x328(%rbx),%rax\n\t" /* amd64_thread_data()->syscall_frame */
2070 "leaq 0xf0(%rax),%rdx\n\t" /* &amd64_thread_data()->syscall_frame->ret_addr */
2071 "2:\tsubq $0x510,%rdx\n\t" /* sizeof(struct apc_stack_layout) */
2072 "andq $~0xf,%rdx\n\t"
2073 "addq $8,%rsp\n\t" /* pop return address */
2074 "cmpq %rsp,%rdx\n\t"
2075 "cmovbq %rdx,%rsp\n\t"
2076 "subq $0x20,%rsp\n\t"
2077 "call " __ASM_NAME("setup_user_apc_dispatcher_stack") "\n\t"
2078 "movq %rax,%rsp\n\t"
2079 "leaq 0x30(%rsp),%rcx\n\t" /* context */
2080 "movq %r12,%rdx\n\t" /* ctx */
2081 "movq %r13,%r8\n\t" /* arg1 */
2082 "movq %r14,%r9\n" /* arg2 */
2083 "movq $0,0x328(%rbx)\n\t" /* amd64_thread_data()->syscall_frame */
2084 "movq %rsi,0x20(%rsp)\n\t" /* func */
2085 "movq %rdi,%r10\n\t"
2086 /* Set nonvolatile regs from context. */
2087 "movq 0xa0(%rcx),%rbp\n\t"
2088 "movq 0x90(%rcx),%rbx\n\t"
2089 "movq 0xa8(%rcx),%rsi\n\t"
2090 "movq 0xb0(%rcx),%rdi\n\t"
2091 "movq 0xd8(%rcx),%r12\n\t"
2092 "movq 0xe0(%rcx),%r13\n\t"
2093 "movq 0xe8(%rcx),%r14\n\t"
2094 "movq 0xf0(%rcx),%r15\n\t"
2095 "movdqa 0x200(%rcx),%xmm6\n\t"
2096 "movdqa 0x210(%rcx),%xmm7\n\t"
2097 "movdqa 0x220(%rcx),%xmm8\n\t"
2098 "movdqa 0x230(%rcx),%xmm9\n\t"
2099 "movdqa 0x240(%rcx),%xmm10\n\t"
2100 "movdqa 0x250(%rcx),%xmm11\n\t"
2101 "movdqa 0x260(%rcx),%xmm12\n\t"
2102 "movdqa 0x270(%rcx),%xmm13\n\t"
2103 "movdqa 0x280(%rcx),%xmm14\n\t"
2104 "movdqa 0x290(%rcx),%xmm15\n\t"
2105 "pushq 0xf8(%rcx)\n\t" /* context.Rip */
/* NOTE(review): the final jump to the dispatcher (%r10) appears lost in
 * extraction — confirm against upstream. */
2109 /***********************************************************************
2110 * call_raise_user_exception_dispatcher
/* Restores the nonvolatile integer/xmm registers and mxcsr saved in the
 * syscall frame, clears the frame pointer in the thread data (offset 0x328),
 * and switches the stack back to just past the frame (0xf0 = ret_addr). */
2112 __ASM_GLOBAL_FUNC( call_raise_user_exception_dispatcher
,
2113 "movq %gs:0x30,%rdx\n\t"
2114 "movq 0x328(%rdx),%rax\n\t" /* amd64_thread_data()->syscall_frame */
2115 "movdqu 0x0(%rax),%xmm6\n\t" /* frame->xmm[0..19] */
2116 "movdqu 0x10(%rax),%xmm7\n\t"
2117 "movdqu 0x20(%rax),%xmm8\n\t"
2118 "movdqu 0x30(%rax),%xmm9\n\t"
2119 "movdqu 0x40(%rax),%xmm10\n\t"
2120 "movdqu 0x50(%rax),%xmm11\n\t"
2121 "movdqu 0x60(%rax),%xmm12\n\t"
2122 "movdqu 0x70(%rax),%xmm13\n\t"
2123 "movdqu 0x80(%rax),%xmm14\n\t"
2124 "movdqu 0x90(%rax),%xmm15\n\t"
2125 "ldmxcsr 0xa0(%rax)\n\t" /* frame->mxcsr */
2126 "movq 0xa8(%rax),%r12\n\t" /* frame->r12 */
2127 "movq 0xb0(%rax),%r13\n\t" /* frame->r13 */
2128 "movq 0xb8(%rax),%r14\n\t" /* frame->r14 */
2129 "movq 0xc0(%rax),%r15\n\t" /* frame->r15 */
2130 "movq 0xc8(%rax),%rdi\n\t" /* frame->rdi */
2131 "movq 0xd0(%rax),%rsi\n\t" /* frame->rsi */
2132 "movq 0xd8(%rax),%rbx\n\t" /* frame->rbx */
2133 "movq 0xe0(%rax),%rbp\n\t" /* frame->rbp */
2134 "movq $0,0x328(%rdx)\n\t"
2135 "leaq 0xf0(%rax),%rsp\n\t"
/* NOTE(review): trailing jump to the dispatcher appears lost in extraction. */
2139 /***********************************************************************
2140 * call_user_exception_dispatcher
/* C helper for the asm below: copies rec/context (and, when present, the
 * extended XSTATE) into the carved stack_layout, re-linking the XSTATE to
 * the copied context via context_init_xstate.
 * NOTE(review): several lines (braces, an `else` branch, `xs = ...`
 * assignments, the final `return stack;`) appear lost in extraction. */
2142 struct stack_layout
* WINAPI
setup_user_exception_dispatcher_stack( EXCEPTION_RECORD
*rec
, CONTEXT
*context
,
2143 NTSTATUS (WINAPI
*dispatcher
)(EXCEPTION_RECORD
*,CONTEXT
*),
2144 struct stack_layout
*stack
)
2146 if ((context
->ContextFlags
& CONTEXT_XSTATE
) == CONTEXT_XSTATE
)
2148 CONTEXT_EX
*xctx
= (CONTEXT_EX
*)context
+ 1;
2149 XSTATE
*xs
, *src_xs
, xs_buf
;
2151 src_xs
= xstate_from_context(context
);
/* buffer the source XSTATE when it overlaps the destination area */
2152 if ((CONTEXT
*)src_xs
>= &stack
->context
+ 1 || src_xs
+ 1 <= (XSTATE
*)&stack
->context
)
2159 memcpy(xs
, src_xs
, sizeof(*xs
));
2162 memmove(&stack
->context
, context
, sizeof(*context
) + sizeof(*xctx
));
2163 assert(!((ULONG_PTR
)stack
->xstate
& 63));
2164 context_init_xstate(&stack
->context
, stack
->xstate
);
2165 memcpy(stack
->xstate
, xs
, sizeof(*xs
));
2169 memmove(&stack
->context
, context
, sizeof(*context
));
2171 memcpy(&stack
->rec
, rec
, sizeof(*rec
));
2173 /* fix up instruction pointer in context for EXCEPTION_BREAKPOINT */
2174 if (stack
->rec
.ExceptionCode
== EXCEPTION_BREAKPOINT
) stack
->context
.Rip
--;
/* Asm entry: computes the stack_layout address below context->Rsp (with
 * extra 64-byte-aligned room for an XSTATE when CONTEXT_XSTATE is set,
 * tested via bit 6 of ContextFlags at offset 0x30), calls the C helper,
 * restores nonvolatile registers from the copied context and clears the
 * syscall frame before entering the dispatcher. */
2179 __ASM_GLOBAL_FUNC( call_user_exception_dispatcher
,
2180 "movq 0x98(%rdx),%r9\n\t" /* context->Rsp */
2181 "subq $0x20,%r9\n\t" /* Unwind registers save space */
2182 "andq $~0xf,%r9\n\t"
2183 "btl $6,0x30(%rdx)\n\t" /* context->ContextFlags, CONTEXT_XSTATE bit. */
2185 "subq $0x140,%r9\n\t" /* sizeof(XSTATE) */
2187 "1:\tsubq $0x590,%r9\n\t" /* sizeof(struct stack_layout) */
2189 "cmovbq %r9,%rsp\n\t"
2191 "subq $0x20,%rsp\n\t"
2192 "call " __ASM_NAME("setup_user_exception_dispatcher_stack") "\n\t"
2193 "addq $0x20,%rsp\n\t"
2196 "movq 0xa0(%rcx),%rbp\n\t"
2197 "movq 0x90(%rcx),%rbx\n\t"
2198 "movq 0xa8(%rcx),%rsi\n\t"
2199 "movq 0xb0(%rcx),%rdi\n\t"
2200 "movq 0xd8(%rcx),%r12\n\t"
2201 "movq 0xe0(%rcx),%r13\n\t"
2202 "movq 0xe8(%rcx),%r14\n\t"
2203 "movq 0xf0(%rcx),%r15\n\t"
2204 "movdqa 0x200(%rcx),%xmm6\n\t"
2205 "movdqa 0x210(%rcx),%xmm7\n\t"
2206 "movdqa 0x220(%rcx),%xmm8\n\t"
2207 "movdqa 0x230(%rcx),%xmm9\n\t"
2208 "movdqa 0x240(%rcx),%xmm10\n\t"
2209 "movdqa 0x250(%rcx),%xmm11\n\t"
2210 "movdqa 0x260(%rcx),%xmm12\n\t"
2211 "movdqa 0x270(%rcx),%xmm13\n\t"
2212 "movdqa 0x280(%rcx),%xmm14\n\t"
2213 "movdqa 0x290(%rcx),%xmm15\n\t"
2215 "movq %gs:0x30,%rax\n\t"
2216 "movq $0,0x328(%rax)\n\t" /* amd64_thread_data()->syscall_frame */
/* NOTE(review): final jump into the dispatcher appears lost in extraction. */
2219 /***********************************************************************
2220 * is_privileged_instr
2222 * Check if the fault location is a privileged instruction.
2224 static inline DWORD
is_privileged_instr( CONTEXT
*context
)
2227 unsigned int i
, prefix_count
= 0;
2228 unsigned int len
= virtual_uninterrupted_read_memory( (BYTE
*)context
->Rip
, instr
, sizeof(instr
) );
2230 for (i
= 0; i
< len
; i
++) switch (instr
[i
])
2232 /* instruction prefixes */
2233 case 0x2e: /* %cs: */
2234 case 0x36: /* %ss: */
2235 case 0x3e: /* %ds: */
2236 case 0x26: /* %es: */
2237 case 0x40: /* rex */
2238 case 0x41: /* rex */
2239 case 0x42: /* rex */
2240 case 0x43: /* rex */
2241 case 0x44: /* rex */
2242 case 0x45: /* rex */
2243 case 0x46: /* rex */
2244 case 0x47: /* rex */
2245 case 0x48: /* rex */
2246 case 0x49: /* rex */
2247 case 0x4a: /* rex */
2248 case 0x4b: /* rex */
2249 case 0x4c: /* rex */
2250 case 0x4d: /* rex */
2251 case 0x4e: /* rex */
2252 case 0x4f: /* rex */
2253 case 0x64: /* %fs: */
2254 case 0x65: /* %gs: */
2255 case 0x66: /* opcode size */
2256 case 0x67: /* addr size */
2257 case 0xf0: /* lock */
2258 case 0xf2: /* repne */
2259 case 0xf3: /* repe */
2260 if (++prefix_count
>= 15) return EXCEPTION_ILLEGAL_INSTRUCTION
;
2263 case 0x0f: /* extended instruction */
2264 if (i
== len
- 1) return 0;
2265 switch (instr
[i
+ 1])
2267 case 0x06: /* clts */
2268 case 0x08: /* invd */
2269 case 0x09: /* wbinvd */
2270 case 0x20: /* mov crX, reg */
2271 case 0x21: /* mov drX, reg */
2272 case 0x22: /* mov reg, crX */
2273 case 0x23: /* mov reg drX */
2274 return EXCEPTION_PRIV_INSTRUCTION
;
2277 case 0x6c: /* insb (%dx) */
2278 case 0x6d: /* insl (%dx) */
2279 case 0x6e: /* outsb (%dx) */
2280 case 0x6f: /* outsl (%dx) */
2281 case 0xcd: /* int $xx */
2282 case 0xe4: /* inb al,XX */
2283 case 0xe5: /* in (e)ax,XX */
2284 case 0xe6: /* outb XX,al */
2285 case 0xe7: /* out XX,(e)ax */
2286 case 0xec: /* inb (%dx),%al */
2287 case 0xed: /* inl (%dx),%eax */
2288 case 0xee: /* outb %al,(%dx) */
2289 case 0xef: /* outl %eax,(%dx) */
2290 case 0xf4: /* hlt */
2291 case 0xfa: /* cli */
2292 case 0xfb: /* sti */
2293 return EXCEPTION_PRIV_INSTRUCTION
;
2301 /***********************************************************************
2304 * Handle an interrupt.
/* Dispatch `int N` software interrupts (vector derived from the GP-fault
 * error code >> 3): int 0x2c -> STATUS_ASSERTION_FAILURE, int 0x2d ->
 * debug-service breakpoint. Returns TRUE when handled.
 * NOTE(review): several lines (braces, case labels for the outer switch,
 * `return TRUE/FALSE;` and a skip over the int instruction) appear lost in
 * extraction — confirm against upstream. */
2306 static inline BOOL
handle_interrupt( ucontext_t
*sigcontext
, EXCEPTION_RECORD
*rec
, struct xcontext
*xcontext
)
2308 CONTEXT
*context
= &xcontext
->c
;
2310 switch (ERROR_sig(sigcontext
) >> 3)
2313 rec
->ExceptionCode
= STATUS_ASSERTION_FAILURE
;
2316 switch (context
->Rax
)
2318 case 1: /* BREAKPOINT_PRINT */
2319 case 3: /* BREAKPOINT_LOAD_SYMBOLS */
2320 case 4: /* BREAKPOINT_UNLOAD_SYMBOLS */
2321 case 5: /* BREAKPOINT_COMMAND_STRING (>= Win2003) */
2322 RIP_sig(sigcontext
) += 3;
2326 rec
->ExceptionCode
= EXCEPTION_BREAKPOINT
;
2327 rec
->ExceptionAddress
= (void *)context
->Rip
;
2328 rec
->NumberParameters
= 1;
2329 rec
->ExceptionInformation
[0] = context
->Rax
;
2334 setup_raise_exception( sigcontext
, rec
, xcontext
);
2339 /***********************************************************************
2340 * handle_syscall_fault
2342 * Handle a page fault happening during a system call.
/* If a __TRY frame was installed before the syscall frame, longjmp back to
 * it; otherwise unwind straight back to user mode, returning the exception
 * code from the syscall.
 * NOTE(review): the declaration of loop index `i`, braces and the final
 * `return TRUE;` appear lost in extraction. */
2344 static BOOL
handle_syscall_fault( ucontext_t
*sigcontext
, EXCEPTION_RECORD
*rec
, CONTEXT
*context
)
2346 struct syscall_frame
*frame
= amd64_thread_data()->syscall_frame
;
2347 __WINE_FRAME
*wine_frame
= (__WINE_FRAME
*)NtCurrentTeb()->Tib
.ExceptionList
;
/* not inside a syscall: nothing to do here */
2350 if (!frame
) return FALSE
;
2352 TRACE( "code=%x flags=%x addr=%p ip=%lx tid=%04x\n",
2353 rec
->ExceptionCode
, rec
->ExceptionFlags
, rec
->ExceptionAddress
,
2354 context
->Rip
, GetCurrentThreadId() );
2355 for (i
= 0; i
< rec
->NumberParameters
; i
++)
2356 TRACE( " info[%d]=%016lx\n", i
, rec
->ExceptionInformation
[i
] );
2357 TRACE(" rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
2358 context
->Rax
, context
->Rbx
, context
->Rcx
, context
->Rdx
);
2359 TRACE(" rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
2360 context
->Rsi
, context
->Rdi
, context
->Rbp
, context
->Rsp
);
2361 TRACE(" r8=%016lx r9=%016lx r10=%016lx r11=%016lx\n",
2362 context
->R8
, context
->R9
, context
->R10
, context
->R11
);
2363 TRACE(" r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
2364 context
->R12
, context
->R13
, context
->R14
, context
->R15
);
/* a __TRY handler registered below the syscall frame takes priority:
 * arrange for the signal return to call __wine_longjmp( &frame->jmp, 1 ) */
2366 if ((char *)wine_frame
< (char *)frame
)
2368 TRACE( "returning to handler\n" );
2369 RCX_sig(sigcontext
) = (ULONG_PTR
)&wine_frame
->jmp
;
2370 RDX_sig(sigcontext
) = 1;
2371 RIP_sig(sigcontext
) = (ULONG_PTR
)__wine_longjmp
;
2375 XMM_SAVE_AREA32
*fpu
= FPU_sig(sigcontext
);
2377 TRACE( "returning to user mode ip=%016lx ret=%08x\n", frame
->ret_addr
, rec
->ExceptionCode
);
/* restore callee-saved state from the syscall frame so the faulting
 * syscall appears to return rec->ExceptionCode in %rax */
2378 RAX_sig(sigcontext
) = rec
->ExceptionCode
;
2379 RBX_sig(sigcontext
) = frame
->rbx
;
2380 RSI_sig(sigcontext
) = frame
->rsi
;
2381 RDI_sig(sigcontext
) = frame
->rdi
;
2382 RBP_sig(sigcontext
) = frame
->rbp
;
2383 R12_sig(sigcontext
) = frame
->r12
;
2384 R13_sig(sigcontext
) = frame
->r13
;
2385 R14_sig(sigcontext
) = frame
->r14
;
2386 R15_sig(sigcontext
) = frame
->r15
;
2387 RSP_sig(sigcontext
) = (ULONG_PTR
)&frame
->ret_addr
;
2388 RIP_sig(sigcontext
) = frame
->thunk_addr
;
2391 fpu
->MxCsr
=frame
->mxcsr
;
2392 memcpy( fpu
->XmmRegisters
+ 6, frame
->xmm
, sizeof(frame
->xmm
) );
2394 amd64_thread_data()->syscall_frame
= NULL
;
2400 /**********************************************************************
2403 * Handler for SIGSEGV and related errors.
/* Maps the hardware trap number to an NT exception code, gives syscall
 * faults a chance to unwind, then raises the exception to user mode.
 * NOTE(review): switch braces and `break;` statements between cases appear
 * lost in extraction (the embedded original line numbers show gaps). */
2405 static void segv_handler( int signal
, siginfo_t
*siginfo
, void *sigcontext
)
2407 EXCEPTION_RECORD rec
= { 0 };
2408 struct xcontext context
;
2409 ucontext_t
*ucontext
= sigcontext
;
2411 rec
.ExceptionAddress
= (void *)RIP_sig(ucontext
);
2412 save_context( &context
, sigcontext
);
2414 switch(TRAP_sig(ucontext
))
2416 case TRAP_x86_OFLOW
: /* Overflow exception */
2417 rec
.ExceptionCode
= EXCEPTION_INT_OVERFLOW
;
2419 case TRAP_x86_BOUND
: /* Bound range exception */
2420 rec
.ExceptionCode
= EXCEPTION_ARRAY_BOUNDS_EXCEEDED
;
2422 case TRAP_x86_PRIVINFLT
: /* Invalid opcode exception */
2423 rec
.ExceptionCode
= EXCEPTION_ILLEGAL_INSTRUCTION
;
2425 case TRAP_x86_STKFLT
: /* Stack fault */
2426 rec
.ExceptionCode
= EXCEPTION_STACK_OVERFLOW
;
2428 case TRAP_x86_SEGNPFLT
: /* Segment not present exception */
2429 case TRAP_x86_PROTFLT
: /* General protection fault */
2431 WORD err
= ERROR_sig(ucontext
);
2432 if (!err
&& (rec
.ExceptionCode
= is_privileged_instr( &context
.c
))) break;
/* error code with IDT bit set (err & 7 == 2) means a software interrupt */
2433 if ((err
& 7) == 2 && handle_interrupt( ucontext
, &rec
, &context
)) return;
2434 rec
.ExceptionCode
= EXCEPTION_ACCESS_VIOLATION
;
2435 rec
.NumberParameters
= 2;
2436 rec
.ExceptionInformation
[0] = 0;
2437 rec
.ExceptionInformation
[1] = 0xffffffffffffffff;
2440 case TRAP_x86_PAGEFLT
: /* Page fault */
2441 rec
.NumberParameters
= 2;
/* bit 0: write access, bit 3: execute fault, from the page-fault err code */
2442 rec
.ExceptionInformation
[0] = (ERROR_sig(ucontext
) >> 1) & 0x09;
2443 rec
.ExceptionInformation
[1] = (ULONG_PTR
)siginfo
->si_addr
;
2444 rec
.ExceptionCode
= virtual_handle_fault( siginfo
->si_addr
, rec
.ExceptionInformation
[0],
2445 (void *)RSP_sig(ucontext
) );
/* fault fully handled (e.g. guard page / write watch): just resume */
2446 if (!rec
.ExceptionCode
) return;
2448 case TRAP_x86_ALIGNFLT
: /* Alignment check exception */
2449 rec
.ExceptionCode
= EXCEPTION_DATATYPE_MISALIGNMENT
;
2452 ERR( "Got unexpected trap %ld\n", (ULONG_PTR
)TRAP_sig(ucontext
) );
2454 case TRAP_x86_NMI
: /* NMI interrupt */
2455 case TRAP_x86_DNA
: /* Device not available exception */
2456 case TRAP_x86_DOUBLEFLT
: /* Double fault exception */
2457 case TRAP_x86_TSSFLT
: /* Invalid TSS exception */
2458 case TRAP_x86_MCHK
: /* Machine check exception */
2459 case TRAP_x86_CACHEFLT
: /* Cache flush exception */
2460 rec
.ExceptionCode
= EXCEPTION_ILLEGAL_INSTRUCTION
;
2463 if (handle_syscall_fault( sigcontext
, &rec
, &context
.c
)) return;
2464 setup_raise_exception( sigcontext
, &rec
, &context
);
2468 /**********************************************************************
2471 * Handler for SIGTRAP.
/* Distinguishes single-step / hardware-breakpoint traps from int3 and
 * icebp breakpoints, then raises the corresponding NT exception.
 * NOTE(review): braces and `break;` lines appear lost in extraction. */
2473 static void trap_handler( int signal
, siginfo_t
*siginfo
, void *sigcontext
)
2475 EXCEPTION_RECORD rec
= { 0 };
2476 struct xcontext context
;
2477 ucontext_t
*ucontext
= sigcontext
;
2479 rec
.ExceptionAddress
= (void *)RIP_sig(ucontext
);
2480 save_context( &context
, sigcontext
);
2482 switch (siginfo
->si_code
)
2484 case TRAP_TRACE
: /* Single-step exception */
2485 case 4 /* TRAP_HWBKPT */: /* Hardware breakpoint exception */
2486 rec
.ExceptionCode
= EXCEPTION_SINGLE_STEP
;
2488 case TRAP_BRKPT
: /* Breakpoint exception */
2492 /* Check if this is actually icebp instruction */
2493 if (((unsigned char *)RIP_sig(ucontext
))[-1] == 0xF1)
2495 rec
.ExceptionCode
= EXCEPTION_SINGLE_STEP
;
2498 rec
.ExceptionAddress
= (char *)rec
.ExceptionAddress
- 1; /* back up over the int3 instruction */
2501 rec
.ExceptionCode
= EXCEPTION_BREAKPOINT
;
2502 rec
.NumberParameters
= 1;
2503 rec
.ExceptionInformation
[0] = 0;
2506 setup_raise_exception( sigcontext
, &rec
, &context
);
2510 /**********************************************************************
2513 * Handler for SIGFPE.
2515 static void fpe_handler( int signal
, siginfo_t
*siginfo
, void *sigcontext
)
2517 EXCEPTION_RECORD rec
= { 0 };
2519 switch (siginfo
->si_code
)
2522 rec
.ExceptionCode
= EXCEPTION_ARRAY_BOUNDS_EXCEEDED
;
2525 rec
.ExceptionCode
= EXCEPTION_INT_DIVIDE_BY_ZERO
;
2528 rec
.ExceptionCode
= EXCEPTION_INT_OVERFLOW
;
2531 rec
.ExceptionCode
= EXCEPTION_FLT_DIVIDE_BY_ZERO
;
2534 rec
.ExceptionCode
= EXCEPTION_FLT_OVERFLOW
;
2537 rec
.ExceptionCode
= EXCEPTION_FLT_UNDERFLOW
;
2540 rec
.ExceptionCode
= EXCEPTION_FLT_INEXACT_RESULT
;
2544 rec
.ExceptionCode
= EXCEPTION_FLT_INVALID_OPERATION
;
2547 setup_exception( sigcontext
, &rec
);
2551 /**********************************************************************
2554 * Handler for SIGINT.
2556 static void int_handler( int signal
, siginfo_t
*siginfo
, void *sigcontext
)
2558 EXCEPTION_RECORD rec
= { CONTROL_C_EXIT
};
2560 setup_exception( sigcontext
, &rec
);
2564 /**********************************************************************
2567 * Handler for SIGABRT.
2569 static void abrt_handler( int signal
, siginfo_t
*siginfo
, void *sigcontext
)
2571 EXCEPTION_RECORD rec
= { EXCEPTION_WINE_ASSERTION
, EH_NONCONTINUABLE
};
2573 setup_exception( sigcontext
, &rec
);
2577 /**********************************************************************
2580 * Handler for SIGQUIT.
/* NOTE(review): the function body appears lost in extraction — presumably
 * it aborts the thread; confirm against upstream. */
2582 static void quit_handler( int signal
, siginfo_t
*siginfo
, void *ucontext
)
2588 /**********************************************************************
2591 * Handler for SIGUSR1, used to signal a thread that it got suspended.
2593 static void usr1_handler( int signal
, siginfo_t
*siginfo
, void *ucontext
)
2595 struct xcontext context
;
2597 save_context( &context
, ucontext
);
2598 wait_suspend( &context
.c
);
2599 restore_context( &context
, ucontext
);
2603 /**********************************************************************
2604 * get_thread_ldt_entry
2606 NTSTATUS
get_thread_ldt_entry( HANDLE handle
, void *data
, ULONG len
, ULONG
*ret_len
)
2608 return STATUS_NOT_IMPLEMENTED
;
2612 /******************************************************************************
2613 * NtSetLdtEntries (NTDLL.@)
2614 * ZwSetLdtEntries (NTDLL.@)
2616 NTSTATUS WINAPI
NtSetLdtEntries( ULONG sel1
, LDT_ENTRY entry1
, ULONG sel2
, LDT_ENTRY entry2
)
2618 return STATUS_NOT_IMPLEMENTED
;
2622 /**********************************************************************
2623 * signal_init_threading
/* Per-process threading setup hook; nothing is visibly done here on
 * x86-64 (body empty or lost in extraction — confirm). */
2625 void signal_init_threading(void)
2630 /**********************************************************************
2631 * signal_alloc_thread
2633 NTSTATUS
signal_alloc_thread( TEB
*teb
)
2635 return STATUS_SUCCESS
;
2639 /**********************************************************************
2640 * signal_free_thread
/* Counterpart of signal_alloc_thread; nothing to free on this platform
 * (body empty or lost in extraction — confirm). */
2642 void signal_free_thread( TEB
*teb
)
2647 /**********************************************************************
/* mac_thread_gsbase (Apple only): return the pointer the kernel loaded
 * into %gs base for this thread. Preferred path asks Mach for the
 * thread_handle; the fallback probes the pthread structure for the TLS
 * slot array using two sentinel values written through a scratch key.
 * NOTE(review): declarations of key/rc/i and several braces appear lost
 * in extraction. */
2650 static void *mac_thread_gsbase(void)
2652 struct thread_identifier_info tiinfo
;
2653 unsigned int info_count
= THREAD_IDENTIFIER_INFO_COUNT
;
/* cached slot-array offset, computed once per process */
2654 static int gsbase_offset
= -1;
2656 kern_return_t kr
= thread_info(mach_thread_self(), THREAD_IDENTIFIER_INFO
, (thread_info_t
) &tiinfo
, &info_count
);
2657 if (kr
== KERN_SUCCESS
) return (void*)tiinfo
.thread_handle
;
2659 if (gsbase_offset
< 0)
2661 /* Search for the array of TLS slots within the pthread data structure.
2662 That's what the macOS pthread implementation uses for gsbase. */
2663 const void* const sentinel1
= (const void*)0x2bffb6b4f11228ae;
2664 const void* const sentinel2
= (const void*)0x0845a7ff6ab76707;
2667 const void** p
= (const void**)pthread_self();
2671 if ((rc
= pthread_key_create(&key
, NULL
))) return NULL
;
2673 pthread_setspecific(key
, sentinel1
);
2675 for (i
= key
+ 1; i
< 2000; i
++) /* arbitrary limit */
2677 if (p
[i
] == sentinel1
)
/* double-check with a second sentinel to rule out coincidences */
2679 pthread_setspecific(key
, sentinel2
);
2681 if (p
[i
] == sentinel2
)
2683 gsbase_offset
= (i
- key
) * sizeof(*p
);
2687 pthread_setspecific(key
, sentinel1
);
2691 pthread_key_delete(key
);
2694 if (gsbase_offset
) return (char*)pthread_self() + gsbase_offset
;
2700 /**********************************************************************
2701 * signal_init_thread
/* Point the %gs segment base at the TEB (platform-specific mechanism) and
 * initialize the x87 control word / MXCSR for this thread. */
2703 void signal_init_thread( TEB
*teb
)
2705 const WORD fpu_cw
= 0x27f;
2707 #if defined __linux__
2708 arch_prctl( ARCH_SET_GS
, teb
);
2709 #elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
2710 amd64_set_gsbase( teb
);
2711 #elif defined(__NetBSD__)
2712 sysarch( X86_64_SET_GSBASE
, &teb
);
2713 #elif defined (__APPLE__)
/* macOS cannot change the gs base; instead store the needed TEB fields
 * at their own offsets relative to the existing gs base (0x65 = gs prefix) */
2714 __asm__
volatile (".byte 0x65\n\tmovq %0,%c1"
2716 : "r" (teb
->Tib
.Self
), "n" (FIELD_OFFSET(TEB
, Tib
.Self
)));
2717 __asm__
volatile (".byte 0x65\n\tmovq %0,%c1"
2719 : "r" (teb
->ThreadLocalStoragePointer
), "n" (FIELD_OFFSET(TEB
, ThreadLocalStoragePointer
)));
2721 /* alloc_tls_slot() needs to poke a value to an address relative to each
2722 thread's gsbase. Have each thread record its gsbase pointer into its
2723 TEB so alloc_tls_slot() can find it. */
2724 teb
->Reserved5
[0] = mac_thread_gsbase();
2726 # error Please define setting %gs for your architecture
/* reset the FPU and load the Windows-default control word, then MXCSR */
2730 __asm__
volatile ("fninit; fldcw %0" : : "m" (fpu_cw
));
2732 FIXME("FPU setup not implemented for this platform.\n");
2737 /**********************************************************************
2738 * signal_init_process
/* Install all process-wide signal handlers on the alternate stack
 * (SA_ONSTACK), blocking the server signal set while they run.
 * NOTE(review): the error-path exit after perror() appears lost in
 * extraction — confirm against upstream. */
2740 void signal_init_process(void)
2742 struct sigaction sig_act
;
2744 sig_act
.sa_mask
= server_block_set
;
2745 sig_act
.sa_flags
= SA_SIGINFO
| SA_RESTART
| SA_ONSTACK
;
2747 sig_act
.sa_sigaction
= int_handler
;
2748 if (sigaction( SIGINT
, &sig_act
, NULL
) == -1) goto error
;
2749 sig_act
.sa_sigaction
= fpe_handler
;
2750 if (sigaction( SIGFPE
, &sig_act
, NULL
) == -1) goto error
;
2751 sig_act
.sa_sigaction
= abrt_handler
;
2752 if (sigaction( SIGABRT
, &sig_act
, NULL
) == -1) goto error
;
2753 sig_act
.sa_sigaction
= quit_handler
;
2754 if (sigaction( SIGQUIT
, &sig_act
, NULL
) == -1) goto error
;
2755 sig_act
.sa_sigaction
= usr1_handler
;
2756 if (sigaction( SIGUSR1
, &sig_act
, NULL
) == -1) goto error
;
2757 sig_act
.sa_sigaction
= trap_handler
;
2758 if (sigaction( SIGTRAP
, &sig_act
, NULL
) == -1) goto error
;
/* segv_handler also covers SIGILL and SIGBUS */
2759 sig_act
.sa_sigaction
= segv_handler
;
2760 if (sigaction( SIGSEGV
, &sig_act
, NULL
) == -1) goto error
;
2761 if (sigaction( SIGILL
, &sig_act
, NULL
) == -1) goto error
;
2762 if (sigaction( SIGBUS
, &sig_act
, NULL
) == -1) goto error
;
2766 perror("sigaction");
2771 /***********************************************************************
2772 * init_thread_context
2774 static void init_thread_context( CONTEXT
*context
, LPTHREAD_START_ROUTINE entry
, void *arg
)
2776 __asm__( "movw %%cs,%0" : "=m" (context
->SegCs
) );
2777 __asm__( "movw %%ss,%0" : "=m" (context
->SegSs
) );
2778 context
->Rcx
= (ULONG_PTR
)entry
;
2779 context
->Rdx
= (ULONG_PTR
)arg
;
2780 context
->Rsp
= (ULONG_PTR
)NtCurrentTeb()->Tib
.StackBase
- 0x28;
2781 context
->Rip
= (ULONG_PTR
)pRtlUserThreadStart
;
2782 context
->EFlags
= 0x200;
2783 context
->u
.FltSave
.ControlWord
= 0x27f;
2784 context
->u
.FltSave
.MxCsr
= context
->MxCsr
= 0x1f80;
2788 /***********************************************************************
2789 * get_initial_context
/* Build the initial CONTEXT for a new thread on its stack and return a
 * pointer to it (consumed by the signal_start_thread asm below). When
 * `suspend` is set, the context is first offered to the server so a
 * debugger can modify it before the thread starts.
 * NOTE(review): the `CONTEXT *ctx;` declaration, the if/else around the
 * suspend path, the copy into *ctx and the `return ctx;` appear lost in
 * extraction — confirm against upstream. */
2791 PCONTEXT DECLSPEC_HIDDEN
get_initial_context( LPTHREAD_START_ROUTINE entry
, void *arg
, BOOL suspend
)
2797 CONTEXT context
= { 0 };
2799 context
.ContextFlags
= CONTEXT_ALL
;
2800 init_thread_context( &context
, entry
, arg
);
2801 wait_suspend( &context
);
2802 ctx
= (CONTEXT
*)((ULONG_PTR
)context
.Rsp
& ~15) - 1;
2807 ctx
= (CONTEXT
*)((char *)NtCurrentTeb()->Tib
.StackBase
- 0x30) - 1;
2808 init_thread_context( ctx
, entry
, arg
);
2810 pthread_sigmask( SIG_UNBLOCK
, &server_block_set
, NULL
);
2811 ctx
->ContextFlags
= CONTEXT_FULL
;
2816 /***********************************************************************
2817 * signal_start_thread
/* Thread bootstrap: saves the callee-saved registers (with CFI/SEH unwind
 * annotations), records the current rsp as the thread's exit frame
 * (offset 0x320 in the thread data), switches to the thread stack and
 * calls get_initial_context, whose result becomes the first argument of
 * the thunk passed in %rcx. */
2819 __ASM_GLOBAL_FUNC( signal_start_thread
,
2821 __ASM_SEH(".seh_stackalloc 56\n\t")
2822 __ASM_SEH(".seh_endprologue\n\t")
2823 __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t")
2824 "movq %rbp,48(%rsp)\n\t"
2825 __ASM_CFI(".cfi_rel_offset %rbp,48\n\t")
2826 "movq %rbx,40(%rsp)\n\t"
2827 __ASM_CFI(".cfi_rel_offset %rbx,40\n\t")
2828 "movq %r12,32(%rsp)\n\t"
2829 __ASM_CFI(".cfi_rel_offset %r12,32\n\t")
2830 "movq %r13,24(%rsp)\n\t"
2831 __ASM_CFI(".cfi_rel_offset %r13,24\n\t")
2832 "movq %r14,16(%rsp)\n\t"
2833 __ASM_CFI(".cfi_rel_offset %r14,16\n\t")
2834 "movq %r15,8(%rsp)\n\t"
2835 __ASM_CFI(".cfi_rel_offset %r15,8\n\t")
2836 /* store exit frame */
2837 "movq %gs:0x30,%rax\n\t"
2838 "movq %rsp,0x320(%rax)\n\t" /* amd64_thread_data()->exit_frame */
2839 /* switch to thread stack */
2840 "movq 8(%rax),%rax\n\t" /* NtCurrentTeb()->Tib.StackBase */
2841 "movq %rcx,%rbx\n\t" /* thunk */
2842 "leaq -0x1000(%rax),%rsp\n\t"
2844 "call " __ASM_NAME("get_initial_context") "\n\t"
2845 "movq %rax,%rcx\n\t" /* context */
2846 "xorq %rax,%rax\n\t"
/* NOTE(review): final jump through the thunk (%rbx) appears lost in
 * extraction. */
2851 /***********************************************************************
2852 * signal_exit_thread
/* Thread teardown: fetches the exit frame saved by signal_start_thread
 * (offset 0x320), switches back to that stack (re-establishing the saved
 * unwind state via CFI) before calling the exit function.
 * NOTE(review): the register restores and the tail call appear lost in
 * extraction — confirm against upstream. */
2854 __ASM_GLOBAL_FUNC( signal_exit_thread
,
2855 /* fetch exit frame */
2856 "movq %gs:0x30,%rax\n\t"
2857 "movq 0x320(%rax),%rdx\n\t" /* amd64_thread_data()->exit_frame */
2858 "testq %rdx,%rdx\n\t"
2861 /* switch to exit frame stack */
2862 "1:\tmovq $0,0x330(%rax)\n\t"
2863 "movq %rdx,%rsp\n\t"
2864 __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t")
2865 __ASM_CFI(".cfi_rel_offset %rbp,48\n\t")
2866 __ASM_CFI(".cfi_rel_offset %rbx,40\n\t")
2867 __ASM_CFI(".cfi_rel_offset %r12,32\n\t")
2868 __ASM_CFI(".cfi_rel_offset %r13,24\n\t")
2869 __ASM_CFI(".cfi_rel_offset %r14,16\n\t")
2870 __ASM_CFI(".cfi_rel_offset %r15,8\n\t")
2873 #endif /* __x86_64__ */