/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>
#include <servers/bootstrap.h>
/* receive right registered with the bootstrap server; new processes send their task ports here */
static mach_port_t server_mach_port;
void sigchld_callback(void)
{
    assert( 0 );  /* should never be called on MacOS */
}
/* map a Mach kern_return_t onto an NTSTATUS and set it as the current error */
static void mach_set_error( kern_return_t mach_error )
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}
static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}
/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND ) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, server_dir, server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}
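
/*
 * The lookup side of this registration is not in this file; it lives in the
 * Wine client (ntdll).  As a rough sketch only, assuming the same bootstrap
 * name (server_dir), a newly started process would obtain the server port and
 * send its task port roughly like this:
 *
 *     mach_port_t bp, server_port;
 *     task_get_bootstrap_port( mach_task_self(), &bp );
 *     if (!bootstrap_look_up( bp, (char *)server_dir, &server_port ))
 *     {
 *         // send mach_task_self() in a port descriptor to server_port;
 *         // init_process_tracing() below receives and validates it
 *     }
 */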
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}
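
/*
 * For reference, the message validated above is the simplest possible
 * "send one port right" Mach message.  A sketch of the sending side
 * (hypothetical, not part of this file) would be:
 *
 *     struct
 *     {
 *         mach_msg_header_t          header;
 *         mach_msg_body_t            body;
 *         mach_msg_port_descriptor_t task_port;
 *     } msg;
 *
 *     msg.header.msgh_bits        = MACH_MSGH_BITS( MACH_MSG_TYPE_COPY_SEND, 0 ) | MACH_MSGH_BITS_COMPLEX;
 *     msg.header.msgh_size        = sizeof(msg);
 *     msg.header.msgh_remote_port = server_port;      // from bootstrap_look_up()
 *     msg.header.msgh_local_port  = MACH_PORT_NULL;
 *     msg.body.msgh_descriptor_count = 1;
 *     msg.task_port.name          = mach_task_self();
 *     msg.task_port.disposition   = MACH_MSG_TYPE_COPY_SEND;
 *     msg.task_port.type          = MACH_MSG_PORT_DESCRIPTOR;
 *     mach_msg_send( &msg.header );
 *
 * The receive loop above maps the task port back to a wineserver process via
 * pid_for_task() and stashes it in process->trace_data.
 */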
/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}
/* initialize registers in new thread if necessary */
void init_thread_context( struct thread *thread )
{
}
/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE, (thread_state_t)&state, &count ))
    {
#ifdef __x86_64__
        assert( state.dsh.flavor == x86_DEBUG_STATE32 ||
                state.dsh.flavor == x86_DEBUG_STATE64 );
#else
        assert( state.dsh.flavor == x86_DEBUG_STATE32 );
#endif

#ifdef __x86_64__
        if (state.dsh.flavor == x86_DEBUG_STATE64)
        {
            context->debug.x86_64_regs.dr0 = state.uds.ds64.__dr0;
            context->debug.x86_64_regs.dr1 = state.uds.ds64.__dr1;
            context->debug.x86_64_regs.dr2 = state.uds.ds64.__dr2;
            context->debug.x86_64_regs.dr3 = state.uds.ds64.__dr3;
            context->debug.x86_64_regs.dr6 = state.uds.ds64.__dr6;
            context->debug.x86_64_regs.dr7 = state.uds.ds64.__dr7;
        }
        else
#endif
        {
/* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
            context->debug.i386_regs.dr0 = state.uds.ds32.__dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.__dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.__dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.__dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.__dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.__dr7;
#else
            context->debug.i386_regs.dr0 = state.uds.ds32.dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.dr7;
#endif
        }
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}
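
/*
 * Note on the thread_get_state() call above: x86_DEBUG_STATE is the
 * "universal" flavor, so the kernel reports the variant it actually filled in
 * through state.dsh.flavor (x86_DEBUG_STATE32 or x86_DEBUG_STATE64) and sets
 * the count accordingly; that is why the code switches on the returned flavor
 * rather than on the CPU type of the target process.
 */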
/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned int dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

#ifdef __x86_64__
    if (thread->process->cpu == CPU_x86_64)
    {
        /* Mac OS doesn't allow setting the global breakpoint flags */
        dr7 = (context->debug.x86_64_regs.dr7 & ~0xaa) | ((context->debug.x86_64_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE64;
        state.dsh.count = sizeof(state.uds.ds64) / sizeof(int);
        state.uds.ds64.__dr0 = context->debug.x86_64_regs.dr0;
        state.uds.ds64.__dr1 = context->debug.x86_64_regs.dr1;
        state.uds.ds64.__dr2 = context->debug.x86_64_regs.dr2;
        state.uds.ds64.__dr3 = context->debug.x86_64_regs.dr3;
        state.uds.ds64.__dr4 = 0;
        state.uds.ds64.__dr5 = 0;
        state.uds.ds64.__dr6 = context->debug.x86_64_regs.dr6;
        state.uds.ds64.__dr7 = dr7;
    }
    else
#endif
    {
        dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE32;
        state.dsh.count = sizeof(state.uds.ds32) / sizeof(int);
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        state.uds.ds32.__dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.__dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.__dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.__dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.__dr4 = 0;
        state.uds.ds32.__dr5 = 0;
        state.uds.ds32.__dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.__dr7 = dr7;
#else
        state.uds.ds32.dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.dr4 = 0;
        state.uds.ds32.dr5 = 0;
        state.uds.ds32.dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.dr7 = dr7;
#endif
    }
    if (!thread_set_state( port, x86_DEBUG_STATE, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
        {
#ifdef __x86_64__
            if (thread->process->cpu == CPU_x86_64)
                thread->context->debug.x86_64_regs = context->debug.x86_64_regs;
            else
#endif
                thread->context->debug.i386_regs = context->debug.i386_regs;
        }
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}
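
/*
 * Background for the dr7 masking above: in the x86 DR7 register the even bits
 * 0/2/4/6 are the local-enable flags L0-L3 and the odd bits 1/3/5/7 are the
 * global-enable flags G0-G3, so the mask 0xaa selects exactly the four
 * global-enable bits.  Since Mac OS refuses to set the global flags, the code
 * clears them and ORs them back in shifted right by one, turning each
 * requested global breakpoint into a local one.  For example a DR7 of 0x0a
 * (G0|G1) becomes 0x05 (L0|L1).
 */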
/* send a signal to a specific thread */
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            ret = syscall( SYS___pthread_kill, port, sig );
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}
/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t data;
    mach_vm_address_t aligned_address;
    mach_vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        mach_vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}
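
/*
 * The alignment arithmetic above rounds the request out to whole pages before
 * handing it to mach_vm_read().  A worked example with a 0x1000-byte page
 * size: for ptr = 0x12345 and size = 0x20, offset = 0x345,
 * aligned_address = 0x12000 and
 * aligned_size = (0x20 + 0x345 + 0xfff) / 0x1000 * 0x1000 = 0x1000,
 * so a single page is read and the 0x20 requested bytes are copied out of it
 * starting at offset 0x345.
 */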
/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    mach_vm_address_t aligned_address, region_address;
    mach_vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t task_mem = 0;
    struct vm_region_basic_info_64 info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = mach_vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO_64,
                          (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( ERROR_ACCESS_DENIED );
        goto failed;
    }
    ret = mach_vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */
    memcpy( (char *)task_mem + offset, src, size );

    ret = mach_vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        mach_vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}
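
/*
 * Summary of the write path above: the target task is suspended, the affected
 * pages are read into the server, mach_vm_region() verifies that the whole
 * range lies in a single region (and records its original protection), the
 * range is made writable, the new bytes are patched into the local copy, the
 * pages are written back with mach_vm_write(), and finally the original
 * protection and the task are restored.  Any failure takes the "failed" path,
 * which releases the local copy and resumes the task without writing back.
 */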
/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        mach_vm_offset_t offset = process->ldt_copy % page_size;
        mach_vm_address_t aligned_address = (mach_vm_address_t)(process->ldt_copy - offset);
        mach_vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            mach_vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}
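
/*
 * Layout assumed for process->ldt_copy, as implied by total_size and the
 * three memcpy() calls above: 8192 ints of segment bases, followed by 8192
 * ints of segment limits, followed by 8192 one-byte flag values; entry N is
 * described by ldt[N], ldt[N + 8192] and byte N of the flags array.
 */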
#endif  /* USE_MACH */