/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

23 #include "wine/port.h"
30 #include <sys/types.h>
32 #ifdef HAVE_SYS_SYSCALL_H
33 #include <sys/syscall.h>
37 #define WIN32_NO_STATUS
47 #include <mach/mach.h>
48 #include <mach/mach_error.h>
49 #include <mach/thread_act.h>
50 #include <mach/mach_vm.h>
51 #include <servers/bootstrap.h>
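/*
 * Overview: the server controls its client processes through their Mach task
 * ports rather than through ptrace.  It registers a receive port with the
 * bootstrap server; each client sends a send right for its own task port to
 * that port.  The task port is stored in process->trace_data and is what the
 * functions below use to read/write client memory, access debug registers
 * and signal individual threads.
 */
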
static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}

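/*
 * The receive right allocated below is published through the bootstrap
 * server under the server directory name (server_dir); clients look this
 * name up to locate the port they must send their task port to.
 */
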
/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, server_dir, server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}

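/*
 * The loop below drains server_mach_port.  A valid message carries exactly
 * one port descriptor holding a send right for the sending process's task.
 * For reference, a client-side sender would look roughly like this sketch
 * (illustrative only, not part of this file):
 *
 *     struct
 *     {
 *         mach_msg_header_t          header;
 *         mach_msg_body_t            body;
 *         mach_msg_port_descriptor_t task_port;
 *     } msg;
 *     mach_port_t server_port;
 *
 *     bootstrap_look_up( bootstrap_port, server_dir, &server_port );
 *     msg.header.msgh_bits        = MACH_MSGH_BITS( MACH_MSG_TYPE_COPY_SEND, 0 ) |
 *                                   MACH_MSGH_BITS_COMPLEX;
 *     msg.header.msgh_size        = sizeof(msg);
 *     msg.header.msgh_remote_port = server_port;
 *     msg.header.msgh_local_port  = MACH_PORT_NULL;
 *     msg.body.msgh_descriptor_count = 1;
 *     msg.task_port.name          = mach_task_self();
 *     msg.task_port.disposition   = MACH_MSG_TYPE_COPY_SEND;
 *     msg.task_port.type          = MACH_MSG_PORT_DESCRIPTOR;
 *     mach_msg( &msg.header, MACH_SEND_MSG, sizeof(msg), 0,
 *               MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL );
 *
 * A COPY_SEND descriptor arrives at the receiver with disposition
 * MACH_MSG_TYPE_PORT_SEND, which is what the checks below test for.
 */
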
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer;  /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

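/*
 * process->trace_data holds the send right for the traced process's task
 * port (stored by init_process_tracing above); releasing it here ends the
 * server's ability to inspect that process.
 */
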
/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

/* initialize registers in new thread if necessary */
void init_thread_context( struct thread *thread )
{
}

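/*
 * Only the debug registers are transferred through the server; all other
 * registers are handled on the client side.  The per-thread Mach port is
 * obtained with mach_port_extract_right(): thread->unix_tid is the name of
 * the thread's port in the traced task's IPC space, and extracting a
 * COPY_SEND right yields a send right usable from the server.
 */
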
/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE, (thread_state_t)&state, &count ))
    {
#ifdef __x86_64__
        assert( state.dsh.flavor == x86_DEBUG_STATE32 ||
                state.dsh.flavor == x86_DEBUG_STATE64 );
#else
        assert( state.dsh.flavor == x86_DEBUG_STATE32 );
#endif

#ifdef __x86_64__
        if (state.dsh.flavor == x86_DEBUG_STATE64)
        {
            context->debug.x86_64_regs.dr0 = state.uds.ds64.__dr0;
            context->debug.x86_64_regs.dr1 = state.uds.ds64.__dr1;
            context->debug.x86_64_regs.dr2 = state.uds.ds64.__dr2;
            context->debug.x86_64_regs.dr3 = state.uds.ds64.__dr3;
            context->debug.x86_64_regs.dr6 = state.uds.ds64.__dr6;
            context->debug.x86_64_regs.dr7 = state.uds.ds64.__dr7;
        }
        else
#endif
        {
/* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
            context->debug.i386_regs.dr0 = state.uds.ds32.__dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.__dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.__dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.__dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.__dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.__dr7;
#else
            context->debug.i386_regs.dr0 = state.uds.ds32.dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.dr7;
#endif
        }
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

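/*
 * In both branches below, dr7 is adjusted because Mac OS doesn't allow
 * setting the global breakpoint enables: the mask 0xaa selects the G0-G3
 * bits (bits 1, 3, 5, 7) of DR7, which are cleared and folded into the
 * corresponding local enables L0-L3 (bits 0, 2, 4, 6) by the shift.
 */
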
/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned long dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

#ifdef __x86_64__
    if (thread->process->cpu == CPU_x86_64)
    {
        /* Mac OS doesn't allow setting the global breakpoint flags */
        dr7 = (context->debug.x86_64_regs.dr7 & ~0xaa) | ((context->debug.x86_64_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE64;
        state.dsh.count = sizeof(state.uds.ds64) / sizeof(int);
        state.uds.ds64.__dr0 = context->debug.x86_64_regs.dr0;
        state.uds.ds64.__dr1 = context->debug.x86_64_regs.dr1;
        state.uds.ds64.__dr2 = context->debug.x86_64_regs.dr2;
        state.uds.ds64.__dr3 = context->debug.x86_64_regs.dr3;
        state.uds.ds64.__dr4 = 0;
        state.uds.ds64.__dr5 = 0;
        state.uds.ds64.__dr6 = context->debug.x86_64_regs.dr6;
        state.uds.ds64.__dr7 = dr7;
    }
    else
#endif
    {
        dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE32;
        state.dsh.count = sizeof(state.uds.ds32) / sizeof(int);
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        state.uds.ds32.__dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.__dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.__dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.__dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.__dr4 = 0;
        state.uds.ds32.__dr5 = 0;
        state.uds.ds32.__dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.__dr7 = dr7;
#else
        state.uds.ds32.dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.dr4 = 0;
        state.uds.ds32.dr5 = 0;
        state.uds.ds32.dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.dr7 = dr7;
#endif
    }
    thread_set_state( port, x86_DEBUG_STATE, (thread_state_t)&state, count );
    mach_port_deallocate( mach_task_self(), port );
#endif
}

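/*
 * Signals are delivered with the private SYS___pthread_kill syscall, which
 * takes a Mach send right for the target thread rather than a pthread_t,
 * allowing the server to signal one specific thread of another process.
 * An ESRCH failure means the thread is already gone, so its unix ids are
 * invalidated.
 */
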
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            ret = syscall( SYS___pthread_kill, port, sig );
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}

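/*
 * mach_vm_read() operates on whole pages, so the request is widened to page
 * boundaries: the enclosing pages are read into a temporary buffer while the
 * target task is suspended, and only the requested bytes are copied to dest.
 */
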
/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t data;
    mach_vm_address_t aligned_address;
    mach_vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        mach_vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

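/*
 * Writing is a read-modify-write of the enclosing pages: read them from the
 * target, check that they lie within a single VM region, temporarily make
 * the range writable, patch in the new bytes, write the pages back and
 * restore the region's original protection.  Any failure takes the failed:
 * path, which releases the temporary buffer and resumes the task.
 */
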
/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    mach_vm_address_t aligned_address, region_address;
    mach_vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t task_mem = 0;
    struct vm_region_basic_info_64 info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = mach_vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO_64,
                          (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( ERROR_ACCESS_DENIED );
        goto failed;
    }
    ret = mach_vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */

    memcpy( (char*)task_mem + offset, src, size );

    ret = mach_vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        mach_vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

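/*
 * The client keeps a flat copy of its LDT at process->ldt_copy: 8192 base
 * words, then 8192 limit words, then 8192 flag bytes (hence total_size).
 * The entry is fetched by reading the enclosing pages of that copy from the
 * client's address space.
 */
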
/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        mach_vm_offset_t offset = process->ldt_copy % page_size;
        mach_vm_address_t aligned_address = (mach_vm_address_t)(process->ldt_copy - offset);
        mach_vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            mach_vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */