/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"
#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>
#include <servers/bootstrap.h>

static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}

/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}

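/* Note: client processes are expected to look up the receive right registered
 * above (under the server directory name) through the bootstrap server and
 * send their Mach task port to it; init_process_tracing() below receives and
 * validates those messages. */
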
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE, (thread_state_t)&state, &count ))
    {
#ifdef __x86_64__
        assert( state.dsh.flavor == x86_DEBUG_STATE32 ||
                state.dsh.flavor == x86_DEBUG_STATE64 );
#else
        assert( state.dsh.flavor == x86_DEBUG_STATE32 );
#endif

#ifdef __x86_64__
        if (state.dsh.flavor == x86_DEBUG_STATE64)
        {
            context->debug.x86_64_regs.dr0 = state.uds.ds64.__dr0;
            context->debug.x86_64_regs.dr1 = state.uds.ds64.__dr1;
            context->debug.x86_64_regs.dr2 = state.uds.ds64.__dr2;
            context->debug.x86_64_regs.dr3 = state.uds.ds64.__dr3;
            context->debug.x86_64_regs.dr6 = state.uds.ds64.__dr6;
            context->debug.x86_64_regs.dr7 = state.uds.ds64.__dr7;
        }
        else
#endif
        {
            /* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
            context->debug.i386_regs.dr0 = state.uds.ds32.__dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.__dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.__dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.__dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.__dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.__dr7;
#else
            context->debug.i386_regs.dr0 = state.uds.ds32.dr0;
            context->debug.i386_regs.dr1 = state.uds.ds32.dr1;
            context->debug.i386_regs.dr2 = state.uds.ds32.dr2;
            context->debug.i386_regs.dr3 = state.uds.ds32.dr3;
            context->debug.i386_regs.dr6 = state.uds.ds32.dr6;
            context->debug.i386_regs.dr7 = state.uds.ds32.dr7;
#endif
        }
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

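/* In DR7, bits 1,3,5,7 (mask 0xaa) are the global enable flags G0-G3 and
 * bits 0,2,4,6 are the local enable flags L0-L3.  Since Mac OS doesn't allow
 * setting the global flags, set_thread_context() below folds them down onto
 * the corresponding local flags before handing the state to the kernel. */
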
/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#if defined(__i386__) || defined(__x86_64__)
    x86_debug_state_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned long dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

#ifdef __x86_64__
    if (thread->process->cpu == CPU_x86_64)
    {
        /* Mac OS doesn't allow setting the global breakpoint flags */
        dr7 = (context->debug.x86_64_regs.dr7 & ~0xaa) | ((context->debug.x86_64_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE64;
        state.dsh.count = sizeof(state.uds.ds64) / sizeof(int);
        state.uds.ds64.__dr0 = context->debug.x86_64_regs.dr0;
        state.uds.ds64.__dr1 = context->debug.x86_64_regs.dr1;
        state.uds.ds64.__dr2 = context->debug.x86_64_regs.dr2;
        state.uds.ds64.__dr3 = context->debug.x86_64_regs.dr3;
        state.uds.ds64.__dr4 = 0;
        state.uds.ds64.__dr5 = 0;
        state.uds.ds64.__dr6 = context->debug.x86_64_regs.dr6;
        state.uds.ds64.__dr7 = dr7;
    }
    else
#endif
    {
        dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);

        state.dsh.flavor = x86_DEBUG_STATE32;
        state.dsh.count = sizeof(state.uds.ds32) / sizeof(int);
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        state.uds.ds32.__dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.__dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.__dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.__dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.__dr4 = 0;
        state.uds.ds32.__dr5 = 0;
        state.uds.ds32.__dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.__dr7 = dr7;
#else
        state.uds.ds32.dr0 = context->debug.i386_regs.dr0;
        state.uds.ds32.dr1 = context->debug.i386_regs.dr1;
        state.uds.ds32.dr2 = context->debug.i386_regs.dr2;
        state.uds.ds32.dr3 = context->debug.i386_regs.dr3;
        state.uds.ds32.dr4 = 0;
        state.uds.ds32.dr5 = 0;
        state.uds.ds32.dr6 = context->debug.i386_regs.dr6;
        state.uds.ds32.dr7 = dr7;
#endif
    }
    if (!thread_set_state( port, x86_DEBUG_STATE, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
        {
#ifdef __x86_64__
            if (thread->process->cpu == CPU_x86_64)
                thread->context->debug.x86_64_regs = context->debug.x86_64_regs;
            else
#endif
                thread->context->debug.i386_regs = context->debug.i386_regs;
        }
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            ret = syscall( SYS___pthread_kill, port, sig );
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}

/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t data;
    mach_vm_address_t aligned_address;
    mach_vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        mach_vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

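/* Note: the Mach VM calls used here and in write_process_memory() work on
 * page-granular regions, so both helpers round the requested range out to
 * page boundaries and then copy at 'offset' bytes into the transferred block. */
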
/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    mach_vm_address_t aligned_address, region_address;
    mach_vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t task_mem = 0;
    struct vm_region_basic_info_64 info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = mach_vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO_64,
                          (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( ERROR_ACCESS_DENIED );
        goto failed;
    }
    ret = mach_vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */

    memcpy( (char*)task_mem + offset, src, size );

    ret = mach_vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        mach_vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        mach_vm_offset_t offset = process->ldt_copy % page_size;
        mach_vm_address_t aligned_address = (mach_vm_address_t)(process->ldt_copy - offset);
        mach_vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            mach_vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */