/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <servers/bootstrap.h>

extern int __pthread_kill(mach_port_t, int);

static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}

/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}
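
/*
 * The receive right registered above under the server directory name is how a
 * new process hands its Mach task port to the server: the client side is
 * expected to look this port up through the bootstrap server and send its task
 * port to it.  init_process_tracing() below drains those messages and attaches
 * each received task port to the matching process, found through its Unix pid.
 */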

/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, CONTEXT *context, unsigned int flags )
{
    struct x86_debug_state32 state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( (flags | CONTEXT_i386) == CONTEXT_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, &count ))
    {
        context->Dr0 = state.dr0;
        context->Dr1 = state.dr1;
        context->Dr2 = state.dr2;
        context->Dr3 = state.dr3;
        context->Dr6 = state.dr6;
        context->Dr7 = state.dr7;
        context->ContextFlags |= CONTEXT_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
}

/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const CONTEXT *context, unsigned int flags )
{
    struct x86_debug_state32 state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( (flags | CONTEXT_i386) == CONTEXT_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    state.dr0 = context->Dr0;
    state.dr1 = context->Dr1;
    state.dr2 = context->Dr2;
    state.dr3 = context->Dr3;
    state.dr4 = 0;  /* dr4 and dr5 are not used */
    state.dr5 = 0;
    state.dr6 = context->Dr6;
    state.dr7 = context->Dr7;
    if (!thread_set_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
        {
            thread->context->Dr0 = context->Dr0;
            thread->context->Dr1 = context->Dr1;
            thread->context->Dr2 = context->Dr2;
            thread->context->Dr3 = context->Dr3;
            thread->context->Dr6 = context->Dr6;
            thread->context->Dr7 = context->Dr7;
        }
    }
    mach_port_deallocate( mach_task_self(), port );
}

int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            ret = __pthread_kill( port, sig );
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    return (ret != -1);
}

/* read data from a process memory space */
int read_process_memory( struct process *process, const void *ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    vm_offset_t offset, data;
    vm_address_t aligned_address;
    vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = (unsigned long)ptr % page_size;
    aligned_address = (vm_address_t)((char *)ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;
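
    /* Mach VM calls operate on whole pages, so the read below is expanded to
     * page granularity.  For example, with a 0x1000-byte page, reading 0x20
     * bytes at address 0x1ff8 gives offset = 0xff8, aligned_address = 0x1000
     * and aligned_size = 0x2000: both pages covering the range are read, and
     * the requested bytes start at offset 0xff8 into the returned buffer. */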

    ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

/* write data to a process memory space */
int write_process_memory( struct process *process, void *ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    vm_address_t aligned_address, region_address;
    vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    vm_offset_t offset, task_mem = 0;
    struct vm_region_basic_info info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = (unsigned long)ptr % page_size;
    aligned_address = (vm_address_t)((char *)ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;
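
    /* The write is performed as a whole-page read-modify-write: the covering
     * pages are read from the task, checked to lie within a single VM region,
     * temporarily made writable, patched with the caller's bytes and written
     * back, after which the original protection is restored. */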

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO,
                     (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( ERROR_ACCESS_DENIED );
        goto failed;
    }
    ret = vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ|VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */

    memcpy( (char*)task_mem + offset, src, size );

    ret = vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
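
    /* process->ldt_copy points to three consecutive arrays of 8192 entries in
     * the client's address space: the 32-bit bases, then the 32-bit limits,
     * then one flag byte per entry; total_size covers all three. */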

    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        void *ptr = process->ldt_copy;
        vm_offset_t offset = (unsigned long)ptr % page_size;
        vm_address_t aligned_address = (vm_address_t)((char *)ptr - offset);
        vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */