wine/gsoc_explorer.git: server/mach.c
/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdarg.h>
#include <sys/types.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"
#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <servers/bootstrap.h>

#if defined(__APPLE__) && defined(__i386__)
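/* The assembly stub below invokes Darwin's __pthread_kill system call
 * (SYS___pthread_kill, number 328 here) directly, so that a signal can be
 * delivered to a Mach thread port rather than to a POSIX thread id.  BSD
 * syscalls entered via int $0x80 report failure through the carry flag,
 * hence the jae/negl sequence that turns an error into a negative errno
 * return value. */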
extern int pthread_kill_syscall( mach_port_t, int );
__ASM_GLOBAL_FUNC( pthread_kill_syscall,
                   "movl $328,%eax\n\t"  /* SYS___pthread_kill */
                   "int $0x80\n\t"
                   "jae 1f\n\t"
                   "negl %eax\n"
                   "1:\tret" )
#else
static inline int pthread_kill_syscall( mach_port_t msg_port, int signal )
{
    return -ENOSYS;
}
#endif
static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}
static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:              break;
        case KERN_INVALID_ARGUMENT:     set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:             set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE:   set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:      set_error(STATUS_ACCESS_VIOLATION); break;
        default:                        set_error(STATUS_UNSUCCESSFUL); break;
    }
}
static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}
/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;
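
    /* The server allocates a Mach receive right and publishes a send right
     * for it in the bootstrap namespace, using the server directory path as
     * the service name.  Each Wine process can then look the port up through
     * its own bootstrap port and send the server a copy of its task port
     * (see init_process_tracing below). */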
    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND ) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;
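
    /* Each new process is expected to have sent a message to server_mach_port
     * containing a send right to its own task.  Drain all queued messages
     * here without blocking, validate them, and stash the task port in the
     * owning process's trace_data so the server can manipulate that task. */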
    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}
/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}
/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );
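
    /* On Mac OS, thread->unix_tid is expected to hold the Mach port name of
     * the thread within the target task.  mach_port_extract_right copies a
     * send right for that thread port into the server's own port namespace,
     * so the server can call thread_get_state on it directly. */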
    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, &count ))
    {
        /* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        context->debug.i386_regs.dr0 = state.__dr0;
        context->debug.i386_regs.dr1 = state.__dr1;
        context->debug.i386_regs.dr2 = state.__dr2;
        context->debug.i386_regs.dr3 = state.__dr3;
        context->debug.i386_regs.dr6 = state.__dr6;
        context->debug.i386_regs.dr7 = state.__dr7;
#else
        context->debug.i386_regs.dr0 = state.dr0;
        context->debug.i386_regs.dr1 = state.dr1;
        context->debug.i386_regs.dr2 = state.dr2;
        context->debug.i386_regs.dr3 = state.dr3;
        context->debug.i386_regs.dr6 = state.dr6;
        context->debug.i386_regs.dr7 = state.dr7;
#endif
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}
/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned int dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    /* Mac OS doesn't allow setting the global breakpoint flags */
    dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);
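    /* In DR7 the mask 0xaa selects the global-enable bits G0-G3 (bits 1, 3,
     * 5 and 7).  The expression above clears them and folds each one into
     * the corresponding local-enable bit L0-L3 (bits 0, 2, 4 and 6), so a
     * breakpoint requested as "global" is still armed, just locally. */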
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
    state.__dr0 = context->debug.i386_regs.dr0;
    state.__dr1 = context->debug.i386_regs.dr1;
    state.__dr2 = context->debug.i386_regs.dr2;
    state.__dr3 = context->debug.i386_regs.dr3;
    state.__dr4 = 0;
    state.__dr5 = 0;
    state.__dr6 = context->debug.i386_regs.dr6;
    state.__dr7 = dr7;
#else
    state.dr0 = context->debug.i386_regs.dr0;
    state.dr1 = context->debug.i386_regs.dr1;
    state.dr2 = context->debug.i386_regs.dr2;
    state.dr3 = context->debug.i386_regs.dr3;
    state.dr4 = 0;
    state.dr5 = 0;
    state.dr6 = context->debug.i386_regs.dr6;
    state.dr7 = dr7;
#endif
    if (!thread_set_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
            thread->context->debug.i386_regs = context->debug.i386_regs;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            if ((ret = pthread_kill_syscall( port, sig )) < 0)
            {
                errno = -ret;
                ret = -1;
            }
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}
/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    vm_offset_t offset, data;
    vm_address_t aligned_address;
    vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;
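    /* Example of the alignment math, assuming a 0x1000-byte page size:
     * reading 0x20 bytes at ptr = 0x1234 gives offset = 0x234,
     * aligned_address = 0x1000 and aligned_size = 0x1000, i.e. vm_read
     * fetches the whole page(s) containing the requested range. */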
    ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}
/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    vm_address_t aligned_address, region_address;
    vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    vm_offset_t offset, task_mem = 0;
    struct vm_region_basic_info info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;
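
    /* Overall strategy: suspend the task, read the containing pages into the
     * server, patch the copy with the new data, make the target range
     * writable, write the pages back, then restore the region's original
     * protection and resume the task. */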
    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO,
                     (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( STATUS_ACCESS_DENIED );
        goto failed;
    }
    ret = vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */
    memcpy( (char*)task_mem + offset, src, size );

    ret = vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}
/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );
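
    /* The client-side LDT copy is laid out as three consecutive arrays of
     * 8192 entries each: the 32-bit bases, then the 32-bit limits, then the
     * one-byte flags.  total_size above covers all three, and the copies
     * below index into each array at the requested entry. */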
    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        vm_offset_t offset = process->ldt_copy % page_size;
        vm_address_t aligned_address = (vm_address_t)(process->ldt_copy - offset);
        vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}
#endif  /* USE_MACH */