/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdarg.h>
#include <sys/types.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <servers/bootstrap.h>

#if defined(__APPLE__) && defined(__i386__)
extern int pthread_kill_syscall( mach_port_t, int );
__ASM_GLOBAL_FUNC( pthread_kill_syscall,
                   "movl $328,%eax\n\t"  /* SYS___pthread_kill */
                   "int $0x80\n\t"
                   "jae 1f\n\t"
                   "negl %eax\n"
                   "1:\tret" )
#else
static inline int pthread_kill_syscall( mach_port_t msg_port, int signum )
{
    return -ENOSYS;
}
#endif

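/* Receive right on which newly started Wine processes register themselves by
   sending over their Mach task port; see init_tracing_mechanism() and
   init_process_tracing() below. */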
static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

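/* map a Mach kern_return_t code to the closest NTSTATUS and report it through
   set_error() */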
static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}

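/* Overview of the Mach-based tracing mechanism:
 *
 * In place of the ptrace()-based tracing used on other platforms, the server
 * obtains a send right to each client process's Mach task port.  The server
 * registers a port with the bootstrap server under the wine server directory
 * name; each new client looks that port up and mails its task port back to
 * the server, which stores it in process->trace_data.  That task port is then
 * all that is needed to read and write the client's memory (vm_read/vm_write),
 * get and set its debug registers (thread_get_state/thread_set_state), and
 * send signals to individual threads. */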
/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND ) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}

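/* The client-side half of this handshake lives in the Wine loader/ntdll, not
 * in this file.  As an illustrative sketch only (names and details here are
 * assumptions, not the actual client code), a client would do roughly:
 *
 *     struct
 *     {
 *         mach_msg_header_t          header;
 *         mach_msg_body_t            body;
 *         mach_msg_port_descriptor_t task_port;
 *     } msg;
 *     mach_port_t bootstrap_port, server_port;
 *
 *     task_get_bootstrap_port( mach_task_self(), &bootstrap_port );
 *     bootstrap_look_up( bootstrap_port, server_dir, &server_port );
 *
 *     msg.header.msgh_bits        = MACH_MSGH_BITS( MACH_MSG_TYPE_COPY_SEND, 0 ) | MACH_MSGH_BITS_COMPLEX;
 *     msg.header.msgh_size        = sizeof(msg);
 *     msg.header.msgh_remote_port = server_port;
 *     msg.header.msgh_local_port  = MACH_PORT_NULL;
 *     msg.body.msgh_descriptor_count = 1;
 *     msg.task_port.name          = mach_task_self();
 *     msg.task_port.disposition   = MACH_MSG_TYPE_COPY_SEND;
 *     msg.task_port.type          = MACH_MSG_PORT_DESCRIPTOR;
 *     mach_msg( &msg.header, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
 *               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL );
 *
 * which is the shape of message that the validation in init_process_tracing()
 * below expects to receive. */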
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t          header;
        mach_msg_body_t            body;
        mach_msg_port_descriptor_t task_port;
        mach_msg_trailer_t         trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

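/* Only the debug registers travel through the Mach task port: a thread cannot
   read or write its own debug registers from user space, so the server fetches
   and stores them here, while all other registers are handled on the client
   side (hence the SERVER_CTX_DEBUG_REGISTERS asserts below). */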
/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, &count ))
    {
        /* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        context->debug.i386_regs.dr0 = state.__dr0;
        context->debug.i386_regs.dr1 = state.__dr1;
        context->debug.i386_regs.dr2 = state.__dr2;
        context->debug.i386_regs.dr3 = state.__dr3;
        context->debug.i386_regs.dr6 = state.__dr6;
        context->debug.i386_regs.dr7 = state.__dr7;
#else
        context->debug.i386_regs.dr0 = state.dr0;
        context->debug.i386_regs.dr1 = state.dr1;
        context->debug.i386_regs.dr2 = state.dr2;
        context->debug.i386_regs.dr3 = state.dr3;
        context->debug.i386_regs.dr6 = state.dr6;
        context->debug.i386_regs.dr7 = state.dr7;
#endif
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

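/* Note: thread->unix_tid is used as a Mach port name in these two context
   functions and in send_thread_signal() below.  The client is expected to
   report its mach_thread_self() value as its tid, so that
   mach_port_extract_right() can copy a send right for that thread out of the
   target task's port name space.  This is an assumption about the client
   side, which lives outside this file. */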
/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
    state.__dr0 = context->debug.i386_regs.dr0;
    state.__dr1 = context->debug.i386_regs.dr1;
    state.__dr2 = context->debug.i386_regs.dr2;
    state.__dr3 = context->debug.i386_regs.dr3;
    state.__dr4 = 0;
    state.__dr5 = 0;
    state.__dr6 = context->debug.i386_regs.dr6;
    state.__dr7 = context->debug.i386_regs.dr7;
#else
    state.dr0 = context->debug.i386_regs.dr0;
    state.dr1 = context->debug.i386_regs.dr1;
    state.dr2 = context->debug.i386_regs.dr2;
    state.dr3 = context->debug.i386_regs.dr3;
    state.dr4 = 0;
    state.dr5 = 0;
    state.dr6 = context->debug.i386_regs.dr6;
    state.dr7 = context->debug.i386_regs.dr7;
#endif
    if (!thread_set_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
            thread->context->debug.i386_regs = context->debug.i386_regs;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

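/* send a signal to a specific thread by extracting a send right to its thread
   port and issuing the __pthread_kill syscall directly; ESRCH means the target
   thread is already gone */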
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            if ((ret = pthread_kill_syscall( port, sig )) < 0)
            {
                errno = -ret;
                ret = -1;
            }
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}

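/* The vm_read()/vm_write() based memory access below works on whole pages:
 * the requested range is widened to page boundaries and the byte offset into
 * the first page is remembered.  For example (with a hypothetical 0x1000-byte
 * page size), reading 0x20 bytes at ptr = 0x1003 gives offset = 3,
 * aligned_address = 0x1000 and aligned_size = 0x1000, and the caller's data
 * is copied out of the mapped buffer starting at data + offset. */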
/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    vm_offset_t offset, data;
    vm_address_t aligned_address;
    vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

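/* Writing follows a read-modify-write cycle on the suspended task: read the
 * containing pages, verify that they all belong to a single VM region,
 * temporarily make the range readable and writable with vm_protect(), patch
 * the copied pages, write them back with vm_write(), then restore the
 * region's original protection and resume the task. */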
/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    vm_address_t aligned_address, region_address;
    vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    vm_offset_t offset, task_mem = 0;
    struct vm_region_basic_info info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO,
                     (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( ERROR_ACCESS_DENIED );
        goto failed;
    }
    ret = vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */

    memcpy( (char*)task_mem + offset, src, size );

    ret = vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

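/* The client keeps a flat copy of its LDT at process->ldt_copy, laid out as
 * 8192 32-bit bases, then 8192 32-bit limits, then 8192 one-byte flags
 * (hence total_size = (2 * sizeof(int) + 1) * 8192 below); the requested
 * selector entry is read straight out of that copy via vm_read(). */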
/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        vm_offset_t offset = process->ldt_copy % page_size;
        vm_address_t aligned_address = (vm_address_t)(process->ldt_copy - offset);
        vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */