/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdarg.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>
#include <servers/bootstrap.h>

static mach_port_t server_mach_port;

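/* SIGCHLD callback expected by the generic server code; with the Mach-based
 * tracing mechanism it should never actually fire (see the assert below) */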
void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

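/* translate a Mach kern_return_t into the closest NTSTATUS and record it with set_error() */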
static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

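/* return the Mach task port stored for this process by init_process_tracing(), or 0 if it is not traced */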
static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;
}

/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND ) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}

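/*
 * Client processes are expected to send their Mach task port to
 * server_mach_port (the receive right registered with the bootstrap server
 * above); the function below drains any pending messages on that port and
 * stores the received task port in process->trace_data for later use by the
 * other routines in this file.
 */
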
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, &count ))
    {
/* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        context->debug.i386_regs.dr0 = state.__dr0;
        context->debug.i386_regs.dr1 = state.__dr1;
        context->debug.i386_regs.dr2 = state.__dr2;
        context->debug.i386_regs.dr3 = state.__dr3;
        context->debug.i386_regs.dr6 = state.__dr6;
        context->debug.i386_regs.dr7 = state.__dr7;
#else
        context->debug.i386_regs.dr0 = state.dr0;
        context->debug.i386_regs.dr1 = state.dr1;
        context->debug.i386_regs.dr2 = state.dr2;
        context->debug.i386_regs.dr3 = state.dr3;
        context->debug.i386_regs.dr6 = state.dr6;
        context->debug.i386_regs.dr7 = state.dr7;
#endif
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned int dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    /* Mac OS doesn't allow setting the global breakpoint flags;
     * turn them into the corresponding local flags instead */
    dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);

#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
    state.__dr0 = context->debug.i386_regs.dr0;
    state.__dr1 = context->debug.i386_regs.dr1;
    state.__dr2 = context->debug.i386_regs.dr2;
    state.__dr3 = context->debug.i386_regs.dr3;
    state.__dr4 = 0;
    state.__dr5 = 0;
    state.__dr6 = context->debug.i386_regs.dr6;
    state.__dr7 = dr7;
#else
    state.dr0 = context->debug.i386_regs.dr0;
    state.dr1 = context->debug.i386_regs.dr1;
    state.dr2 = context->debug.i386_regs.dr2;
    state.dr3 = context->debug.i386_regs.dr3;
    state.dr4 = 0;
    state.dr5 = 0;
    state.dr6 = context->debug.i386_regs.dr6;
    state.dr7 = dr7;
#endif
    if (!thread_set_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
            thread->context->debug.i386_regs = context->debug.i386_regs;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

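/* send a Unix signal to one specific thread, addressing it through its Mach
 * thread port; if the send fails with ESRCH the thread is gone, so its
 * cached unix pid/tid are reset */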
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            ret = syscall( SYS___pthread_kill, port, sig );
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}

/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t data;
    mach_vm_address_t aligned_address;
    mach_vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    /* round the requested range out to page boundaries for mach_vm_read */
    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        mach_vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    mach_vm_address_t aligned_address, region_address;
    mach_vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    mach_vm_offset_t offset;
    vm_offset_t task_mem = 0;
    struct vm_region_basic_info_64 info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((mach_vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    offset = ptr % page_size;
    aligned_address = (mach_vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    ret = mach_vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = mach_vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO_64,
                          (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( STATUS_ACCESS_DENIED );
        goto failed;
    }
    ret = mach_vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last
     * pages for writability; read first and last pages; write interior
     * pages to task without ever reading & modifying them; if that succeeds,
     * modify first and last pages and write them. */

    memcpy( (char *)task_mem + offset, src, size );

    ret = mach_vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore the original protection */
        mach_vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) mach_vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    vm_offset_t data;
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        mach_vm_offset_t offset = process->ldt_copy % page_size;
        mach_vm_address_t aligned_address = (mach_vm_address_t)(process->ldt_copy - offset);
        mach_vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = mach_vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            /* the LDT copy is laid out as 8192 int bases, then 8192 int limits, then 8192 flag bytes */
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            mach_vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */