/*--------------------------------------------------------------------*/
/*--- Solaris-specific syscalls, etc.            syswrap-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

/* Copyright 2013-2017, Ivo Raisr <ivosh@ivosh.net>. */

/* Copyright 2015-2017, Tomas Jedlicka <jedlickat@gmail.com>. */

/* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */

#if defined(VGO_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"      // VG_(di_notify_*)
#include "pub_core_debuglog.h"
#include "pub_core_clientstate.h"
#include "pub_core_gdbserver.h"
#include "pub_core_inner.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"        // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_transtab.h"       // VG_(discard_translations)
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_ume.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "pub_core_clreq.h"
#endif

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-main.h"
#include "priv_syswrap-solaris.h"

/* Return the number of non-dead and daemon threads.
   count_daemon == True:  count daemon threads
   count_daemon == False: count non-daemon threads */
static UInt count_living_daemon_threads(Bool count_daemon)
{
   UInt count = 0;
   for (ThreadId tid = 1; tid < VG_N_THREADS; tid++)
      if (VG_(threads)[tid].status != VgTs_Empty &&
          VG_(threads)[tid].status != VgTs_Zombie &&
          VG_(threads)[tid].os_state.daemon_thread == count_daemon)
         count++;

   return count;
}

/* Note: The following functions (thread_wrapper, run_a_thread_NORETURN,
   ML_(start_thread_NORETURN), ML_(allocstack) and
   VG_(main_thread_wrapper_NORETURN)) are based on the code in
   syswrap-linux.c. Keep them synchronized! */

/* Run a thread from beginning to end and return the thread's
   scheduler-return-code. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId tid = (ThreadId)tidW;
   Int lwpid = VG_(gettid)();
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                    "thread_wrapper(tid=%u,lwpid=%d): entry\n",
                    tid, lwpid);

   vg_assert(tst->status == VgTs_Init);

   /* Make sure we get the CPU lock before doing anything significant. */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   if (0)
      VG_(printf)("thread tid %u started: stack = %p\n", tid, (void *)&tid);

   /* Make sure error reporting is enabled in the new thread. */
   tst->err_disablement_level = 0;

   if (tid == 1)
      VG_TRACK(pre_thread_first_insn, tid);
   else {
      /* For newly created threads, VG_TRACK(pre_thread_first_insn, tid) is
         invoked later from PRE(sys_getsetcontext)() when setucontext()
         called from _thrp_setup() concludes new thread setup. Invoking it
         here would be way too early - new thread has no stack, yet. */
   }

   tst->os_state.lwpid = lwpid;
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask. */

   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-solaris",
                    "thread_wrapper(tid=%u,lwpid=%d): exit, schedreturncode %s\n",
                    tid, lwpid, VG_(name_of_VgSchedReturnCode)(ret));

   /* Return to caller, still holding the lock. */
   return ret;
}

/* Run a thread all the way to the end, then do appropriate exit actions
   (this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN(Word tidW)
{
   ThreadId tid = (ThreadId)tidW;
   VgSchedReturnCode src;
   Int c;
   ThreadState *tst;
#ifdef ENABLE_INNER_CLIENT_REQUEST
   Int registered_vgstack_id;
#endif

   VG_(debugLog)(1, "syswrap-solaris",
                    "run_a_thread_NORETURN(tid=%u): pre-thread_wrapper\n",
                    tid);

   tst = VG_(get_ThreadState)(tid);
   vg_assert(tst);

   /* A thread has two stacks:
      * the simulated stack (used by the synthetic cpu. Guest process
        is using this stack).
      * the valgrind stack (used by the real cpu. Valgrind code is running
        on this stack).
      When Valgrind runs as an inner, it must signal that its (real) stack
      is the stack to use by the outer to e.g. do stacktraces.
   */
   INNER_REQUEST
      (registered_vgstack_id
       = VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                 tst->os_state.valgrind_stack_init_SP));

   /* Run the thread all the way through. */
   src = thread_wrapper(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                    "run_a_thread_NORETURN(tid=%u): post-thread_wrapper\n",
                    tid);

   c = count_living_daemon_threads(False);
   vg_assert(c >= 1); /* Stay sane. */

   /* Tell the tool that schedctl data belonging to this thread are gone. */
   Addr a = tst->os_state.schedctl_data;
   if (a != 0)
      VG_TRACK(die_mem_munmap, a, sizeof(struct vki_sc_shared));

   /* Deregister thread's stack. */
   if (tst->os_state.stk_id != NULL_STK_ID)
      VG_(deregister_stack)(tst->os_state.stk_id);

   /* Tell the tool this thread is exiting. */
   VG_TRACK(pre_thread_ll_exit, tid);

   /* If the thread is exiting with errors disabled, complain loudly;
      doing so is bad (does the user know this has happened?) Also, in all
      cases, be paranoid and clear the flag anyway so that the thread slot is
      safe in this respect if later reallocated. This should be unnecessary
      since the flag should be cleared when the slot is reallocated, in
      thread_wrapper(). */
   if (tst->err_disablement_level > 0) {
      VG_(umsg)(
         "WARNING: exiting thread has error reporting disabled.\n"
         "WARNING: possibly as a result of some mistake in the use\n"
         "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
      );
      VG_(debugLog)(
         1, "syswrap-solaris",
         "run_a_thread_NORETURN(tid=%u): "
         "WARNING: exiting thread has err_disablement_level = %u\n",
         tid, tst->err_disablement_level
      );
   }
   tst->err_disablement_level = 0;

   if (c == 1) {
      UInt daemon_threads = count_living_daemon_threads(True);
      if (daemon_threads == 0)
         VG_(debugLog)(1, "syswrap-solaris",
                          "run_a_thread_NORETURN(tid=%u): "
                          "last one standing\n",
                          tid);
      else
         VG_(debugLog)(1, "syswrap-solaris",
                          "run_a_thread_NORETURN(tid=%u): "
                          "last non-daemon thread standing "
                          "[daemon threads=%u]\n",
                          tid, daemon_threads);

      /* We are the last non-daemon thread standing. Keep hold of the lock and
         carry on to show final tool results, then exit the entire system.
         Use the continuation pointer set at startup in m_main. */
      if ((src == VgSrc_ExitThread) && (daemon_threads > 0))
         src = VgSrc_ExitProcess;
      (*VG_(address_of_m_main_shutdown_actions_NORETURN))(tid, src);
   }
   else {
      VG_(debugLog)(1, "syswrap-solaris",
                       "run_a_thread_NORETURN(tid=%u): "
                       "not last one standing\n",
                       tid);

      /* OK, thread is dead, but others still exist. Just exit. */

      /* This releases the run lock. */
      VG_(exit_thread)(tid);
      vg_assert(tst->status == VgTs_Zombie);
      vg_assert(sizeof(tst->status) == 4);

      INNER_REQUEST(VALGRIND_STACK_DEREGISTER(registered_vgstack_id));

      /* We have to use this sequence to terminate the thread to
         prevent a subtle race. If VG_(exit_thread)() had left the
         ThreadState as Empty, then it could have been reallocated, reusing
         the stack while we're doing these last cleanups. Instead,
         VG_(exit_thread) leaves it as Zombie to prevent reallocation. We
         need to make sure we don't touch the stack between marking it Empty
         and exiting. Hence the assembler. */
#if defined(VGP_x86_solaris)
      /* Luckily lwp_exit doesn't take any arguments so we don't have to mess
         with the stack. */
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"   /* set tst->status = VgTs_Empty */
         "movl $"VG_STRINGIFY(__NR_lwp_exit)", %%eax\n"
         "int $0x91\n"                  /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "eax", "edx", "cc", "memory");
#elif defined(VGP_amd64_solaris)
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"   /* set tst->status = VgTs_Empty */
         "movq $"VG_STRINGIFY(__NR_lwp_exit)", %%rax\n"
         "syscall\n"                    /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "rax", "rdx", "cc", "memory");
#else
# error "Unknown platform"
#endif

      VG_(core_panic)("Thread exit failed?\n");
   }

   /*NOTREACHED*/
   vg_assert(0);
}

Word ML_(start_thread_NORETURN)(void *arg)
{
   ThreadState *tst = (ThreadState*)arg;
   ThreadId tid = tst->tid;

   run_a_thread_NORETURN((Word)tid);
   /*NOTREACHED*/
   vg_assert(0);
}

/* Allocate a stack for this thread, if it doesn't already have one.
   They're allocated lazily, and never freed. Returns the initial stack
   pointer value to use, or 0 if allocation failed. */
Addr ML_(allocstack)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   VgStack *stack;
   Addr initial_SP;

   /* Either the stack_base and stack_init_SP are both zero (in which
      case a stack hasn't been allocated) or they are both non-zero,
      in which case it has. */

   if (tst->os_state.valgrind_stack_base == 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP == 0);

   if (tst->os_state.valgrind_stack_base != 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP != 0);

   /* If no stack is present, allocate one. */

   if (tst->os_state.valgrind_stack_base == 0) {
      stack = VG_(am_alloc_VgStack)( &initial_SP );
      if (stack) {
         tst->os_state.valgrind_stack_base = (Addr)stack;
         tst->os_state.valgrind_stack_init_SP = initial_SP;
      }
   }

   if (0)
      VG_(printf)("stack for tid %u at %p; init_SP=%p\n",
                  tid,
                  (void*)tst->os_state.valgrind_stack_base,
                  (void*)tst->os_state.valgrind_stack_init_SP);

   return tst->os_state.valgrind_stack_init_SP;
}

/* Allocate a stack for the main thread, and run it all the way to the
   end. Although we already have a working VgStack (VG_(interim_stack)) it's
   better to allocate a new one, so that overflow detection works uniformly
   for all threads. Also initialize the GDT (for normal threads, this is done
   in the PRE wrapper of lwp_create). */
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
   Addr sp;

   VG_(debugLog)(1, "syswrap-solaris",
                 "entering VG_(main_thread_wrapper_NORETURN)\n");

   sp = ML_(allocstack)(tid);
#if defined(ENABLE_INNER_CLIENT_REQUEST)
   {
      // we must register the main thread stack before the call
      // to ML_(call_on_new_stack_0_1), otherwise the outer valgrind
      // reports 'write error' on the non registered stack.
      ThreadState *tst = VG_(get_ThreadState)(tid);
      INNER_REQUEST
         ((void)
          VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                  tst->os_state.valgrind_stack_init_SP));
   }
#endif

#if defined(VGP_x86_solaris)
   {
      ThreadState *tst = VG_(get_ThreadState)(tid);
      ML_(setup_gdt)(&tst->arch.vex);
      ML_(update_gdt_lwpgs)(tid);
   }
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
# error "Unknown platform"
#endif

   /* If we can't even allocate the first thread's stack, we're hosed.
      Give up. */
   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");

   /* Shouldn't be any other threads around yet. */
   vg_assert(VG_(count_living_threads)() == 1);

   ML_(call_on_new_stack_0_1)(
      (Addr)sp,               /* stack */
      0,                      /* bogus return address */
      run_a_thread_NORETURN,  /* fn to call */
      (Word)tid               /* arg to give it */
   );

   /*NOTREACHED*/
   vg_assert(0);
}

/* Deallocate the GDT for a thread. */
void VG_(cleanup_thread)(ThreadArchState *arch)
{
#if defined(VGP_x86_solaris)
   ML_(cleanup_gdt)(&arch->vex);
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
# error "Unknown platform"
#endif
}

/*
 * Notify core about spring cleaning of schedctl data pages for all threads
 * in child post-fork handler. Libc will issue new schedctl syscalls for threads
 * in the child when needs arise.
 *
 * See also POST(schedctl) and run_a_thread_NORETURN() when a thread exits.
 */
static void clean_schedctl_data(ThreadId tid)
{
   UInt i;
   for (i = 0; i < VG_N_THREADS; i++) {
      ThreadState *tst = &VG_(threads)[i];
      if (tst->status != VgTs_Empty) {
         Addr a = tst->os_state.schedctl_data;
         if (a != 0) {
            tst->os_state.schedctl_data = 0;
            a = VG_PGROUNDDN(a);
            if (VG_(am_find_anon_segment)(a))
               VG_(am_notify_munmap)(a, VKI_PAGE_SIZE);
         }
      }
   }
}

void VG_(syswrap_init)(void)
{
   VG_(atfork)(NULL, NULL, clean_schedctl_data);
}

/* Changes ownership of a memory mapping shared between kernel and the client
   process. This mapping should have already been pre-arranged during process
   address space initialization happening in kernel. Valgrind on startup created
   a segment for this mapping categorized as Valgrind's owned anonymous.
   Size of this mapping typically varies among Solaris versions but should be
   page aligned.
   If 'once_only' is 'True', it is expected this function is called once only
   and the mapping ownership has not been changed, yet [useful during
   initialization]. If 'False', this function can be called many times but does
   change ownership only upon the first invocation [useful in syscall wrappers].
 */
void VG_(change_mapping_ownership)(Addr addr, Bool once_only)
{
   const NSegment *seg = VG_(am_find_anon_segment)(addr);
   vg_assert(seg != NULL);
   vg_assert(seg->start == addr);
   vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
   vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
   SizeT size = seg->end - seg->start + 1;
   vg_assert(size > 0);

   Bool do_change = False;
   if (once_only) {
      vg_assert(VG_(am_is_valid_for_valgrind)(addr, size, VKI_PROT_READ));
      do_change = True;
   } else {
      if (!VG_(am_is_valid_for_client)(addr, size, VKI_PROT_READ))
         do_change = True;
   }

   if (do_change) {
      Bool change_ownership_OK = VG_(am_change_ownership_v_to_c)(addr, size);
      vg_assert(change_ownership_OK);

      /* Tell the tool about just discovered mapping. */
      VG_TRACK(new_mem_startup,
               addr, size,
               True  /* readable? */,
               False /* writable? */,
               False /* executable? */,
               0     /* di_handle */);
   }
}

/* Calculate the Fletcher-32 checksum of a given buffer. */
UInt ML_(fletcher32)(UShort *buf, SizeT blocks)
{
   UInt sum1 = 0;
   UInt sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffff;
      sum2 = (sum2 + sum1) % 0xffff;
   }

   return (sum2 << 16) | sum1;
}

/* Calculate the Fletcher-64 checksum of a given buffer. */
ULong ML_(fletcher64)(UInt *buf, SizeT blocks)
{
   ULong sum1 = 0;
   ULong sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffffffff;
      sum2 = (sum2 + sum1) % 0xffffffff;
   }
   return (sum2 << 32) | sum1;
}

/* Save a complete context (VCPU state, sigmask) of a given client thread
   into the vki_ucontext_t structure. This structure is supposed to be
   allocated in the client memory, a caller must make sure that the memory can
   be dereferenced. The active tool is informed about the save. */
void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_TRACK(pre_mem_write, part, tid, "save_context(uc)", (Addr)uc,
            sizeof(*uc));

   uc->uc_flags = VKI_UC_ALL;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_flags,
            sizeof(uc->uc_flags));

   /* Old context */
   uc->uc_link = tst->os_state.oldcontext;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_link,
            sizeof(uc->uc_link));

   /* Clear uc->vki_uc_signo. This slot is used by the signal machinery to
      store a signal number. */
   VKI_UC_SIGNO(uc) = 0;

   /* Sigmask */
   uc->uc_sigmask = tst->sig_mask;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_sigmask,
            sizeof(uc->uc_sigmask));

   /* Stack */
   {
      if (tst->os_state.ustack
          && ML_(safe_to_deref)(tst->os_state.ustack, sizeof(vki_stack_t))
          && tst->os_state.ustack->ss_size) {
         /* If ustack points to a valid stack copy it to ucontext. */
         uc->uc_stack = *tst->os_state.ustack;
      }
      else {
         /* Ustack is not valid. A correct stack has to be figured out
            manually. */
         SysRes res;
         vki_stack_t altstack;

         /* Get information about alternate stack. */
         res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
         vg_assert(!sr_isError(res));

         if (altstack.ss_flags == VKI_SS_ONSTACK) {
            /* If the alternate stack is active copy it to ucontext. */
            uc->uc_stack = altstack;
         }
         else {
            /* No information about stack is present, save information about
               current main stack to ucontext. This branch should be reached
               only by the main thread. */
            ThreadState *tst2 = VG_(get_ThreadState)(1);
            uc->uc_stack.ss_sp = (void*)(tst2->client_stack_highest_byte + 1
                                         - tst2->client_stack_szB);
            uc->uc_stack.ss_size = tst2->client_stack_szB;
            uc->uc_stack.ss_flags = 0;
         }
      }

      VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_stack,
               sizeof(uc->uc_stack));
   }

   /* Save the architecture-specific part of the context. */
   ML_(save_machine_context)(tid, uc, part);
}

/* Set a complete context (VCPU state, sigmask) of a given client thread
   according to values passed in the vki_ucontext_t structure. This structure
   is supposed to be allocated in the client memory, a caller must make sure
   that the memory can be dereferenced. The active tool is informed about
   what parts of the structure are read.

   This function is a counterpart to VG_(save_context)(). */
void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part,
                          Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr old_esp = VG_(get_SP)(tid);

   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_flags)",
            (Addr)&uc->uc_flags, sizeof(uc->uc_flags));

   /* Old context */
   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_link)",
            (Addr)&uc->uc_link, sizeof(uc->uc_link));
   tst->os_state.oldcontext = uc->uc_link;

   /* Sigmask */
   if (uc->uc_flags & VKI_UC_SIGMASK) {
      SysRes res;

      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_sigmask)",
               (Addr)&uc->uc_sigmask, sizeof(uc->uc_sigmask));
      res = VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, &uc->uc_sigmask,
                                    NULL);
      /* Setting signal mask should never fail. */
      vg_assert(!sr_isError(res));
   }

   /* Stack */
   if (uc->uc_flags & VKI_UC_STACK) {
      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_stack)",
               (Addr)&uc->uc_stack, sizeof(uc->uc_stack));

      if (uc->uc_stack.ss_flags == VKI_SS_ONSTACK) {
         /* This seems to be a little bit dangerous but it is what the kernel
            does. */
         if (VG_(clo_trace_signals))
            VG_(dmsg)("restore_context, sigaltstack: tid %u, "
                      "ss %p{%p,sz=%lu,flags=%#x}\n",
                      tid, &uc->uc_stack, uc->uc_stack.ss_sp,
                      (SizeT)uc->uc_stack.ss_size, (UInt)uc->uc_stack.ss_flags);

         tst->altstack.ss_sp = uc->uc_stack.ss_sp;
         tst->altstack.ss_size = uc->uc_stack.ss_size;
         /* Do not copy ss_flags, they are calculated dynamically by
            Valgrind. */
      }

      /* Copyout the new stack. */
      if (tst->os_state.ustack
          && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
                                         sizeof(*tst->os_state.ustack),
                                         VKI_PROT_WRITE)) {
         *tst->os_state.ustack = uc->uc_stack;
         VG_TRACK(post_mem_write, part, tid, (Addr)&tst->os_state.ustack,
                  sizeof(tst->os_state.ustack));
      }
   }

   /* Restore the architecture-specific part of the context. */
   ML_(restore_machine_context)(tid, uc, part, esp_is_thrptr);

   /* If the thread stack is already known, kill the deallocated stack area.
      This is important when returning from a signal handler. */
   if (tst->client_stack_highest_byte && tst->client_stack_szB) {
      Addr end = tst->client_stack_highest_byte;
      Addr start = end + 1 - tst->client_stack_szB;
      Addr new_esp = VG_(get_SP)(tid);

      /* Make sure that the old and new stack pointer are on the same (active)
         stack. Alternate stack is currently never affected by this code. */
      if (start <= old_esp && old_esp <= end
          && start <= new_esp && new_esp <= end
          && new_esp > old_esp)
         VG_TRACK(die_mem_stack, old_esp - VG_STACK_REDZONE_SZB,
                  (new_esp - old_esp) + VG_STACK_REDZONE_SZB);
   }
}

/* Set a client stack associated with a given thread id according to values
   passed in the vki_stack_t structure. */
static void set_stack(ThreadId tid, vki_stack_t *st)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr new_start, new_end;
   SizeT new_size;
   Addr cur_start;
   SizeT cur_size;

   VG_(debugLog)(2, "syswrap-solaris",
                    "set stack: sp=%#lx, size=%#lx.\n",
                    (Addr)st->ss_sp, (SizeT)st->ss_size);

   /* Stay sane. */
   vg_assert(st->ss_flags == 0);

   new_start = (Addr)st->ss_sp;
   new_end = new_start + st->ss_size - 1;
   new_size = st->ss_size;
   cur_start = tst->client_stack_highest_byte + 1
               - tst->client_stack_szB;
   cur_size = tst->client_stack_szB;

   if (new_start == cur_start && new_size == cur_size) {
      /* No change is requested, bail out. */
      return;
   }

   if (tid == 1 && (new_size == 0 || new_size > VG_(clstk_max_size))) {
      /* The main thread requests to use a stack without any size checking, or
         too big stack. Fallback to the maximum allocated client stack. */

      /* TODO I think it is possible to give up on setting main stack anyway.
         Valgrind knows where it is located and it is already registered as
         VG_(clstk_id). */

      new_size = VG_(clstk_max_size);
      new_end = tst->client_stack_highest_byte;
      new_start = new_end + 1 - new_size;
   }

   if (tst->os_state.stk_id == NULL_STK_ID) {
      /* This thread doesn't have a stack set yet. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (new) for thread %u.\n",
                    new_start, new_end, tid);
      tst->os_state.stk_id = VG_(register_stack)(new_start, new_end);
   } else {
      /* Change a thread stack. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (change) for thread %u.\n",
                    new_start, new_end, tid);
      VG_(change_stack)(tst->os_state.stk_id, new_start, new_end);
   }
   tst->client_stack_highest_byte = new_end;
   tst->client_stack_szB = new_size;
}

/* ---------------------------------------------------------------------
   Door tracking. Used mainly for server side where door_return()
   parameters alone do not contain sufficient information.
   Also used on client side when new door descriptors are passed via
   door_call() in desc_ptr. Not used for tracking door descriptors
   explicitly open()'ed [generic fd tracking is used in that case].
   ------------------------------------------------------------------ */

/* One of these is allocated for each created door. */
typedef struct OpenDoor
{
   Bool server;                     /* TRUE = server door, FALSE = client door */
   Int fd;                          /* The file descriptor. */
   union {
      /* Server side. */
      struct {
         Addr server_procedure;     /* The door server procedure. */
         HChar *pathname;           /* NULL if unknown. */
      };
      /* Client side. */
      struct {
         /* Hook called during PRE door_call()
            to check contents of params->data_ptr. */
         void (*pre_mem_hook)(ThreadId tid, Int fd,
                              void *data_ptr, SizeT data_size);
         /* Hook called during POST door_call()
            to define contents of params->rbuf. */
         void (*post_mem_hook)(ThreadId tid, Int fd,
                               void *rbuf, SizeT rsize);
      };
   };
   struct OpenDoor *next, *prev;
} OpenDoor;

/* List of allocated door fds. */
static OpenDoor *doors_recorded = NULL;
static UInt nr_doors_recorded = 0;

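/* Allocates a new door record and links it at the head of the
   doors_recorded list; the caller fills in the door-specific fields. */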
static OpenDoor *door_record_create(void)
{
   OpenDoor *d = VG_(malloc)("syswrap.door_record_create.1", sizeof(OpenDoor));
   d->prev = NULL;
   d->next = doors_recorded;
   if (doors_recorded != NULL)
      doors_recorded->prev = d;
   doors_recorded = d;
   nr_doors_recorded += 1;

   return d;
}

/* Records a server door. */
static void door_record_server(ThreadId tid, Addr server_procedure, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == TRUE) && (d->server_procedure == server_procedure)) {
         if (d->pathname) {
            VG_(free)(d->pathname);
         }
         break;
      }
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = TRUE;
   d->fd = fd;
   d->server_procedure = server_procedure;
   d->pathname = NULL;
}

/* Records a client door. */
static void door_record_client(ThreadId tid, Int fd,
   void (*pre_mem_hook)(ThreadId tid, Int fd, void *data_ptr, SizeT data_size),
   void (*post_mem_hook)(ThreadId tid, Int fd, void *rbuf, SizeT rsize))
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         break;
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = FALSE;
   d->fd = fd;
   d->pre_mem_hook = pre_mem_hook;
   d->post_mem_hook = post_mem_hook;
}

/* Revokes an open door, be it server side or client side. */
static void door_record_revoke(ThreadId tid, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         if (d->prev != NULL)
            d->prev->next = d->next;
         else
            doors_recorded = d->next;
         if (d->next != NULL)
            d->next->prev = d->prev;

         if ((d->server == TRUE) && (d->pathname != NULL))
            VG_(free)(d->pathname);
         VG_(free)(d);
         nr_doors_recorded -= 1;
         return;
      }
      d = d->next;
   }
}

/* Attaches a server door to a filename. */
static void door_record_server_fattach(Int fd, HChar *pathname)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         vg_assert(d->server == TRUE);

         if (d->pathname != NULL)
            VG_(free)(d->pathname);
         d->pathname = VG_(strdup)("syswrap.door_server_fattach.1", pathname);
         return;
      }
      d = d->next;
   }
}

/* Finds a server door based on server procedure. */
static const OpenDoor *door_find_by_proc(Addr server_procedure)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server) && (d->server_procedure == server_procedure))
         return d;
      d = d->next;
   }

   return NULL;
}

/* Finds a client door based on fd. */
static const OpenDoor *door_find_by_fd(Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         return d;
      d = d->next;
   }

   return NULL;
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for Solaris-specific syscalls
   ------------------------------------------------------------------ */

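/* Shorthands for the wrapper definitions below; DEFN_PRE_TEMPLATE and
   DEFN_POST_TEMPLATE come from priv_types_n_macros.h. */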
#define PRE(name)       DEFN_PRE_TEMPLATE(solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(solaris, name)

/* prototypes */
DECL_TEMPLATE(solaris, sys_exit);
#if defined(SOLARIS_SPAWN_SYSCALL)
DECL_TEMPLATE(solaris, sys_spawn);
#endif /* SOLARIS_SPAWN_SYSCALL */
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_open);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_close);
DECL_TEMPLATE(solaris, sys_linkat);
DECL_TEMPLATE(solaris, sys_symlinkat);
DECL_TEMPLATE(solaris, sys_time);
DECL_TEMPLATE(solaris, sys_brk);
DECL_TEMPLATE(solaris, sys_stat);
DECL_TEMPLATE(solaris, sys_lseek);
DECL_TEMPLATE(solaris, sys_mount);
DECL_TEMPLATE(solaris, sys_readlinkat);
DECL_TEMPLATE(solaris, sys_stime);
DECL_TEMPLATE(solaris, sys_fstat);
#if defined(SOLARIS_FREALPATHAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_frealpathat);
#endif /* SOLARIS_FREALPATHAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_stty);
DECL_TEMPLATE(solaris, sys_gtty);
DECL_TEMPLATE(solaris, sys_pgrpsys);
DECL_TEMPLATE(solaris, sys_pipe);
DECL_TEMPLATE(solaris, sys_faccessat);
DECL_TEMPLATE(solaris, sys_mknodat);
DECL_TEMPLATE(solaris, sys_sysi86);
DECL_TEMPLATE(solaris, sys_shmsys);
DECL_TEMPLATE(solaris, sys_semsys);
DECL_TEMPLATE(solaris, sys_ioctl);
DECL_TEMPLATE(solaris, sys_fchownat);
DECL_TEMPLATE(solaris, sys_fdsync);
DECL_TEMPLATE(solaris, sys_execve);
DECL_TEMPLATE(solaris, sys_fcntl);
DECL_TEMPLATE(solaris, sys_renameat);
DECL_TEMPLATE(solaris, sys_unlinkat);
DECL_TEMPLATE(solaris, sys_fstatat);
DECL_TEMPLATE(solaris, sys_openat);
DECL_TEMPLATE(solaris, sys_tasksys);
DECL_TEMPLATE(solaris, sys_getpagesizes);
DECL_TEMPLATE(solaris, sys_lwp_park);
DECL_TEMPLATE(solaris, sys_sendfilev);
#if defined(SOLARIS_LWP_NAME_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_name);
#endif /* SOLARIS_LWP_NAME_SYSCALL */
DECL_TEMPLATE(solaris, sys_privsys);
DECL_TEMPLATE(solaris, sys_ucredsys);
DECL_TEMPLATE(solaris, sys_sysfs);
DECL_TEMPLATE(solaris, sys_getmsg);
DECL_TEMPLATE(solaris, sys_putmsg);
DECL_TEMPLATE(solaris, sys_lstat);
DECL_TEMPLATE(solaris, sys_sigprocmask);
DECL_TEMPLATE(solaris, sys_sigsuspend);
DECL_TEMPLATE(solaris, sys_sigaction);
DECL_TEMPLATE(solaris, sys_sigpending);
DECL_TEMPLATE(solaris, sys_getsetcontext);
DECL_TEMPLATE(solaris, sys_fchmodat);
DECL_TEMPLATE(solaris, sys_mkdirat);
DECL_TEMPLATE(solaris, sys_statvfs);
DECL_TEMPLATE(solaris, sys_fstatvfs);
DECL_TEMPLATE(solaris, sys_nfssys);
DECL_TEMPLATE(solaris, sys_waitid);
DECL_TEMPLATE(solaris, sys_sigsendsys);
#if defined(SOLARIS_UTIMESYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimesys);
#endif /* SOLARIS_UTIMESYS_SYSCALL */
#if defined(SOLARIS_UTIMENSAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimensat);
#endif /* SOLARIS_UTIMENSAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigresend);
DECL_TEMPLATE(solaris, sys_priocntlsys);
DECL_TEMPLATE(solaris, sys_pathconf);
DECL_TEMPLATE(solaris, sys_mmap);
#if defined(SOLARIS_UUIDSYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_uuidsys);
#endif /* SOLARIS_UUIDSYS_SYSCALL */
DECL_TEMPLATE(solaris, sys_mmapobj);
DECL_TEMPLATE(solaris, sys_memcntl);
DECL_TEMPLATE(solaris, sys_getpmsg);
DECL_TEMPLATE(solaris, sys_putpmsg);
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_rename);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_uname);
DECL_TEMPLATE(solaris, sys_setegid);
DECL_TEMPLATE(solaris, sys_sysconfig);
DECL_TEMPLATE(solaris, sys_systeminfo);
DECL_TEMPLATE(solaris, sys_seteuid);
DECL_TEMPLATE(solaris, sys_forksys);
#if defined(SOLARIS_GETRANDOM_SYSCALL)
DECL_TEMPLATE(solaris, sys_getrandom);
#endif /* SOLARIS_GETRANDOM_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigtimedwait);
DECL_TEMPLATE(solaris, sys_yield);
DECL_TEMPLATE(solaris, sys_lwp_sema_post);
DECL_TEMPLATE(solaris, sys_lwp_sema_trywait);
DECL_TEMPLATE(solaris, sys_lwp_detach);
DECL_TEMPLATE(solaris, sys_modctl);
DECL_TEMPLATE(solaris, sys_fchroot);
#if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
DECL_TEMPLATE(solaris, sys_system_stats);
#endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
DECL_TEMPLATE(solaris, sys_gettimeofday);
DECL_TEMPLATE(solaris, sys_lwp_create);
DECL_TEMPLATE(solaris, sys_lwp_exit);
DECL_TEMPLATE(solaris, sys_lwp_suspend);
DECL_TEMPLATE(solaris, sys_lwp_continue);
#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_sigqueue);
#else
DECL_TEMPLATE(solaris, sys_lwp_kill);
#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
DECL_TEMPLATE(solaris, sys_lwp_self);
DECL_TEMPLATE(solaris, sys_lwp_sigmask);
DECL_TEMPLATE(solaris, sys_lwp_private);
DECL_TEMPLATE(solaris, sys_lwp_wait);
DECL_TEMPLATE(solaris, sys_lwp_mutex_wakeup);
DECL_TEMPLATE(solaris, sys_lwp_cond_wait);
DECL_TEMPLATE(solaris, sys_lwp_cond_signal);
DECL_TEMPLATE(solaris, sys_lwp_cond_broadcast);
DECL_TEMPLATE(solaris, sys_pread);
DECL_TEMPLATE(solaris, sys_pwrite);
DECL_TEMPLATE(solaris, sys_lgrpsys);
DECL_TEMPLATE(solaris, sys_rusagesys);
DECL_TEMPLATE(solaris, sys_port);
DECL_TEMPLATE(solaris, sys_pollsys);
DECL_TEMPLATE(solaris, sys_labelsys);
DECL_TEMPLATE(solaris, sys_acl);
DECL_TEMPLATE(solaris, sys_auditsys);
DECL_TEMPLATE(solaris, sys_p_online);
DECL_TEMPLATE(solaris, sys_sigqueue);
DECL_TEMPLATE(solaris, sys_clock_gettime);
DECL_TEMPLATE(solaris, sys_clock_settime);
DECL_TEMPLATE(solaris, sys_clock_getres);
DECL_TEMPLATE(solaris, sys_timer_create);
DECL_TEMPLATE(solaris, sys_timer_delete);
DECL_TEMPLATE(solaris, sys_timer_settime);
DECL_TEMPLATE(solaris, sys_timer_gettime);
DECL_TEMPLATE(solaris, sys_timer_getoverrun);
DECL_TEMPLATE(solaris, sys_facl);
DECL_TEMPLATE(solaris, sys_door);
DECL_TEMPLATE(solaris, sys_schedctl);
DECL_TEMPLATE(solaris, sys_pset);
DECL_TEMPLATE(solaris, sys_resolvepath);
DECL_TEMPLATE(solaris, sys_lwp_mutex_timedlock);
DECL_TEMPLATE(solaris, sys_lwp_rwlock_sys);
DECL_TEMPLATE(solaris, sys_lwp_sema_timedwait);
DECL_TEMPLATE(solaris, sys_zone);
DECL_TEMPLATE(solaris, sys_getcwd);
DECL_TEMPLATE(solaris, sys_so_socket);
DECL_TEMPLATE(solaris, sys_so_socketpair);
DECL_TEMPLATE(solaris, sys_bind);
DECL_TEMPLATE(solaris, sys_listen);
DECL_TEMPLATE(solaris, sys_accept);
DECL_TEMPLATE(solaris, sys_connect);
DECL_TEMPLATE(solaris, sys_shutdown);
DECL_TEMPLATE(solaris, sys_recv);
DECL_TEMPLATE(solaris, sys_recvfrom);
DECL_TEMPLATE(solaris, sys_recvmsg);
DECL_TEMPLATE(solaris, sys_send);
DECL_TEMPLATE(solaris, sys_sendmsg);
DECL_TEMPLATE(solaris, sys_sendto);
DECL_TEMPLATE(solaris, sys_getpeername);
DECL_TEMPLATE(solaris, sys_getsockname);
DECL_TEMPLATE(solaris, sys_getsockopt);
DECL_TEMPLATE(solaris, sys_setsockopt);
DECL_TEMPLATE(solaris, sys_lwp_mutex_unlock);
DECL_TEMPLATE(solaris, sys_lwp_mutex_register);
DECL_TEMPLATE(solaris, sys_uucopy);
DECL_TEMPLATE(solaris, sys_umount2);

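/* Fast traps. These are entered through a separate trap mechanism rather
   than the normal syscall path, but their wrappers use the same PRE/POST
   convention as ordinary syscalls. */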
DECL_TEMPLATE(solaris, fast_gethrtime);
DECL_TEMPLATE(solaris, fast_gethrvtime);
DECL_TEMPLATE(solaris, fast_gethrestime);
DECL_TEMPLATE(solaris, fast_getlgrp);
#if defined(SOLARIS_GETHRT_FASTTRAP)
DECL_TEMPLATE(solaris, fast_gethrt);
#endif /* SOLARIS_GETHRT_FASTTRAP */
#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
DECL_TEMPLATE(solaris, fast_getzoneoffset);
#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */

/* implementation */
PRE(sys_exit)
{
   /* void exit(int status); */
   ThreadId t;

   PRINT("sys_exit( %ld )", SARG1);
   PRE_REG_READ1(void, "exit", int, status);

   for (t = 1; t < VG_N_THREADS; t++) {
      if (VG_(threads)[t].status == VgTs_Empty)
         continue;

      /* Assign the exit code, VG_(nuke_all_threads_except) will assign
         the exitreason. */
      VG_(threads)[t].os_state.exitcode = ARG1;
   }

   /* Indicate in all other threads that the process is exiting.
      Then wait using VG_(reap_threads) for these threads to disappear.
      See comments in syswrap-linux.c, PRE(sys_exit_group) wrapper,
      for reasoning why this cannot give a deadlock. */
   VG_(nuke_all_threads_except)(tid, VgSrc_ExitProcess);
   VG_(reap_threads)(tid);
   VG_(threads)[tid].exitreason = VgSrc_ExitThread;
   /* We do assign VgSrc_ExitThread and not VgSrc_ExitProcess, as this thread
      is the thread calling exit_group and so its registers must be considered
      as not reachable. See pub_tool_machine.h VG_(apply_to_GP_regs). */

   /* We have to claim the syscall already succeeded. */
   SET_STATUS_Success(0);
}

#if defined(SOLARIS_SPAWN_SYSCALL)
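/* Checks one kfile_attr_t record passed to spawn(). Returns False, with the
   syscall status already set, if the record refers to a file descriptor the
   client must not touch; True otherwise. */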
static Bool spawn_pre_check_kfa(ThreadId tid, SyscallStatus *status,
                                vki_kfile_attr_t *kfa)
{
   PRE_FIELD_READ("spawn(attrs->kfa_size)", kfa->kfa_size);
   PRE_FIELD_READ("spawn(attrs->kfa_type)", kfa->kfa_type);

   if (ML_(safe_to_deref)(kfa, kfa->kfa_size)) {
      switch (kfa->kfa_type) {
      case VKI_FA_DUP2:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_newfiledes)", kfa->kfa_newfiledes);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(dup2)", tid, False) ||
             !ML_(fd_allowed)(kfa->kfa_newfiledes, "spawn(dup2)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSE:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         /* If doing -d style logging (which is to fd = 2 = stderr),
            don't allow that filedes to be closed. See ML_(fd_allowed)(). */
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(close)", tid, False) ||
             (kfa->kfa_filedes == 2 && VG_(debugLog_getLevel)() > 0)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSEFROM:
         /* :TODO: All file descriptors greater than or equal to
            kfa->kfa_filedes would have to be checked. */
         VG_(unimplemented)("Support for spawn() with file attribute type "
                            "FA_CLOSEFROM.");
         break;
      case VKI_FA_OPEN:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_oflag)", kfa->kfa_oflag);
         PRE_FIELD_READ("spawn(attrs->kfa_mode)", kfa->kfa_mode);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(open)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         /* fallthrough */
      case VKI_FA_CHDIR:
         PRE_FIELD_READ("spawn(attrs->kfa_pathsize)", kfa->kfa_pathsize);
         if (kfa->kfa_pathsize != 0) {
            PRE_MEM_RASCIIZ("spawn(attrs->kfa_data)", (Addr) kfa->kfa_data);
         }
         break;
      default:
         VG_(unimplemented)("Support for spawn() with file attribute type %u.",
                            kfa->kfa_type);
      }
   }

   return True;
}

PRE(sys_spawn)
{
   /* int spawn(char *path, void *attrs, size_t attrsize,
                char *argenv, size_t aesize); */
   PRINT("sys_spawn ( %#lx(%s), %#lx, %lu, %#lx, %lu )",
         ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "spawn", const char *, path, void *, attrs,
                 size_t, attrsize, char *, argenv, size_t, aesize);

   /* First check input arguments. */
   PRE_MEM_RASCIIZ("spawn(path)", ARG1);
   if (ARG3 > 0) {
      /*  --- vki_kspawn_attr_t --
          | ksa_version          |
          | ksa_size             |
          | ksa_attr_off         | -----|               (only if != 0)
          | ksa_attr_size        |      |
          | ksa_path_off         | =====|====|          (only if != 0)
          | ksa_path_size        |      |    |
          | ksa_shell_off        | -----|----|----|     (only if != 0)
          | ksa_shell_size       |      |    |    |
          | ksa_data[0]          |      |    |    |
          ------------------------      |    |    |
          | vki_spawn_attr_t     | <----|    |    |
          ------------------------           |    |
          | path                 | <---------|    |
          ------------------------                |
          | shell                | <---------------
          ------------------------
          | file actions         | (not included in ksa_size, only in ARG3)
          ------------------------

          ksa_size = sizeof(vki_kspawn_attr_t) + ksa_attr_size + ksa_path_size +
                     ksa_shell_size
          attrs_size (ARG3) = ksa_size + file actions size */

      vki_kspawn_attr_t *attrs = (vki_kspawn_attr_t *) ARG2;
      PRE_FIELD_READ("spawn(attrs->ksa_version)", attrs->ksa_version);
      PRE_FIELD_READ("spawn(attrs->ksa_size)", attrs->ksa_size);
      PRE_FIELD_READ("spawn(attrs->ksa_attr_off)", attrs->ksa_attr_off);
      PRE_FIELD_READ("spawn(attrs->ksa_path_off)", attrs->ksa_path_off);
      PRE_FIELD_READ("spawn(attrs->ksa_shell_off)", attrs->ksa_shell_off);

      if (ML_(safe_to_deref)(attrs, sizeof(vki_kspawn_attr_t))) {
         if (attrs->ksa_version != VKI_SPAWN_VERSION) {
            VG_(unimplemented)("Support for spawn() with attributes "
                               "version %u.", attrs->ksa_version);
         }

         if (attrs->ksa_attr_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_attr_size)", attrs->ksa_attr_size);
            vki_spawn_attr_t *sap =
               (vki_spawn_attr_t *) ((Addr) attrs + attrs->ksa_attr_off);
            PRE_MEM_READ("spawn(attrs->ksa_attr)",
                         (Addr) sap, attrs->ksa_attr_size);
            if (ML_(safe_to_deref)(sap, sizeof(vki_spawn_attr_t))) {
               if (sap->sa_psflags & VKI_POSIX_SPAWN_SETVAMASK_NP) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "including POSIX_SPAWN_SETVAMASK_NP.");
               }
               /* paranoia */
               Int rem = sap->sa_psflags & ~(
                  VKI_POSIX_SPAWN_RESETIDS | VKI_POSIX_SPAWN_SETPGROUP |
                  VKI_POSIX_SPAWN_SETSIGDEF | VKI_POSIX_SPAWN_SETSIGMASK |
                  VKI_POSIX_SPAWN_SETSCHEDPARAM | VKI_POSIX_SPAWN_SETSCHEDULER |
                  VKI_POSIX_SPAWN_SETSID_NP | VKI_POSIX_SPAWN_SETVAMASK_NP |
                  VKI_POSIX_SPAWN_SETSIGIGN_NP | VKI_POSIX_SPAWN_NOSIGCHLD_NP |
                  VKI_POSIX_SPAWN_WAITPID_NP | VKI_POSIX_SPAWN_NOEXECERR_NP);
               if (rem != 0) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "%#x.", sap->sa_psflags);
               }
            }
         }

         if (attrs->ksa_path_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_path_size)", attrs->ksa_path_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_path)",
                            (Addr) attrs + attrs->ksa_path_off);
         }

         if (attrs->ksa_shell_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_shell_size)",
                           attrs->ksa_shell_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_shell)",
                            (Addr) attrs + attrs->ksa_shell_off);
         }

         vki_kfile_attr_t *kfa = (vki_kfile_attr_t *) (ARG2 + attrs->ksa_size);
         while ((Addr) kfa < ARG2 + ARG3) {
            if (spawn_pre_check_kfa(tid, status, kfa) == False) {
               return;
            }
            kfa = (vki_kfile_attr_t *) ((Addr) kfa + kfa->kfa_size);
         }
      }
   }
   PRE_MEM_READ("spawn(argenv)", ARG4, ARG5);

   /* Check that the name at least begins in client-accessible storage. */
   if ((ARG1 == 0) || !ML_(safe_to_deref)((HChar *) ARG1, 1)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Check that attrs reside in client-accessible storage. */
   if (ARG2 != 0) {
      if (!VG_(am_is_valid_for_client)(ARG2, ARG3, VKI_PROT_READ)) {
         SET_STATUS_Failure(VKI_EFAULT);
         return;
      }
   }

   /* Check that the argenv reside in client-accessible storage.
      Solaris disallows to perform spawn() without any arguments & environment
      variables specified. */
   if ((ARG4 == 0) /* obviously bogus */ ||
       !VG_(am_is_valid_for_client)(ARG4, ARG5, VKI_PROT_READ)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Copy existing attrs or create empty minimal ones. */
   vki_kspawn_attr_t *attrs;
   SizeT attrs_size;
   if (ARG2 == 0) {
      /* minimalistic kspawn_attr_t + spawn_attr_t */
      attrs_size = sizeof(vki_kspawn_attr_t) + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.1", 1, attrs_size);
      attrs->ksa_version = VKI_SPAWN_VERSION;
      attrs->ksa_size = attrs_size;
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
   } else if (((vki_kspawn_attr_t *) ARG2)->ksa_attr_off == 0) {
      /* existing kspawn_attr_t but missing spawn_attr_t */
      attrs_size = ARG3 + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.2", 1, attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, sizeof(vki_kspawn_attr_t));
      SizeT file_actions_size = ARG3 - attrs->ksa_size;
      attrs->ksa_size += sizeof(vki_spawn_attr_t);
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
      if (attrs->ksa_path_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_path_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_path_off, attrs->ksa_path_size);
         attrs->ksa_path_off += sizeof(vki_spawn_attr_t);
      }
      if (attrs->ksa_shell_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_shell_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_shell_off, attrs->ksa_shell_size);
         attrs->ksa_shell_off += sizeof(vki_spawn_attr_t);
      }
      if (file_actions_size > 0) {
         VG_(memcpy)((HChar *) attrs + attrs_size - file_actions_size,
                     (HChar *) ARG2 + ARG3 - file_actions_size,
                     file_actions_size);
      }
   } else {
      /* existing kspawn_attr_t + spawn_attr_t */
      attrs_size = ARG3;
      attrs = VG_(malloc)("syswrap.spawn.3", attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, attrs_size);
   }
   vki_spawn_attr_t *spa = (vki_spawn_attr_t *) ((HChar *) attrs +
                                                 attrs->ksa_attr_off);

   /* Convert argv and envp parts of argenv into their separate XArray's.
      Duplicate strings because argv and envp will be then modified. */
   XArray *argv = VG_(newXA)(VG_(malloc), "syswrap.spawn.4",
                             VG_(free), sizeof(HChar *));
   XArray *envp = VG_(newXA)(VG_(malloc), "syswrap.spawn.5",
                             VG_(free), sizeof(HChar *));

   HChar *argenv = (HChar *) ARG4;
   XArray *current_xa = argv;
   while ((Addr) argenv < ARG4 + ARG5) {
      if (*argenv == '\0') {
         argenv += 1;
         if (current_xa == argv) {
            current_xa = envp;
            if ((*argenv == '\0') && ((Addr) argenv == ARG4 + ARG5 - 1)) {
               /* envp part is empty, it contained only {NULL}. */
               break;
            }
         } else {
            if ((Addr) argenv != ARG4 + ARG5) {
               if (VG_(clo_trace_syscalls))
                  VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
               SET_STATUS_Failure(VKI_EINVAL);
               goto exit;
            }
            break;
         }
      }

      if (*argenv != '\1') {
         if (VG_(clo_trace_syscalls))
            VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
         SET_STATUS_Failure(VKI_EINVAL);
         goto exit;
      }
      argenv += 1;

      HChar *duplicate = VG_(strdup)("syswrap.spawn.6", argenv);
      VG_(addToXA)(current_xa, &duplicate);
      argenv += VG_(strlen)(argenv) + 1;
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\nARG1 = %#lx(%s)\n", ARG1, (HChar *) ARG1);
      VG_(printf)("ARG4 (argv) = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nARG4 (envp) = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Decide whether or not we want to trace the spawned child.
      Omit the executable name itself from child_argv. */
   const HChar **child_argv = VG_(malloc)("syswrap.spawn.7",
                              (VG_(sizeXA)(argv) - 1) * sizeof(HChar *));
   for (Word i = 1; i < VG_(sizeXA)(argv); i++) {
      child_argv[i - 1] = *(HChar **) VG_(indexXA)(argv, i);
   }
   Bool trace_this_child = VG_(should_we_trace_this_child)((HChar *) ARG1,
                                                           child_argv);
   VG_(free)(child_argv);

   /* If we're tracing the child, and the launcher name looks bogus (possibly
      because launcher.c couldn't figure it out, see comments therein) then we
      have no option but to fail. */
   if (trace_this_child &&
       (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
      SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
      goto exit;
   }

   /* Set up the child's exe path. */
   const HChar *path = (const HChar *) ARG1;
   const HChar *launcher_basename = NULL;
   if (trace_this_child) {
      /* We want to exec the launcher. */
      path = VG_(name_of_launcher);
      vg_assert(path != NULL);

      launcher_basename = VG_(strrchr)(path, '/');
      if ((launcher_basename == NULL) || (launcher_basename[1] == '\0')) {
         launcher_basename = path;  /* hmm, tres dubious */
      } else {
         launcher_basename++;
      }
   }

   /* Set up the child's environment.

      Remove the valgrind-specific stuff from the environment so the child
      doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
      unconditionally, since if we are tracing the child, the child valgrind
      will set up the appropriate client environment.

      Then, if tracing the child, set VALGRIND_LIB for it. */
   HChar **child_envp = VG_(calloc)("syswrap.spawn.8",
                                    VG_(sizeXA)(envp) + 1, sizeof(HChar *));
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      child_envp[i] = *(HChar **) VG_(indexXA)(envp, i);
   }
   VG_(env_remove_valgrind_env_stuff)(child_envp, /* ro_strings */ False,
                                      VG_(free));

   /* Stuff was removed from child_envp, reflect that in envp XArray. */
   VG_(dropTailXA)(envp, VG_(sizeXA)(envp));
   for (UInt i = 0; child_envp[i] != NULL; i++) {
      VG_(addToXA)(envp, &child_envp[i]);
   }
   VG_(free)(child_envp);

   if (trace_this_child) {
      /* Set VALGRIND_LIB in envp. */
      SizeT len = VG_(strlen)(VALGRIND_LIB) + VG_(strlen)(VG_(libdir)) + 2;
      HChar *valstr = VG_(malloc)("syswrap.spawn.9", len);
      VG_(sprintf)(valstr, "%s=%s", VALGRIND_LIB, VG_(libdir));
      VG_(addToXA)(envp, &valstr);
   }

   /* Set up the child's args. If not tracing it, they are left untouched.
      Otherwise, they are:

      [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG4[1..],

      except that the first VG_(args_for_valgrind_noexecpass) args are
      omitted. */
   if (trace_this_child) {
      vg_assert(VG_(args_for_valgrind) != NULL);
      vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
      vg_assert(VG_(args_for_valgrind_noexecpass)
                <= VG_(sizeXA)(VG_(args_for_valgrind)));

      /* So what args will there be? Bear with me... */
      /* ... launcher basename, ... */
      HChar *duplicate = VG_(strdup)("syswrap.spawn.10", launcher_basename);
      VG_(insertIndexXA)(argv, 0, &duplicate);

      /* ... Valgrind's args, ... */
      UInt v_args = VG_(sizeXA)(VG_(args_for_valgrind));
      v_args -= VG_(args_for_valgrind_noexecpass);
      for (Word i = VG_(args_for_valgrind_noexecpass);
           i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
         duplicate = VG_(strdup)("syswrap.spawn.11",
                                 *(HChar **) VG_(indexXA)(VG_(args_for_valgrind), i));
         VG_(insertIndexXA)(argv, 1 + i, &duplicate);
      }

      /* ... name of client executable, ... */
      duplicate = VG_(strdup)("syswrap.spawn.12", (HChar *) ARG1);
      VG_(insertIndexXA)(argv, 1 + v_args, &duplicate);

      /* ... and args for client executable (without [0]). */
      duplicate = *(HChar **) VG_(indexXA)(argv, 1 + v_args + 1);
      VG_(free)(duplicate);
      VG_(removeIndexXA)(argv, 1 + v_args + 1);
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\npath = %s\n", path);
      VG_(printf)("argv = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nenvp = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Set the signal state up for spawned child.

      Signals set to be caught are equivalent to signals set to the default
      action, from the child's perspective.

      Therefore query SCSS and prepare default (DFL) and ignore (IGN) signal
      sets. Then combine these sets with those passed from client, if flags
      POSIX_SPAWN_SETSIGDEF, or POSIX_SPAWN_SETSIGIGN_NP have been specified.
   */
   vki_sigset_t sig_default;
   vki_sigset_t sig_ignore;
   VG_(sigemptyset)(&sig_default);
   VG_(sigemptyset)(&sig_ignore);
   for (Int i = 1; i < VG_(max_signal); i++) {
      vki_sigaction_fromK_t sa;
      VG_(do_sys_sigaction)(i, NULL, &sa); /* query SCSS */
      if (sa.sa_handler == VKI_SIG_IGN) {
         VG_(sigaddset)(&sig_ignore, i);
      } else {
         VG_(sigaddset)(&sig_default, i);
      }
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGDEF) {
      VG_(sigaddset_from_set)(&spa->sa_sigdefault, &sig_default);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGDEF;
      spa->sa_sigdefault = sig_default;
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGIGN_NP) {
      VG_(sigaddset_from_set)(&spa->sa_sigignore, &sig_ignore);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGIGN_NP;
      spa->sa_sigignore = sig_ignore;
   }

   /* Set the signal mask for spawned child.

      Analogous to signal handlers: query SCSS for blocked signals mask
      and combine this mask with that passed from client, if flag
      POSIX_SPAWN_SETSIGMASK has been specified. */
   vki_sigset_t *sigmask = &VG_(get_ThreadState)(tid)->sig_mask;
   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGMASK) {
      VG_(sigaddset_from_set)(&spa->sa_sigmask, sigmask);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGMASK;
      spa->sa_sigmask = *sigmask;
   }

   /* Lastly, reconstruct argenv from argv + envp. */
   SizeT argenv_size = 1 + 1;
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(argv, i)) + 2;
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(envp, i)) + 2;
   }

   argenv = VG_(malloc)("syswrap.spawn.13", argenv_size);
   HChar *current = argenv;
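   /* argenv layout (mirroring the parsing loop above): each string is
      prefixed with '\1'; a lone '\0' ends the argv block and a second
      lone '\0' ends the envp block. */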
#define COPY_CHAR_TO_ARGENV(dst, character)   \
   do {                                       \
      *(dst) = character;                     \
      (dst) += 1;                             \
   } while (0)
#define COPY_STRING_TO_ARGENV(dst, src)       \
   do {                                       \
      COPY_CHAR_TO_ARGENV(dst, '\1');         \
      SizeT src_len = VG_(strlen)((src)) + 1; \
      VG_(memcpy)((dst), (src), src_len);     \
      (dst) += src_len;                       \
   } while (0)

   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(argv, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(envp, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   vg_assert(current == argenv + argenv_size);
#undef COPY_CHAR_TO_ARGENV
#undef COPY_STRING_TO_ARGENV

   /* Actual spawn() syscall. */
   SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) path, (UWord) attrs,
                                 attrs_size, (UWord) argenv, argenv_size);
   SET_STATUS_from_SysRes(res);
   VG_(free)(argenv);

   if (SUCCESS) {
      PRINT(" spawn: process %d spawned child %ld\n", VG_(getpid)(), RES);
   }

exit:
   VG_(free)(attrs);
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(argv, i));
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(envp, i));
   }
   VG_(deleteXA)(argv);
   VG_(deleteXA)(envp);
}
#endif /* SOLARIS_SPAWN_SYSCALL */

1633 /* Handles the case where the open is of /proc/self/psinfo or
1634 /proc/<pid>/psinfo. Fetch fresh contents into psinfo_t,
1635 fake fname, psargs, argc and argv. Write the structure to the fake
1636 file we cooked up at startup (in m_main) and give out a copy of this
1637 fd. Also seek the cloned fd back to the start. */
1638 static Bool handle_psinfo_open(SyscallStatus *status,
1639 Bool use_openat,
1640 const HChar *filename,
1641 Int arg1, UWord arg3, UWord arg4)
1643 if (!ML_(safe_to_deref)((const void *) filename, 1))
1644 return False;
1646 HChar name[VKI_PATH_MAX]; // large enough
1647 VG_(sprintf)(name, "/proc/%d/psinfo", VG_(getpid)());
1649 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/psinfo"))
1650 return False;
1652 /* Use original arguments to open() or openat(). */
1653 SysRes sres;
1654 #if defined(SOLARIS_OLD_SYSCALLS)
1655 if (use_openat)
1656 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1657 arg3, arg4);
1658 else
1659 sres = VG_(do_syscall3)(SYS_open, (UWord) filename, arg3, arg4);
1660 #else
1661 vg_assert(use_openat == True);
1662 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1663 arg3, arg4);
1664 #endif /* SOLARIS_OLD_SYSCALLS */
1666 if (sr_isError(sres)) {
1667 SET_STATUS_from_SysRes(sres);
1668 return True;
1670 Int fd = sr_Res(sres);
1672 vki_psinfo_t psinfo;
1673 sres = VG_(do_syscall3)(SYS_read, fd, (UWord) &psinfo, sizeof(psinfo));
1674 if (sr_isError(sres)) {
1675 SET_STATUS_from_SysRes(sres);
1676 VG_(close)(fd);
1677 return True;
1679 if (sr_Res(sres) != sizeof(psinfo)) {
1680 SET_STATUS_Failure(VKI_ENODATA);
1681 VG_(close)(fd);
1682 return True;
1685 VG_(close)(fd);
1687 VG_(client_fname)(psinfo.pr_fname, sizeof(psinfo.pr_fname), True);
1688 VG_(client_cmd_and_args)(psinfo.pr_psargs, sizeof(psinfo.pr_psargs));
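/* The initial client stack holds argc at the initial SP, immediately
   followed by the argv[] pointer array, so pr_argc and pr_argv can be
   faked directly from there. */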
1690 Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
1691 psinfo.pr_argc = *ptr++;
1692 psinfo.pr_argv = (Addr) ptr;
1694 sres = VG_(do_syscall4)(SYS_pwrite, VG_(cl_psinfo_fd),
1695 (UWord) &psinfo, sizeof(psinfo), 0);
1696 if (sr_isError(sres)) {
1697 SET_STATUS_from_SysRes(sres);
1698 return True;
1701 sres = VG_(dup)(VG_(cl_psinfo_fd));
1702 SET_STATUS_from_SysRes(sres);
1703 if (!sr_isError(sres)) {
1704 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1705 if (off < 0)
1706 SET_STATUS_Failure(VKI_EMFILE);
1709 return True;
1712 #if defined(SOLARIS_PROC_CMDLINE)
1713 /* Handles the case where the open is of /proc/self/cmdline or
1714 /proc/<pid>/cmdline. Just give it a copy of VG_(cl_cmdline_fd) for the
1715 fake file we cooked up at startup (in m_main). Also, seek the
1716 cloned fd back to the start. */
1717 static Bool handle_cmdline_open(SyscallStatus *status, const HChar *filename)
1719 if (!ML_(safe_to_deref)((const void *) filename, 1))
1720 return False;
1722 HChar name[VKI_PATH_MAX]; // large enough
1723 VG_(sprintf)(name, "/proc/%d/cmdline", VG_(getpid)());
1725 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/cmdline"))
1726 return False;
1728 SysRes sres = VG_(dup)(VG_(cl_cmdline_fd));
1729 SET_STATUS_from_SysRes(sres);
1730 if (!sr_isError(sres)) {
1731 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1732 if (off < 0)
1733 SET_STATUS_Failure(VKI_EMFILE);
1736 return True;
1738 #endif /* SOLARIS_PROC_CMDLINE */
1741 #if defined(SOLARIS_OLD_SYSCALLS)
1742 PRE(sys_open)
1744 /* int open(const char *filename, int flags);
1745 int open(const char *filename, int flags, mode_t mode); */
1747 if (ARG2 & VKI_O_CREAT) {
1748 /* 3-arg version */
1749 PRINT("sys_open ( %#lx(%s), %ld, %lu )", ARG1, (HChar *) ARG1,
1750 SARG2, ARG3);
1751 PRE_REG_READ3(long, "open", const char *, filename,
1752 int, flags, vki_mode_t, mode);
1753 } else {
1754 /* 2-arg version */
1755 PRINT("sys_open ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
1756 PRE_REG_READ2(long, "open", const char *, filename, int, flags);
1759 PRE_MEM_RASCIIZ("open(filename)", ARG1);
1761 if (ML_(handle_auxv_open)(status, (const HChar*)ARG1, ARG2))
1762 return;
1764 if (handle_psinfo_open(status, False /*use_openat*/, (const HChar*)ARG1, 0,
1765 ARG2, ARG3))
1766 return;
1768 *flags |= SfMayBlock;
1771 POST(sys_open)
1773 if (!ML_(fd_allowed)(RES, "open", tid, True)) {
1774 VG_(close)(RES);
1775 SET_STATUS_Failure(VKI_EMFILE);
1776 } else if (VG_(clo_track_fds))
1777 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
1779 #endif /* SOLARIS_OLD_SYSCALLS */
1781 PRE(sys_close)
1783 WRAPPER_PRE_NAME(generic, sys_close)(tid, layout, arrghs, status,
1784 flags);
1785 door_record_revoke(tid, ARG1);
1788 PRE(sys_linkat)
1790 /* int linkat(int fd1, const char *path1, int fd2,
1791 const char *path2, int flag);
1794 /* Interpret the first and third arguments as 32-bit values even on 64-bit
1795 architecture. This is different from Linux, for example, where glibc
1796 sign-extends them. */
1797 Int fd1 = (Int) ARG1;
1798 Int fd2 = (Int) ARG3;
1800 PRINT("sys_linkat ( %d, %#lx(%s), %d, %#lx(%s), %ld )",
1801 fd1, ARG2, (HChar *) ARG2, fd2, ARG4, (HChar *) ARG4, SARG5);
1802 PRE_REG_READ5(long, "linkat", int, fd1, const char *, path1,
1803 int, fd2, const char *, path2, int, flags);
1804 PRE_MEM_RASCIIZ("linkat(path1)", ARG2);
1805 PRE_MEM_RASCIIZ("linkat(path2)", ARG4);
1807 /* Be strict but ignore fd1/fd2 for absolute path1/path2. */
1808 if (fd1 != VKI_AT_FDCWD
1809 && ML_(safe_to_deref)((void *) ARG2, 1)
1810 && ((HChar *) ARG2)[0] != '/'
1811 && !ML_(fd_allowed)(fd1, "linkat", tid, False)) {
1812 SET_STATUS_Failure(VKI_EBADF);
1814 if (fd2 != VKI_AT_FDCWD
1815 && ML_(safe_to_deref)((void *) ARG4, 1)
1816 && ((HChar *) ARG4)[0] != '/'
1817 && !ML_(fd_allowed)(fd2, "linkat", tid, False)) {
1818 SET_STATUS_Failure(VKI_EBADF);
1821 *flags |= SfMayBlock;
1824 PRE(sys_symlinkat)
1826 /* int symlinkat(const char *path1, int fd, const char *path2); */
1828 /* Interpret the second argument as 32-bit value even on 64-bit architecture.
1829 This is different from Linux, for example, where glibc sign-extends it. */
1830 Int fd = (Int) ARG2;
1832 PRINT("sys_symlinkat ( %#lx(%s), %d, %#lx(%s) )",
1833 ARG1, (HChar *) ARG1, fd, ARG3, (HChar *) ARG3);
1834 PRE_REG_READ3(long, "symlinkat", const char *, path1, int, fd,
1835 const char *, path2);
1836 PRE_MEM_RASCIIZ("symlinkat(path1)", ARG1);
1837 PRE_MEM_RASCIIZ("symlinkat(path2)", ARG3);
1839 /* Be strict but ignore fd for absolute path2. */
1840 if (fd != VKI_AT_FDCWD
1841 && ML_(safe_to_deref)((void *) ARG3, 1)
1842 && ((HChar *) ARG3)[0] != '/'
1843 && !ML_(fd_allowed)(fd, "symlinkat", tid, False))
1844 SET_STATUS_Failure(VKI_EBADF);
1846 *flags |= SfMayBlock;
1849 PRE(sys_time)
1851 /* time_t time(); */
1852 PRINT("sys_time ( )");
1853 PRE_REG_READ0(long, "time");
1856 /* Data segment for brk (heap). It is an expandable anonymous mapping
1857 abutting a 1-page reservation. The data segment starts at VG_(brk_base)
1858 and runs up to VG_(brk_limit). Neither of these two values has to be
1859 page-aligned.
1860 Initial data segment is established (see initimg-solaris.c for rationale):
1861 - directly during client program image initialization,
1862 - or on demand when the executed program is the runtime linker itself,
1863 after it has loaded its target dynamic executable (see PRE(sys_mmapobj)),
1864 or when the first brk() syscall is made.
1866 Notable facts:
1867 - VG_(brk_base) is not page aligned; does not move
1868 - VG_(brk_limit) moves between [VG_(brk_base), data segment end]
1869 - data segment end is always page aligned
1870 - right after data segment end is 1-page reservation
1872    | heap     |               1 page
1873 +------+------+--------------+-------+
1874 | BSS  | anon |     anon     | resvn |
1875 +------+------+--------------+-------+
1877    ^   ^      ^              ^
1878    |   |      |              |
1879    |   |      |              data segment end
1880    |   |      VG_(brk_limit) -- no alignment constraint
1881    |   brk_base_pgup -- page aligned
1882    VG_(brk_base) -- not page aligned -- does not move
1884 Because VG_(brk_base) is not page-aligned and is initially located within the
1885 pre-established BSS (data) segment, special care has to be taken in the code
1886 below to handle this.
1888 The reservation segment is used to prevent the data segment from merging with
1889 a pre-existing segment. This should normally be no problem because the address
1890 space manager ensures that requests for client address space are satisfied from
1891 the highest available addresses. However, when memory is low, the data segment
1892 can run into mmap'ed objects and the reservation segment keeps them separated.
1893 The page that contains VG_(brk_base) is already allocated by the program's
1894 loaded data segment. The brk syscall wrapper handles this special case. */
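/* Illustrative example (hypothetical addresses, 4 KB pages): with
   VG_(brk_base) = 0x38000f00, brk_base_pgup is 0x38001000. Growing the brk
   to an address below 0x38001000 stays within the page already backed by
   the loaded data segment, so only VG_(brk_limit) moves. Growing past the
   current data segment end extends the anon mapping up to
   VG_PGROUNDUP(new_brk) and leaves the 1-page reservation just past the new
   data segment end. */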
1896 static Bool brk_segment_established = False;
1898 /* Establishes initial data segment for brk (heap). */
1899 Bool VG_(setup_client_dataseg)(void)
1901 /* Segment size is initially at least 1 MB and at most 8 MB. */
1902 SizeT m1 = 1024 * 1024;
1903 SizeT m8 = 8 * m1;
1904 SizeT initial_size = VG_(client_rlimit_data).rlim_cur;
1905 VG_(debugLog)(1, "syswrap-solaris", "Setup client data (brk) segment "
1906 "at %#lx\n", VG_(brk_base));
1907 if (initial_size < m1)
1908 initial_size = m1;
1909 if (initial_size > m8)
1910 initial_size = m8;
1911 initial_size = VG_PGROUNDUP(initial_size);
1913 Addr anon_start = VG_PGROUNDUP(VG_(brk_base));
1914 SizeT anon_size = VG_PGROUNDUP(initial_size);
1915 Addr resvn_start = anon_start + anon_size;
1916 SizeT resvn_size = VKI_PAGE_SIZE;
1918 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
1919 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
1920 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
1921 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
1922 vg_assert(VG_(brk_base) == VG_(brk_limit));
1924 /* Find the loaded data segment and remember its protection. */
1925 const NSegment *seg = VG_(am_find_nsegment)(VG_(brk_base) - 1);
1926 vg_assert(seg != NULL);
1927 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
1928 | (seg->hasW ? VKI_PROT_WRITE : 0)
1929 | (seg->hasX ? VKI_PROT_EXEC : 0);
1931 /* Try to create the data segment and associated reservation where
1932 VG_(brk_base) says. */
1933 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
1934 anon_size);
1935 if (!ok) {
1936 /* That didn't work, we're hosed. */
1937 return False;
1940 /* Map the data segment. */
1941 SysRes sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
1942 vg_assert(!sr_isError(sres));
1943 vg_assert(sr_Res(sres) == anon_start);
1945 brk_segment_established = True;
1946 return True;
1949 /* Tell the tool about the client data segment and then kill it which will
1950 make it initially inaccessible/unaddressable. */
1951 void VG_(track_client_dataseg)(ThreadId tid)
1953 const NSegment *seg = VG_(am_find_nsegment)(VG_PGROUNDUP(VG_(brk_base)));
1954 vg_assert(seg != NULL);
1955 vg_assert(seg->kind == SkAnonC);
1957 VG_TRACK(new_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base), tid);
1958 VG_TRACK(die_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base));
1961 static void PRINTF_CHECK(1, 2)
1962 possibly_complain_brk(const HChar *format, ...)
1964 static Bool alreadyComplained = False;
1965 if (!alreadyComplained) {
1966 alreadyComplained = True;
1967 if (VG_(clo_verbosity) > 0) {
1968 va_list vargs;
1969 va_start(vargs, format);
1970 VG_(vmessage)(Vg_UserMsg, format, vargs);
1971 va_end(vargs);
1972 VG_(umsg)("(See section Limitations in the user manual.)\n");
1973 VG_(umsg)("NOTE: further instances of this message will not be "
1974 "shown.\n");
1979 PRE(sys_brk)
1981 /* unsigned long brk(caddr_t end_data_segment); */
1982 /* The Solaris kernel returns 0 on success.
1983 In addition to this, brk(0) returns current data segment end. This is
1984 very different from the Linux kernel, for example. */
1986 Addr old_brk_limit = VG_(brk_limit);
1987 /* If VG_(brk_base) is page-aligned then old_brk_base_pgup is equal to
1988 VG_(brk_base). */
1989 Addr old_brk_base_pgup = VG_PGROUNDUP(VG_(brk_base));
1990 Addr new_brk = ARG1;
1991 const NSegment *seg, *seg2;
1993 PRINT("sys_brk ( %#lx )", ARG1);
1994 PRE_REG_READ1(unsigned long, "brk", vki_caddr_t, end_data_segment);
1996 if (new_brk == 0) {
1997 /* brk(0) - specific to Solaris 11 only. */
1998 SET_STATUS_Success(old_brk_limit);
1999 return;
2002 /* Handle some trivial cases. */
2003 if (new_brk == old_brk_limit) {
2004 SET_STATUS_Success(0);
2005 return;
2007 if (new_brk < VG_(brk_base)) {
2008 /* Clearly impossible. */
2009 SET_STATUS_Failure(VKI_ENOMEM);
2010 return;
2012 if (new_brk - VG_(brk_base) > VG_(client_rlimit_data).rlim_cur) {
2013 SET_STATUS_Failure(VKI_ENOMEM);
2014 return;
2017 /* The brk base and limit must have been already set. */
2018 vg_assert(VG_(brk_base) != -1);
2019 vg_assert(VG_(brk_limit) != -1);
2021 if (!brk_segment_established) {
2022 /* Stay sane (because there should have been no brk activity yet). */
2023 vg_assert(VG_(brk_base) == VG_(brk_limit));
2025 if (!VG_(setup_client_dataseg)()) {
2026 possibly_complain_brk("Cannot map memory to initialize brk segment in "
2027 "thread #%u at %#lx\n", tid, VG_(brk_base));
2028 SET_STATUS_Failure(VKI_ENOMEM);
2029 return;
2032 VG_(track_client_dataseg)(tid);
2035 if (new_brk < old_brk_limit) {
2036 /* Shrinking the data segment. Be lazy and don't munmap the excess
2037 area. */
2038 if (old_brk_limit > old_brk_base_pgup) {
2039 /* Calculate new local brk (=MAX(new_brk, old_brk_base_pgup)). */
2040 Addr new_brk_local;
2041 if (new_brk < old_brk_base_pgup)
2042 new_brk_local = old_brk_base_pgup;
2043 else
2044 new_brk_local = new_brk;
2046 /* Find a segment at the beginning and at the end of the shrunk
2047 range. */
2048 seg = VG_(am_find_nsegment)(new_brk_local);
2049 seg2 = VG_(am_find_nsegment)(old_brk_limit - 1);
2050 vg_assert(seg);
2051 vg_assert(seg->kind == SkAnonC);
2052 vg_assert(seg2);
2053 vg_assert(seg == seg2);
2055 /* Discard any translations and zero-out the area. */
2056 if (seg->hasT)
2057 VG_(discard_translations)(new_brk_local,
2058 old_brk_limit - new_brk_local,
2059 "do_brk(shrink)");
2060 /* Since we're being lazy and not unmapping pages, we have to zero out
2061 the area, so that if the area later comes back into circulation, it
2062 will be filled with zeroes, as if it really had been unmapped and
2063 later remapped. Be a bit paranoid and try hard to ensure we're not
2064 going to segfault by doing the write - check that segment is
2065 writable. */
2066 if (seg->hasW)
2067 VG_(memset)((void*)new_brk_local, 0, old_brk_limit - new_brk_local);
2070 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2071 if (new_brk < old_brk_base_pgup) {
2073 /* Calculate old local brk (=MIN(old_brk_limit, old_brk_base_pgup)). */
2073 Addr old_brk_local;
2074 if (old_brk_limit < old_brk_base_pgup)
2075 old_brk_local = old_brk_limit;
2076 else
2077 old_brk_local = old_brk_base_pgup;
2079 /* Find a segment at the beginning and at the end of the shrunk
2080 range. */
2081 seg = VG_(am_find_nsegment)(new_brk);
2082 seg2 = VG_(am_find_nsegment)(old_brk_local - 1);
2083 vg_assert(seg);
2084 vg_assert(seg2);
2085 vg_assert(seg == seg2);
2087 /* Discard any translations and zero-out the area. */
2088 if (seg->hasT)
2089 VG_(discard_translations)(new_brk, old_brk_local - new_brk,
2090 "do_brk(shrink)");
2091 if (seg->hasW)
2092 VG_(memset)((void*)new_brk, 0, old_brk_local - new_brk);
2095 /* We are done, update VG_(brk_limit), tell the tool about the changes,
2096 and leave. */
2097 VG_(brk_limit) = new_brk;
2098 VG_TRACK(die_mem_brk, new_brk, old_brk_limit - new_brk);
2099 SET_STATUS_Success(0);
2100 return;
2103 /* We are expanding the brk segment. */
2105 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2106 if (old_brk_limit < old_brk_base_pgup) {
2107 /* Calculate new local brk (=MIN(new_brk, old_brk_base_pgup)). */
2108 Addr new_brk_local;
2109 if (new_brk < old_brk_base_pgup)
2110 new_brk_local = new_brk;
2111 else
2112 new_brk_local = old_brk_base_pgup;
2114 /* Find a segment at the beginning and at the end of the expanded
2115 range. */
2116 seg = VG_(am_find_nsegment)(old_brk_limit);
2117 seg2 = VG_(am_find_nsegment)(new_brk_local - 1);
2118 vg_assert(seg);
2119 vg_assert(seg2);
2120 vg_assert(seg == seg2);
2122 /* Nothing else to do. */
2125 if (new_brk > old_brk_base_pgup) {
2126 /* Calculate old local brk (=MAX(old_brk_limit, old_brk_base_pgup)). */
2127 Addr old_brk_local;
2128 if (old_brk_limit < old_brk_base_pgup)
2129 old_brk_local = old_brk_base_pgup;
2130 else
2131 old_brk_local = old_brk_limit;
2133 /* Find a segment at the beginning of the expanded range. */
2134 if (old_brk_local > old_brk_base_pgup)
2135 seg = VG_(am_find_nsegment)(old_brk_local - 1);
2136 else
2137 seg = VG_(am_find_nsegment)(old_brk_local);
2138 vg_assert(seg);
2139 vg_assert(seg->kind == SkAnonC);
2141 /* Find the 1-page reservation segment. */
2142 seg2 = VG_(am_next_nsegment)(seg, True/*forwards*/);
2143 vg_assert(seg2);
2144 vg_assert(seg2->kind == SkResvn);
2145 vg_assert(seg->end + 1 == seg2->start);
2146 vg_assert(seg2->end - seg2->start + 1 == VKI_PAGE_SIZE);
2148 if (new_brk <= seg2->start) {
2149 /* Still fits within the existing anon segment, nothing to do. */
2150 } else {
2151 /* Data segment limit was already checked. */
2152 Addr anon_start = seg->end + 1;
2153 Addr resvn_start = VG_PGROUNDUP(new_brk);
2154 SizeT anon_size = resvn_start - anon_start;
2155 SizeT resvn_size = VKI_PAGE_SIZE;
2156 SysRes sres;
2158 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
2159 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
2160 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
2161 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
2162 vg_assert(anon_size > 0);
2164 /* Address space manager checks for free address space for us;
2165 reservation would not be otherwise created. */
2166 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
2167 anon_size);
2168 if (!ok) {
2169 possibly_complain_brk("brk segment overflow in thread #%u: can not "
2170 "grow to %#lx\n", tid, new_brk);
2171 SET_STATUS_Failure(VKI_ENOMEM);
2172 return;
2175 /* Establish protection from the existing segment. */
2176 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
2177 | (seg->hasW ? VKI_PROT_WRITE : 0)
2178 | (seg->hasX ? VKI_PROT_EXEC : 0);
2180 /* Address space manager will merge old and new data segments. */
2181 sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
2182 if (sr_isError(sres)) {
2183 possibly_complain_brk("Cannot map memory to grow brk segment in "
2184 "thread #%u to %#lx\n", tid, new_brk);
2185 SET_STATUS_Failure(VKI_ENOMEM);
2186 return;
2188 vg_assert(sr_Res(sres) == anon_start);
2190 seg = VG_(am_find_nsegment)(old_brk_base_pgup);
2191 seg2 = VG_(am_find_nsegment)(VG_PGROUNDUP(new_brk) - 1);
2192 vg_assert(seg);
2193 vg_assert(seg2);
2194 vg_assert(seg == seg2);
2195 vg_assert(new_brk <= seg->end + 1);
2199 /* We are done, update VG_(brk_limit), tell the tool about the changes, and
2200 leave. */
2201 VG_(brk_limit) = new_brk;
2202 VG_TRACK(new_mem_brk, old_brk_limit, new_brk - old_brk_limit, tid);
2203 SET_STATUS_Success(0);
2206 PRE(sys_stat)
2208 /* int stat(const char *path, struct stat *buf); */
2209 /* Note: We could use here the sys_newstat generic wrapper, but the 'new'
2210 in its name is rather confusing in the Solaris context, thus we provide
2211 our own wrapper. */
2212 PRINT("sys_stat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
2213 PRE_REG_READ2(long, "stat", const char *, path, struct stat *, buf);
2215 PRE_MEM_RASCIIZ("stat(path)", ARG1);
2216 PRE_MEM_WRITE("stat(buf)", ARG2, sizeof(struct vki_stat));
2219 POST(sys_stat)
2221 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2224 PRE(sys_lseek)
2226 /* off_t lseek(int fildes, off_t offset, int whence); */
2227 PRINT("sys_lseek ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2228 PRE_REG_READ3(long, "lseek", int, fildes, vki_off_t, offset, int, whence);
2230 /* Stay sane. */
2231 if (!ML_(fd_allowed)(ARG1, "lseek", tid, False))
2232 SET_STATUS_Failure(VKI_EBADF);
2235 PRE(sys_mount)
2237 /* int mount(const char *spec, const char *dir, int mflag, char *fstype,
2238 char *dataptr, int datalen, char *optptr, int optlen); */
2239 *flags |= SfMayBlock;
2240 if (ARG3 & VKI_MS_OPTIONSTR) {
2241 /* 8-argument mount */
2242 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %lu, "
2243 "%#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3,
2244 ARG4, (HChar *) ARG4, ARG5, ARG6, ARG7, (HChar *) ARG7, SARG8);
2245 PRE_REG_READ8(long, "mount", const char *, spec, const char *, dir,
2246 int, mflag, char *, fstype, char *, dataptr, int, datalen,
2247 char *, optptr, int, optlen);
2249 else if (ARG3 & VKI_MS_DATA) {
2250 /* 6-argument mount */
2251 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld )",
2252 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4,
2253 (HChar *) ARG4, ARG5, SARG6);
2254 PRE_REG_READ6(long, "mount", const char *, spec, const char *, dir,
2255 int, mflag, char *, fstype, char *, dataptr,
2256 int, datalen);
2258 else {
2259 /* 4-argument mount */
2260 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s) )", ARG1,
2261 (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4, (HChar *) ARG4);
2262 PRE_REG_READ4(long, "mount", const char *, spec, const char *, dir,
2263 int, mflag, char *, fstype);
2265 if (ARG1)
2266 PRE_MEM_RASCIIZ("mount(spec)", ARG1);
2267 PRE_MEM_RASCIIZ("mount(dir)", ARG2);
2268 if (ARG4 && ARG4 >= 256) {
2269 /* If ARG4 < 256, then it's an index to a fs table in the kernel. */
2270 PRE_MEM_RASCIIZ("mount(fstype)", ARG4);
2272 if (ARG3 & (VKI_MS_DATA | VKI_MS_OPTIONSTR)) {
2273 if (ARG5)
2274 PRE_MEM_READ("mount(dataptr)", ARG5, ARG6);
2275 if ((ARG3 & VKI_MS_OPTIONSTR) && ARG7) {
2276 /* in/out buffer */
2277 PRE_MEM_RASCIIZ("mount(optptr)", ARG7);
2278 PRE_MEM_WRITE("mount(optptr)", ARG7, ARG8);
2283 POST(sys_mount)
2285 if (ARG3 & VKI_MS_OPTIONSTR) {
2286 POST_MEM_WRITE(ARG7, VG_(strlen)((HChar*)ARG7) + 1);
2287 } else if (ARG3 & VKI_MS_DATA) {
2288 if ((ARG2) &&
2289 (ARG3 & MS_NOMNTTAB) &&
2290 (VG_STREQ((HChar *) ARG4, "namefs")) &&
2291 (ARG6 == sizeof(struct vki_namefd)) &&
2292 ML_(safe_to_deref)((void *) ARG5, ARG6)) {
2293 /* Most likely an fattach() call for a door file descriptor. */
2294 door_record_server_fattach(((struct vki_namefd *) ARG5)->fd,
2295 (HChar *) ARG2);
2300 PRE(sys_readlinkat)
2302 /* ssize_t readlinkat(int dfd, const char *path, char *buf,
2303 size_t bufsiz); */
2304 HChar name[30]; // large enough
2305 Word saved = SYSNO;
2307 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2308 This is different from Linux, for example, where glibc sign-extends it. */
2309 Int dfd = (Int) ARG1;
2311 PRINT("sys_readlinkat ( %d, %#lx(%s), %#lx, %ld )", dfd, ARG2,
2312 (HChar *) ARG2, ARG3, SARG4);
2313 PRE_REG_READ4(long, "readlinkat", int, dfd, const char *, path,
2314 char *, buf, int, bufsiz);
2315 PRE_MEM_RASCIIZ("readlinkat(path)", ARG2);
2316 PRE_MEM_WRITE("readlinkat(buf)", ARG3, ARG4);
2318 /* Be strict but ignore dfd for absolute path. */
2319 if (dfd != VKI_AT_FDCWD
2320 && ML_(safe_to_deref)((void *) ARG2, 1)
2321 && ((HChar *) ARG2)[0] != '/'
2322 && !ML_(fd_allowed)(dfd, "readlinkat", tid, False)) {
2323 SET_STATUS_Failure(VKI_EBADF);
2324 return;
2327 /* Handle the case where readlinkat is looking at /proc/self/path/a.out or
2328 /proc/<pid>/path/a.out. */
2329 VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
2330 if (ML_(safe_to_deref)((void*)ARG2, 1) &&
2331 (!VG_(strcmp)((HChar*)ARG2, name) ||
2332 !VG_(strcmp)((HChar*)ARG2, "/proc/self/path/a.out"))) {
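/* Redirect the lookup through the fd of the client executable so the
   client sees the path of its own a.out rather than that of the Valgrind
   tool binary which was actually exec'd. */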
2333 VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
2334 SET_STATUS_from_SysRes(VG_(do_syscall4)(saved, dfd, (UWord)name, ARG3,
2335 ARG4));
2339 POST(sys_readlinkat)
2341 POST_MEM_WRITE(ARG3, RES);
2344 PRE(sys_stime)
2346 /* Kernel: int stime(time_t time); */
2347 PRINT("sys_stime ( %lu )", ARG1);
2348 PRE_REG_READ1(long, "stime", vki_time_t, time);
2351 PRE(sys_fstat)
2353 /* int fstat(int fildes, struct stat *buf); */
2354 /* Note: We could use here the sys_newfstat generic wrapper, but the 'new'
2355 in its name is rather confusing in the Solaris context, thus we provide
2356 our own wrapper. */
2357 PRINT("sys_fstat ( %ld, %#lx )", SARG1, ARG2);
2358 PRE_REG_READ2(long, "fstat", int, fildes, struct stat *, buf);
2359 PRE_MEM_WRITE("fstat(buf)", ARG2, sizeof(struct vki_stat));
2361 /* Be strict. */
2362 if (!ML_(fd_allowed)(ARG1, "fstat", tid, False))
2363 SET_STATUS_Failure(VKI_EBADF);
2366 POST(sys_fstat)
2368 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2371 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
2372 PRE(sys_frealpathat)
2374 /* int frealpathat(int fd, char *path, char *buf, size_t buflen); */
2376 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2377 This is different from Linux, for example, where glibc sign-extends it. */
2378 Int fd = (Int) ARG1;
2380 PRINT("sys_frealpathat ( %d, %#lx(%s), %#lx, %lu )",
2381 fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
2382 PRE_REG_READ4(long, "frealpathat", int, fd, char *, path,
2383 char *, buf, vki_size_t, buflen);
2384 PRE_MEM_RASCIIZ("frealpathat(path)", ARG2);
2385 PRE_MEM_WRITE("frealpathat(buf)", ARG3, ARG4);
2387 /* Be strict but ignore fd for absolute path. */
2388 if (fd != VKI_AT_FDCWD
2389 && ML_(safe_to_deref)((void *) ARG2, 1)
2390 && ((HChar *) ARG2)[0] != '/'
2391 && !ML_(fd_allowed)(fd, "frealpathat", tid, False))
2392 SET_STATUS_Failure(VKI_EBADF);
2395 POST(sys_frealpathat)
2397 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
2399 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
2401 PRE(sys_stty)
2403 /* int stty(int fd, const struct sgttyb *tty); */
2404 PRINT("sys_stty ( %ld, %#lx )", SARG1, ARG2);
2405 PRE_REG_READ2(long, "stty", int, fd,
2406 const struct vki_sgttyb *, tty);
2407 PRE_MEM_READ("stty(tty)", ARG2, sizeof(struct vki_sgttyb));
2409 /* Be strict. */
2410 if (!ML_(fd_allowed)(ARG1, "stty", tid, False))
2411 SET_STATUS_Failure(VKI_EBADF);
2414 PRE(sys_gtty)
2416 /* int gtty(int fd, struct sgttyb *tty); */
2417 PRINT("sys_gtty ( %ld, %#lx )", SARG1, ARG2);
2418 PRE_REG_READ2(long, "gtty", int, fd, struct vki_sgttyb *, tty);
2419 PRE_MEM_WRITE("gtty(tty)", ARG2, sizeof(struct vki_sgttyb));
2421 /* Be strict. */
2422 if (!ML_(fd_allowed)(ARG1, "gtty", tid, False))
2423 SET_STATUS_Failure(VKI_EBADF);
2426 POST(sys_gtty)
2428 POST_MEM_WRITE(ARG2, sizeof(struct vki_sgttyb));
2431 PRE(sys_pgrpsys)
2433 /* Kernel: int setpgrp(int flag, int pid, int pgid); */
2434 switch (ARG1 /*flag*/) {
2435 case 0:
2436 /* Libc: pid_t getpgrp(void); */
2437 PRINT("sys_pgrpsys ( %ld )", SARG1);
2438 PRE_REG_READ1(long, SC2("pgrpsys", "getpgrp"), int, flag);
2439 break;
2440 case 1:
2441 /* Libc: pid_t setpgrp(void); */
2442 PRINT("sys_pgrpsys ( %ld )", SARG1);
2443 PRE_REG_READ1(long, SC2("pgrpsys", "setpgrp"), int, flag);
2444 break;
2445 case 2:
2446 /* Libc: pid_t getsid(pid_t pid); */
2447 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2448 PRE_REG_READ2(long, SC2("pgrpsys", "getsid"), int, flag,
2449 vki_pid_t, pid);
2450 break;
2451 case 3:
2452 /* Libc: pid_t setsid(void); */
2453 PRINT("sys_pgrpsys ( %ld )", SARG1);
2454 PRE_REG_READ1(long, SC2("pgrpsys", "setsid"), int, flag);
2455 break;
2456 case 4:
2457 /* Libc: pid_t getpgid(pid_t pid); */
2458 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2459 PRE_REG_READ2(long, SC2("pgrpsys", "getpgid"), int, flag,
2460 vki_pid_t, pid);
2461 break;
2462 case 5:
2463 /* Libc: int setpgid(pid_t pid, pid_t pgid); */
2464 PRINT("sys_pgrpsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2465 PRE_REG_READ3(long, SC2("pgrpsys", "setpgid"), int, flag,
2466 vki_pid_t, pid, vki_pid_t, pgid);
2467 break;
2468 default:
2469 VG_(unimplemented)("Syswrap of the pgrpsys call with flag %ld.", SARG1);
2470 /*NOTREACHED*/
2471 break;
2475 PRE(sys_pipe)
2477 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2478 /* int pipe(int fildes[2], int flags); */
2479 PRINT("sys_pipe ( %#lx, %ld )", ARG1, SARG2);
2480 PRE_REG_READ2(long, "pipe", int *, fildes, int, flags);
2481 PRE_MEM_WRITE("pipe(fildes)", ARG1, 2 * sizeof(int));
2482 #else
2483 /* longlong_t pipe(); */
2484 PRINT("sys_pipe ( )");
2485 PRE_REG_READ0(long, "pipe");
2486 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2489 POST(sys_pipe)
2491 Int p0, p1;
2493 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2494 int *fds = (int*)ARG1;
2495 p0 = fds[0];
2496 p1 = fds[1];
2497 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
2498 #else
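/* The legacy pipe() variant returns both new descriptors in the two
   return registers, i.e. RES and RESHI. */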
2499 p0 = RES;
2500 p1 = RESHI;
2501 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2503 if (!ML_(fd_allowed)(p0, "pipe", tid, True) ||
2504 !ML_(fd_allowed)(p1, "pipe", tid, True)) {
2505 VG_(close)(p0);
2506 VG_(close)(p1);
2507 SET_STATUS_Failure(VKI_EMFILE);
2509 else if (VG_(clo_track_fds)) {
2510 ML_(record_fd_open_nameless)(tid, p0);
2511 ML_(record_fd_open_nameless)(tid, p1);
2515 PRE(sys_faccessat)
2517 /* int faccessat(int fd, const char *path, int amode, int flag); */
2519 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2520 This is different from Linux, for example, where glibc sign-extends it. */
2521 Int fd = (Int) ARG1;
2523 PRINT("sys_faccessat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2524 (HChar *) ARG2, SARG3, SARG4);
2525 PRE_REG_READ4(long, "faccessat", int, fd, const char *, path,
2526 int, amode, int, flag);
2527 PRE_MEM_RASCIIZ("faccessat(path)", ARG2);
2529 /* Be strict but ignore fd for absolute path. */
2530 if (fd != VKI_AT_FDCWD
2531 && ML_(safe_to_deref)((void *) ARG2, 1)
2532 && ((HChar *) ARG2)[0] != '/'
2533 && !ML_(fd_allowed)(fd, "faccessat", tid, False))
2534 SET_STATUS_Failure(VKI_EBADF);
2537 PRE(sys_mknodat)
2539 /* int mknodat(int fd, char *fname, mode_t fmode, dev_t dev); */
2541 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2542 This is different from Linux, for example, where glibc sign-extends it. */
2543 Int fd = (Int) ARG1;
2545 PRINT("sys_mknodat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2546 (HChar *) ARG2, SARG3, SARG4);
2547 PRE_REG_READ4(long, "mknodat", int, fd, const char *, fname,
2548 vki_mode_t, fmode, vki_dev_t, dev);
2549 PRE_MEM_RASCIIZ("mknodat(fname)", ARG2);
2551 /* Be strict but ignore fd for absolute path. */
2552 if (fd != VKI_AT_FDCWD
2553 && ML_(safe_to_deref)((void *) ARG2, 1)
2554 && ((HChar *) ARG2)[0] != '/'
2555 && !ML_(fd_allowed)(fd, "mknodat", tid, False))
2556 SET_STATUS_Failure(VKI_EBADF);
2558 *flags |= SfMayBlock;
2561 POST(sys_mknodat)
2563 if (!ML_(fd_allowed)(RES, "mknodat", tid, True)) {
2564 VG_(close)(RES);
2565 SET_STATUS_Failure(VKI_EMFILE);
2566 } else if (VG_(clo_track_fds))
2567 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
2570 PRE(sys_sysi86)
2572 /* int sysi86(int cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); */
2573 PRINT("sys_sysi86 ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
2574 PRE_REG_READ4(long, "sysi86", int, cmd, uintptr_t, arg1, uintptr_t, arg2,
2575 uintptr_t, arg3);
2577 switch (ARG1 /*cmd*/) {
2578 case VKI_SI86FPSTART:
2579 PRE_MEM_WRITE("sysi86(fp_hw)", ARG2, sizeof(vki_uint_t));
2580 /* ARG3 is a desired x87 FCW value, ARG4 is a desired SSE MXCSR value.
2581 They are passed to the kernel but V will change them later anyway
2582 (this is a general Valgrind limitation described in the official
2583 documentation). */
2584 break;
2585 default:
2586 VG_(unimplemented)("Syswrap of the sysi86 call with cmd %ld.", SARG1);
2587 /*NOTREACHED*/
2588 break;
2592 POST(sys_sysi86)
2594 switch (ARG1 /*cmd*/) {
2595 case VKI_SI86FPSTART:
2596 POST_MEM_WRITE(ARG2, sizeof(vki_uint_t));
2597 break;
2598 default:
2599 vg_assert(0);
2600 break;
2604 PRE(sys_shmsys)
2606 /* Kernel: uintptr_t shmsys(int opcode, uintptr_t a0, uintptr_t a1,
2607 uintptr_t a2, uintptr_t a3);
2609 *flags |= SfMayBlock;
2611 switch (ARG1 /*opcode*/) {
2612 case VKI_SHMAT:
2613 /* Libc: void *shmat(int shmid, const void *shmaddr, int shmflg); */
2614 PRINT("sys_shmsys ( %ld, %ld, %#lx, %ld )",
2615 SARG1, SARG2, ARG3, SARG4);
2616 PRE_REG_READ4(long, SC2("shmsys", "shmat"), int, opcode,
2617 int, shmid, const void *, shmaddr, int, shmflg);
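/* ML_(generic_PRE_sys_shmat) validates the requested attach address (or
   picks a suitable one when shmaddr is NULL) and returns 0 if the segment
   cannot be accommodated; rewrite ARG3 so the kernel attaches at the
   address the address space manager agreed on. */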
2619 UWord addr = ML_(generic_PRE_sys_shmat)(tid, ARG2, ARG3, ARG4);
2620 if (addr == 0)
2621 SET_STATUS_Failure(VKI_EINVAL);
2622 else
2623 ARG3 = addr;
2624 break;
2626 case VKI_SHMCTL:
2627 /* Libc: int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
2628 switch (ARG3 /* cmd */) {
2629 case VKI_SHM_LOCK:
2630 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2631 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "lock"),
2632 int, opcode, int, shmid, int, cmd);
2633 break;
2634 case VKI_SHM_UNLOCK:
2635 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2636 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "unlock"),
2637 int, opcode, int, shmid, int, cmd);
2638 break;
2639 case VKI_IPC_RMID:
2640 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2641 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "rmid"),
2642 int, opcode, int, shmid, int, cmd);
2643 break;
2644 case VKI_IPC_SET:
2645 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2646 SARG1, SARG2, SARG3, ARG4);
2647 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set"),
2648 int, opcode, int, shmid, int, cmd,
2649 struct vki_shmid_ds *, buf);
2651 struct vki_shmid_ds *buf = (struct vki_shmid_ds *) ARG4;
2652 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.uid)",
2653 buf->shm_perm.uid);
2654 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.gid)",
2655 buf->shm_perm.gid);
2656 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.mode)",
2657 buf->shm_perm.mode);
2658 break;
2659 case VKI_IPC_STAT:
2660 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2661 SARG1, SARG2, SARG3, ARG4);
2662 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat"),
2663 int, opcode, int, shmid, int, cmd,
2664 struct vki_shmid_ds *, buf);
2665 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat, buf)", ARG4,
2666 sizeof(struct vki_shmid_ds));
2667 break;
2668 case VKI_IPC_SET64:
2669 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2670 SARG1, SARG2, SARG3, ARG4);
2671 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set64"),
2672 int, opcode, int, shmid, int, cmd,
2673 struct vki_shmid_ds64 *, buf);
2675 struct vki_shmid_ds64 *buf64 = (struct vki_shmid_ds64 *) ARG4;
2676 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2677 "buf->shmx_perm.ipcx_uid)",
2678 buf64->shmx_perm.ipcx_uid);
2679 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2680 "buf->shmx_perm.ipcx_gid)",
2681 buf64->shmx_perm.ipcx_gid);
2682 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2683 "buf->shmx_perm.ipcx_mode)",
2684 buf64->shmx_perm.ipcx_mode);
2685 break;
2686 case VKI_IPC_STAT64:
2687 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2688 SARG1, SARG2, SARG3, ARG4);
2689 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat64"),
2690 int, opcode, int, shmid, int, cmd,
2691 struct vki_shmid_ds64 *, buf);
2692 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat64, buf)", ARG4,
2693 sizeof(struct vki_shmid_ds64));
2694 break;
2695 #if defined(SOLARIS_SHM_NEW)
2696 case VKI_IPC_XSTAT64:
2697 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2698 SARG1, SARG2, SARG3, ARG4);
2699 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "xstat64"),
2700 int, opcode, int, shmid, int, cmd,
2701 struct vki_shmid_ds64 *, buf);
2702 PRE_MEM_WRITE("shmsys(shmctl, ipc_xstat64, buf)", ARG4,
2703 sizeof(struct vki_shmid_xds64));
2704 break;
2705 #endif /* SOLARIS_SHM_NEW */
2706 default:
2707 VG_(unimplemented)("Syswrap of the shmsys(shmctl) call with "
2708 "cmd %ld.", SARG3);
2709 /*NOTREACHED*/
2710 break;
2712 break;
2714 case VKI_SHMDT:
2715 /* Libc: int shmdt(const void *shmaddr); */
2716 PRINT("sys_shmsys ( %ld, %#lx )", SARG1, ARG2);
2717 PRE_REG_READ2(long, SC2("shmsys", "shmdt"), int, opcode,
2718 const void *, shmaddr);
2720 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG2))
2721 SET_STATUS_Failure(VKI_EINVAL);
2722 break;
2724 case VKI_SHMGET:
2725 /* Libc: int shmget(key_t key, size_t size, int shmflg); */
2726 PRINT("sys_shmsys ( %ld, %ld, %lu, %lu )",
2727 SARG1, SARG2, ARG3, ARG4);
2728 PRE_REG_READ4(long, SC2("shmsys", "shmget"), int, opcode,
2729 vki_key_t, key, vki_size_t, size, int, shmflg);
2730 break;
2732 case VKI_SHMIDS:
2733 /* Libc: int shmids(int *buf, uint_t nids, uint_t *pnids); */
2734 PRINT("sys_shmsys ( %ld, %#lx, %lu, %#lx )",
2735 SARG1, ARG2, ARG3, ARG4);
2736 PRE_REG_READ4(long, SC2("shmsys", "shmids"), int, opcode,
2737 int *, buf, vki_uint_t, nids, vki_uint_t *, pnids);
2739 PRE_MEM_WRITE("shmsys(shmids, buf)", ARG2, ARG3 * sizeof(int *));
2740 PRE_MEM_WRITE("shmsys(shmids, pnids)", ARG4, sizeof(vki_uint_t));
2741 break;
2743 #if defined(SOLARIS_SHM_NEW)
2744 case VKI_SHMADV:
2745 /* Libc: int shmadv(int shmid, uint_t cmd, uint_t *advice); */
2746 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2747 SARG1, SARG2, ARG3, ARG4);
2748 PRE_REG_READ4(long, SC2("shmsys", "shmadv"), int, opcode,
2749 int, shmid, vki_uint_t, cmd, vki_uint_t *, advice);
2751 switch (ARG3 /*cmd*/) {
2752 case VKI_SHM_ADV_GET:
2753 PRE_MEM_WRITE("shmsys(shmadv, advice)", ARG4,
2754 sizeof(vki_uint_t));
2755 break;
2756 case VKI_SHM_ADV_SET:
2757 PRE_MEM_READ("shmsys(shmadv, advice)", ARG4,
2758 sizeof(vki_uint_t));
2759 break;
2760 default:
2761 VG_(unimplemented)("Syswrap of the shmsys(shmadv) call with "
2762 "cmd %lu.", ARG3);
2763 /*NOTREACHED*/
2764 break;
2766 break;
2768 case VKI_SHMGET_OSM:
2769 /* Libc: int shmget_osm(key_t key, size_t size, int shmflg,
2770 size_t granule_sz);
2772 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld, %lu )",
2773 SARG1, SARG2, ARG3, SARG4, ARG5);
2774 PRE_REG_READ5(long, SC2("shmsys", "shmget_osm"), int, opcode,
2775 vki_key_t, key, vki_size_t, size, int, shmflg,
2776 vki_size_t, granule_sz);
2777 break;
2778 #endif /* SOLARIS_SHM_NEW */
2780 default:
2781 VG_(unimplemented)("Syswrap of the shmsys call with opcode %ld.",
2782 SARG1);
2783 /*NOTREACHED*/
2784 break;
2788 POST(sys_shmsys)
2790 switch (ARG1 /*opcode*/) {
2791 case VKI_SHMAT:
2792 ML_(generic_POST_sys_shmat)(tid, RES, ARG2, ARG3, ARG4);
2793 break;
2795 case VKI_SHMCTL:
2796 switch (ARG3 /*cmd*/) {
2797 case VKI_SHM_LOCK:
2798 case VKI_SHM_UNLOCK:
2799 case VKI_IPC_RMID:
2800 case VKI_IPC_SET:
2801 break;
2802 case VKI_IPC_STAT:
2803 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds));
2804 break;
2805 case VKI_IPC_SET64:
2806 break;
2807 case VKI_IPC_STAT64:
2808 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds64));
2809 break;
2810 #if defined(SOLARIS_SHM_NEW)
2811 case VKI_IPC_XSTAT64:
2812 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_xds64));
2813 break;
2814 #endif /* SOLARIS_SHM_NEW */
2815 default:
2816 vg_assert(0);
2817 break;
2819 break;
2821 case VKI_SHMDT:
2822 ML_(generic_POST_sys_shmdt)(tid, RES, ARG2);
2823 break;
2825 case VKI_SHMGET:
2826 break;
2828 case VKI_SHMIDS:
2830 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
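/* The reported total number of ids lands in *pnids; the id buffer itself
   has been filled in only when that count fits into the caller-supplied
   buffer of ARG3 entries. */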
2832 vki_uint_t *pnids = (vki_uint_t *) ARG4;
2833 if (*pnids <= ARG3)
2834 POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
2836 break;
2838 #if defined(SOLARIS_SHM_NEW)
2839 case VKI_SHMADV:
2840 switch (ARG3 /*cmd*/) {
2841 case VKI_SHM_ADV_GET:
2842 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2843 break;
2844 case VKI_SHM_ADV_SET:
2845 break;
2846 default:
2847 vg_assert(0);
2848 break;
2850 break;
2852 case VKI_SHMGET_OSM:
2853 break;
2854 #endif /* SOLARIS_SHM_NEW */
2856 default:
2857 vg_assert(0);
2858 break;
2862 PRE(sys_semsys)
2864 /* Kernel: int semsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
2865 uintptr_t a4);
2867 *flags |= SfMayBlock;
2869 switch (ARG1 /*opcode*/) {
2870 case VKI_SEMCTL:
2871 /* Libc: int semctl(int semid, int semnum, int cmd...); */
2872 switch (ARG4) {
2873 case VKI_IPC_STAT:
2874 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2875 SARG1, SARG2, SARG3, SARG4, ARG5);
2876 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat"), int, opcode,
2877 int, semid, int, semnum, int, cmd,
2878 struct vki_semid_ds *, arg);
2879 break;
2880 case VKI_IPC_SET:
2881 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2882 SARG1, SARG2, SARG3, SARG4, ARG5);
2883 PRE_REG_READ5(long, SC3("semsys", "semctl", "set"), int, opcode,
2884 int, semid, int, semnum, int, cmd,
2885 struct vki_semid_ds *, arg);
2886 break;
2887 case VKI_IPC_STAT64:
2888 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2889 SARG1, SARG2, SARG3, SARG4, ARG5);
2890 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat64"), int, opcode,
2891 int, semid, int, semnum, int, cmd,
2892 struct vki_semid64_ds *, arg);
2893 break;
2894 case VKI_IPC_SET64:
2895 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2896 SARG1, SARG2, SARG3, SARG4, ARG5);
2897 PRE_REG_READ5(long, SC3("semsys", "semctl", "set64"), int, opcode,
2898 int, semid, int, semnum, int, cmd,
2899 struct vki_semid64_ds *, arg);
2900 break;
2901 case VKI_IPC_RMID:
2902 PRINT("sys_semsys ( %ld, %ld, %ld )", SARG1, SARG3, SARG4);
2903 PRE_REG_READ3(long, SC3("semsys", "semctl", "rmid"), int, opcode,
2904 int, semid, int, cmd);
2905 break;
2906 case VKI_GETALL:
2907 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2908 SARG1, SARG2, SARG4, ARG5);
2909 PRE_REG_READ4(long, SC3("semsys", "semctl", "getall"), int, opcode,
2910 int, semid, int, cmd, ushort_t *, arg);
2911 break;
2912 case VKI_SETALL:
2913 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2914 SARG1, SARG2, SARG4, ARG5);
2915 PRE_REG_READ4(long, SC3("semsys", "semctl", "setall"), int, opcode,
2916 int, semid, int, cmd, ushort_t *, arg);
2917 break;
2918 case VKI_GETVAL:
2919 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2920 SARG1, SARG2, SARG3, SARG4);
2921 PRE_REG_READ4(long, SC3("semsys", "semctl", "getval"), int, opcode,
2922 int, semid, int, semnum, int, cmd);
2923 break;
2924 case VKI_SETVAL:
2925 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2926 SARG1, SARG2, SARG3, SARG4, ARG5);
2927 PRE_REG_READ5(long, SC3("semsys", "semctl", "setval"), int, opcode,
2928 int, semid, int, semnum, int, cmd,
2929 union vki_semun *, arg);
2930 break;
2931 case VKI_GETPID:
2932 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2933 SARG1, SARG2, SARG3, SARG4);
2934 PRE_REG_READ4(long, SC3("semsys", "semctl", "getpid"), int, opcode,
2935 int, semid, int, semnum, int, cmd);
2936 break;
2937 case VKI_GETNCNT:
2938 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2939 SARG1, SARG2, SARG3, SARG4);
2940 PRE_REG_READ4(long, SC3("semsys", "semctl", "getncnt"),
2941 int, opcode, int, semid, int, semnum, int, cmd);
2942 break;
2943 case VKI_GETZCNT:
2944 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2945 SARG1, SARG2, SARG3, SARG4);
2946 PRE_REG_READ4(long, SC3("semsys", "semctl", "getzcnt"),
2947 int, opcode, int, semid, int, semnum, int, cmd);
2948 break;
2949 default:
2950 VG_(unimplemented)("Syswrap of the semsys(semctl) call "
2951 "with cmd %ld.", SARG4);
2952 /*NOTREACHED*/
2953 break;
2955 ML_(generic_PRE_sys_semctl)(tid, ARG2, ARG3, ARG4, ARG5);
2956 break;
2957 case VKI_SEMGET:
2958 /* Libc: int semget(key_t key, int nsems, int semflg); */
2959 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )", SARG1, SARG2, SARG3, SARG4);
2960 PRE_REG_READ4(long, SC2("semsys", "semget"), int, opcode,
2961 vki_key_t, key, int, nsems, int, semflg);
2962 break;
2963 case VKI_SEMOP:
2964 /* Libc: int semop(int semid, struct sembuf *sops, size_t nsops); */
2965 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
2966 PRE_REG_READ4(long, SC2("semsys", "semop"), int, opcode, int, semid,
2967 struct vki_sembuf *, sops, vki_size_t, nsops);
2968 ML_(generic_PRE_sys_semop)(tid, ARG2, ARG3, ARG4);
2969 break;
2970 case VKI_SEMIDS:
2971 /* Libc: int semids(int *buf, uint_t nids, uint_t *pnids); */
2972 PRINT("sys_semsys ( %ld, %#lx, %lu, %#lx )", SARG1, ARG2, ARG3, ARG4);
2973 PRE_REG_READ4(long, SC2("semsys", "semids"), int, opcode, int *, buf,
2974 vki_uint_t, nids, vki_uint_t *, pnids);
2976 PRE_MEM_WRITE("semsys(semids, buf)", ARG2, ARG3 * sizeof(int *));
2977 PRE_MEM_WRITE("semsys(semids, pnids)", ARG4, sizeof(vki_uint_t));
2978 break;
2979 case VKI_SEMTIMEDOP:
2980 /* Libc: int semtimedop(int semid, struct sembuf *sops, size_t nsops,
2981 const struct timespec *timeout);
2983 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu, %#lx )", SARG1, SARG2, ARG3,
2984 ARG4, ARG5);
2985 PRE_REG_READ5(long, SC2("semsys", "semtimedop"), int, opcode,
2986 int, semid, struct vki_sembuf *, sops, vki_size_t, nsops,
2987 struct vki_timespec *, timeout);
2988 ML_(generic_PRE_sys_semtimedop)(tid, ARG2, ARG3, ARG4, ARG5);
2989 break;
2990 default:
2991 VG_(unimplemented)("Syswrap of the semsys call with opcode %ld.", SARG1);
2992 /*NOTREACHED*/
2993 break;
2997 POST(sys_semsys)
2999 switch (ARG1 /*opcode*/) {
3000 case VKI_SEMCTL:
3001 ML_(generic_POST_sys_semctl)(tid, RES, ARG2, ARG3, ARG4, ARG5);
3002 break;
3003 case VKI_SEMGET:
3004 case VKI_SEMOP:
3005 break;
3006 case VKI_SEMIDS:
3008 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
3010 uint_t *pnids = (uint_t *)ARG4;
3011 if (*pnids <= ARG3)
3012 POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
3014 break;
3015 case VKI_SEMTIMEDOP:
3016 break;
3017 default:
3018 vg_assert(0);
3019 break;
3023 /* ---------------------------------------------------------------------
3024 ioctl wrappers
3025 ------------------------------------------------------------------ */
3027 PRE(sys_ioctl)
3029 /* int ioctl(int fildes, int request, ...); */
3030 *flags |= SfMayBlock;
3032 /* Prevent sign extending the switch case values to 64-bits on 64-bits
3033 architectures. */
3034 Int cmd = (Int) ARG2;
3036 switch (cmd /*request*/) {
3037 /* Handle 2-arg specially here (they do not use ARG3 at all). */
3038 case VKI_DINFOIDENT:
3039 case VKI_TIOCNOTTY:
3040 case VKI_TIOCSCTTY:
3041 PRINT("sys_ioctl ( %ld, %#lx )", SARG1, ARG2);
3042 PRE_REG_READ2(long, "ioctl", int, fd, int, request);
3043 break;
3044 /* And now come the 3-arg ones. */
3045 default:
3046 PRINT("sys_ioctl ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
3047 PRE_REG_READ3(long, "ioctl", int, fd, int, request, intptr_t, arg);
3048 break;
3051 switch (cmd /*request*/) {
3052 /* pools */
3053 case VKI_POOL_STATUSQ:
3054 PRE_MEM_WRITE("ioctl(POOL_STATUSQ)", ARG3, sizeof(vki_pool_status_t));
3055 break;
3057 /* mntio */
3058 case VKI_MNTIOC_GETEXTMNTENT:
3060 PRE_MEM_READ("ioctl(MNTIOC_GETEXTMNTENT)",
3061 ARG3, sizeof(struct vki_mntentbuf));
3063 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3064 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3065 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_emp)",
3066 (Addr) embuf->mbuf_emp, sizeof(struct vki_extmnttab));
3067 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_buf)",
3068 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3071 break;
3073 case VKI_MNTIOC_GETMNTANY:
3075 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY)",
3076 ARG3, sizeof(struct vki_mntentbuf));
3078 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3079 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3080 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_emp)",
3081 (Addr) embuf->mbuf_emp, sizeof(struct vki_mnttab));
3082 PRE_MEM_WRITE("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_buf)",
3083 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3085 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3086 if (ML_(safe_to_deref(mnt, sizeof(struct vki_mnttab)))) {
3087 if (mnt->mnt_special != NULL)
3088 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_special)",
3089 (Addr) mnt->mnt_special);
3090 if (mnt->mnt_mountp != NULL)
3091 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mountp)",
3092 (Addr) mnt->mnt_mountp);
3093 if (mnt->mnt_fstype != NULL)
3094 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_fstype)",
3095 (Addr) mnt->mnt_fstype);
3096 if (mnt->mnt_mntopts != NULL)
3097 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mntopts)",
3098 (Addr) mnt->mnt_mntopts);
3099 if (mnt->mnt_time != NULL)
3100 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_time)",
3101 (Addr) mnt->mnt_time);
3105 break;
3107 /* termio/termios */
3108 case VKI_TCGETA:
3109 PRE_MEM_WRITE("ioctl(TCGETA)", ARG3, sizeof(struct vki_termio));
3110 break;
3111 case VKI_TCGETS:
3112 PRE_MEM_WRITE("ioctl(TCGETS)", ARG3, sizeof(struct vki_termios));
3113 break;
3114 case VKI_TCSETS:
3115 PRE_MEM_READ("ioctl(TCSETS)", ARG3, sizeof(struct vki_termios));
3116 break;
3117 case VKI_TCSETSW:
3118 PRE_MEM_READ("ioctl(TCSETSW)", ARG3, sizeof(struct vki_termios));
3119 break;
3120 case VKI_TCSETSF:
3121 PRE_MEM_READ("ioctl(TCSETSF)", ARG3, sizeof(struct vki_termios));
3122 break;
3123 case VKI_TIOCGWINSZ:
3124 PRE_MEM_WRITE("ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize));
3125 break;
3126 case VKI_TIOCSWINSZ:
3127 PRE_MEM_READ("ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize));
3128 break;
3129 case VKI_TIOCGPGRP:
3130 PRE_MEM_WRITE("ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t));
3131 break;
3132 case VKI_TIOCSPGRP:
3133 PRE_MEM_READ("ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t));
3134 break;
3135 case VKI_TIOCGSID:
3136 PRE_MEM_WRITE("ioctl(TIOCGSID)", ARG3, sizeof(vki_pid_t));
3137 break;
3138 case VKI_TIOCNOTTY:
3139 case VKI_TIOCSCTTY:
3140 break;
3142 /* STREAMS */
3143 case VKI_I_PUSH:
3144 PRE_MEM_RASCIIZ("ioctl(I_PUSH)", ARG3);
3145 break;
3146 case VKI_I_FLUSH:
3147 break;
3148 case VKI_I_STR:
3150 PRE_MEM_READ("ioctl(I_STR)", ARG3, sizeof(struct vki_strioctl));
3152 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3153 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3154 if ((p->ic_dp != NULL) && (p->ic_len > 0)) {
3155 PRE_MEM_READ("ioctl(I_STR, strioctl->ic_dp)",
3156 (Addr) p->ic_dp, p->ic_len);
3160 break;
3161 case VKI_I_FIND:
3162 PRE_MEM_RASCIIZ("ioctl(I_FIND)", ARG3);
3163 break;
3164 case VKI_I_PEEK:
3166 /* Try hard not to mark strpeek->*buf.len members as being read. */
3167 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3169 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.maxlen)",
3170 p->ctlbuf.maxlen);
3171 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.len)",
3172 p->ctlbuf.len);
3173 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3174 p->ctlbuf.buf);
3175 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.maxlen)",
3176 p->databuf.maxlen);
3177 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->databuf.len)",
3178 p->databuf.len);
3179 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.buf)",
3180 p->databuf.buf);
3181 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->flags)", p->flags);
3182 /*PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->flags)", p->flags);*/
3184 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3185 if (p->ctlbuf.buf && p->ctlbuf.maxlen > 0)
3186 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3187 (Addr)p->ctlbuf.buf, p->ctlbuf.maxlen);
3188 if (p->databuf.buf && p->databuf.maxlen > 0)
3189 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->databuf.buf)",
3190 (Addr)p->databuf.buf, p->databuf.maxlen);
3193 break;
3194 case VKI_I_CANPUT:
3195 break;
3197 /* sockio */
3198 case VKI_SIOCGIFCONF:
3200 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3201 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_len)", p->ifc_len);
3202 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_buf)", p->ifc_buf);
3203 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3204 if ((p->ifc_buf != NULL) && (p->ifc_len > 0))
3205 PRE_MEM_WRITE("ioctl(SIOCGIFCONF, ifconf->ifc_buf)",
3206 (Addr) p->ifc_buf, p->ifc_len);
3208 /* ifc_len also gets written to during the SIOCGIFCONF ioctl. */
3210 break;
3211 case VKI_SIOCGIFFLAGS:
3213 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3214 PRE_FIELD_READ("ioctl(SIOCGIFFLAGS, ifreq->ifr_name)", p->ifr_name);
3215 PRE_FIELD_WRITE("ioctl(SIOCGIFFLAGS, ifreq->ifr_flags)", p->ifr_flags);
3217 break;
3218 case VKI_SIOCGIFNETMASK:
3220 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3221 PRE_FIELD_READ("ioctl(SIOCGIFNETMASK, ifreq->ifr_name)", p->ifr_name);
3222 PRE_FIELD_WRITE("ioctl(SIOCGIFNETMASK, ifreq->ifr_addr)", p->ifr_addr);
3224 break;
3225 case VKI_SIOCGIFNUM:
3226 PRE_MEM_WRITE("ioctl(SIOCGIFNUM)", ARG3, sizeof(int));
3227 break;
3228 case VKI_SIOCGLIFBRDADDR:
3230 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3231 PRE_FIELD_READ("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_name)",
3232 p->lifr_name);
3233 PRE_FIELD_WRITE("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_addr)",
3234 p->lifr_addr);
3236 break;
3237 case VKI_SIOCGLIFCONF:
3239 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3240 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_len)", p->lifc_len);
3241 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)", p->lifc_buf);
3242 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_family)",
3243 p->lifc_family);
3244 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_flags)",
3245 p->lifc_flags);
3246 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3247 if ((p->lifc_buf != NULL) && (p->lifc_len > 0))
3248 PRE_MEM_WRITE("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)",
3249 (Addr) p->lifc_buf, p->lifc_len);
3251 /* lifc_len also gets written to during the SIOCGLIFCONF ioctl. */
3253 break;
3254 case VKI_SIOCGLIFFLAGS:
3256 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3257 PRE_FIELD_READ("ioctl(SIOCGLIFFLAGS, lifreq->lifr_name)",
3258 p->lifr_name);
3259 PRE_FIELD_WRITE("ioctl(SIOCGLIFFLAGS, lifreq->lifr_flags)",
3260 p->lifr_flags);
3262 break;
3263 case VKI_SIOCGLIFNETMASK:
3265 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3266 PRE_FIELD_READ("ioctl(SIOCGLIFNETMASK, lifreq->lifr_name)",
3267 p->lifr_name);
3268 PRE_FIELD_WRITE("ioctl(SIOCGLIFNETMASK, lifreq->lifr_addr)",
3269 p->lifr_addr);
3271 break;
3272 case VKI_SIOCGLIFNUM:
3274 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3275 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_family)",
3276 p->lifn_family);
3277 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_flags)",
3278 p->lifn_flags);
3279 PRE_FIELD_WRITE("ioctl(SIOCGLIFNUM, lifn->lifn_count)",
3280 p->lifn_count);
3282 break;
3284 /* filio */
3285 case VKI_FIOSETOWN:
3286 PRE_MEM_READ("ioctl(FIOSETOWN)", ARG3, sizeof(vki_pid_t));
3287 break;
3288 case VKI_FIOGETOWN:
3289 PRE_MEM_WRITE("ioctl(FIOGETOWN)", ARG3, sizeof(vki_pid_t));
3290 break;
3292 /* CRYPTO */
3293 case VKI_CRYPTO_GET_PROVIDER_LIST:
3295 vki_crypto_get_provider_list_t *pl =
3296 (vki_crypto_get_provider_list_t *) ARG3;
3297 PRE_FIELD_READ("ioctl(CRYPTO_GET_PROVIDER_LIST, pl->pl_count)",
3298 pl->pl_count);
3300 if (ML_(safe_to_deref)(pl, sizeof(*pl))) {
3301 PRE_MEM_WRITE("ioctl(CRYPTO_GET_PROVIDER_LIST)", ARG3,
3302 MAX(1, pl->pl_count) *
3303 sizeof(vki_crypto_get_provider_list_t));
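/* The writable size is taken from pl_count as supplied by the caller; the
   post-handler below marks only the entries actually returned, and only
   when the call reports CRYPTO_SUCCESS. */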
3305 /* Save the requested count to unused ARG4 below,
3306 when we know pre-handler succeeded.
3309 break;
3311 /* dtrace */
3312 case VKI_DTRACEHIOC_REMOVE:
3313 break;
3314 case VKI_DTRACEHIOC_ADDDOF:
3316 vki_dof_helper_t *dh = (vki_dof_helper_t *) ARG3;
3317 PRE_MEM_RASCIIZ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_mod)",
3318 (Addr) dh->dofhp_mod);
3319 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_addr)",
3320 dh->dofhp_addr);
3321 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_dof)",
3322 dh->dofhp_dof);
3324 break;
3326 /* devinfo */
3327 case VKI_DINFOUSRLD:
3328 /* We should do PRE_MEM_WRITE here but the question is how many bytes. */
3329 break;
3330 case VKI_DINFOIDENT:
3331 break;
3333 default:
3334 ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
3335 break;
3338 /* Be strict. */
3339 if (!ML_(fd_allowed)(ARG1, "ioctl", tid, False)) {
3340 SET_STATUS_Failure(VKI_EBADF);
3341 } else if (ARG2 == VKI_CRYPTO_GET_PROVIDER_LIST) {
3342 /* Save the requested count to unused ARG4 now. */
3343 ARG4 = ARG3;
3347 POST(sys_ioctl)
3349 /* Prevent sign extending the switch case values to 64-bits on 64-bits
3350 architectures. */
3351 Int cmd = (Int) ARG2;
3353 switch (cmd /*request*/) {
3354 /* pools */
3355 case VKI_POOL_STATUSQ:
3356 POST_MEM_WRITE(ARG3, sizeof(vki_pool_status_t));
3357 break;
3359 /* mntio */
3360 case VKI_MNTIOC_GETEXTMNTENT:
3362 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3363 struct vki_extmnttab *mnt = (struct vki_extmnttab *) embuf->mbuf_emp;
3365 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_extmnttab));
3366 if (mnt != NULL) {
3367 if (mnt->mnt_special != NULL)
3368 POST_MEM_WRITE((Addr) mnt->mnt_special,
3369 VG_(strlen)(mnt->mnt_special) + 1);
3370 if (mnt->mnt_mountp != NULL)
3371 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3372 VG_(strlen)(mnt->mnt_mountp) + 1);
3373 if (mnt->mnt_fstype != NULL)
3374 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3375 VG_(strlen)(mnt->mnt_fstype) + 1);
3376 if (mnt->mnt_mntopts != NULL)
3377 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3378 VG_(strlen)(mnt->mnt_mntopts) + 1);
3379 if (mnt->mnt_time != NULL)
3380 POST_MEM_WRITE((Addr) mnt->mnt_time,
3381 VG_(strlen)(mnt->mnt_time) + 1);
3384 break;
3386 case VKI_MNTIOC_GETMNTANY:
3388 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3389 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3391 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_mnttab));
3392 if (mnt != NULL) {
3393 if (mnt->mnt_special != NULL)
3394 POST_MEM_WRITE((Addr) mnt->mnt_special,
3395 VG_(strlen)(mnt->mnt_special) + 1);
3396 if (mnt->mnt_mountp != NULL)
3397 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3398 VG_(strlen)(mnt->mnt_mountp) + 1);
3399 if (mnt->mnt_fstype != NULL)
3400 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3401 VG_(strlen)(mnt->mnt_fstype) + 1);
3402 if (mnt->mnt_mntopts != NULL)
3403 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3404 VG_(strlen)(mnt->mnt_mntopts) + 1);
3405 if (mnt->mnt_time != NULL)
3406 POST_MEM_WRITE((Addr) mnt->mnt_time,
3407 VG_(strlen)(mnt->mnt_time) + 1);
3410 break;
3412 /* termio/termios */
3413 case VKI_TCGETA:
3414 POST_MEM_WRITE(ARG3, sizeof(struct vki_termio));
3415 break;
3416 case VKI_TCGETS:
3417 POST_MEM_WRITE(ARG3, sizeof(struct vki_termios));
3418 break;
3419 case VKI_TCSETS:
3420 break;
3421 case VKI_TCSETSW:
3422 break;
3423 case VKI_TCSETSF:
3424 break;
3425 case VKI_TIOCGWINSZ:
3426 POST_MEM_WRITE(ARG3, sizeof(struct vki_winsize));
3427 break;
3428 case VKI_TIOCSWINSZ:
3429 break;
3430 case VKI_TIOCGPGRP:
3431 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3432 break;
3433 case VKI_TIOCSPGRP:
3434 break;
3435 case VKI_TIOCGSID:
3436 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3437 break;
3438 case VKI_TIOCNOTTY:
3439 case VKI_TIOCSCTTY:
3440 break;
3442 /* STREAMS */
3443 case VKI_I_PUSH:
3444 break;
3445 case VKI_I_FLUSH:
3446 break;
3447 case VKI_I_STR:
3449 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3451 POST_FIELD_WRITE(p->ic_len);
3452 if ((p->ic_dp != NULL) && (p->ic_len > 0))
3453 POST_MEM_WRITE((Addr) p->ic_dp, p->ic_len);
3455 break;
3456 case VKI_I_FIND:
3457 break;
3458 case VKI_I_PEEK:
3460 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3462 POST_FIELD_WRITE(p->ctlbuf.len);
3463 POST_FIELD_WRITE(p->databuf.len);
3464 POST_FIELD_WRITE(p->flags);
3466 if (p->ctlbuf.buf && p->ctlbuf.len > 0)
3467 POST_MEM_WRITE((Addr)p->ctlbuf.buf, p->ctlbuf.len);
3468 if (p->databuf.buf && p->databuf.len > 0)
3469 POST_MEM_WRITE((Addr)p->databuf.buf, p->databuf.len);
3471 break;
3472 case VKI_I_CANPUT:
3473 break;
3475 /* sockio */
3476 case VKI_SIOCGIFCONF:
3478 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3479 POST_FIELD_WRITE(p->ifc_len);
3480 POST_FIELD_WRITE(p->ifc_req);
3481 if ((p->ifc_req != NULL) && (p->ifc_len > 0))
3482 POST_MEM_WRITE((Addr) p->ifc_req, p->ifc_len);
3484 break;
3485 case VKI_SIOCGIFFLAGS:
3487 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3488 POST_FIELD_WRITE(p->ifr_flags);
3490 break;
3491 case VKI_SIOCGIFNETMASK:
3493 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3494 POST_FIELD_WRITE(p->ifr_addr);
3496 break;
3497 case VKI_SIOCGIFNUM:
3498 POST_MEM_WRITE(ARG3, sizeof(int));
3499 break;
3500 case VKI_SIOCGLIFBRDADDR:
3502 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3503 POST_FIELD_WRITE(p->lifr_addr);
3505 break;
3506 case VKI_SIOCGLIFCONF:
3508 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3509 POST_FIELD_WRITE(p->lifc_len);
3510 POST_FIELD_WRITE(p->lifc_req);
3511 if ((p->lifc_req != NULL) && (p->lifc_len > 0))
3512 POST_MEM_WRITE((Addr) p->lifc_req, p->lifc_len);
3514 break;
3515 case VKI_SIOCGLIFFLAGS:
3517 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3518 POST_FIELD_WRITE(p->lifr_flags);
3520 break;
3521 case VKI_SIOCGLIFNETMASK:
3523 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3524 POST_FIELD_WRITE(p->lifr_addr);
3526 break;
3527 case VKI_SIOCGLIFNUM:
3529 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3530 POST_FIELD_WRITE(p->lifn_count);
3532 break;
3534 /* filio */
3535 case VKI_FIOSETOWN:
3536 break;
3537 case VKI_FIOGETOWN:
3538 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3539 break;
3541 /* CRYPTO */
3542 case VKI_CRYPTO_GET_PROVIDER_LIST:
3544 vki_crypto_get_provider_list_t *pl =
3545 (vki_crypto_get_provider_list_t *) ARG3;
3547 POST_FIELD_WRITE(pl->pl_count);
3548 POST_FIELD_WRITE(pl->pl_return_value);
3550 if ((ARG4 > 0) && (pl->pl_return_value == VKI_CRYPTO_SUCCESS))
3551 POST_MEM_WRITE((Addr) pl->pl_list, pl->pl_count *
3552 sizeof(vki_crypto_provider_entry_t));
3554 break;
3556 /* dtrace */
3557 case VKI_DTRACEHIOC_REMOVE:
3558 case VKI_DTRACEHIOC_ADDDOF:
3559 break;
3561 /* devinfo */
3562 case VKI_DINFOUSRLD:
3563 POST_MEM_WRITE(ARG3, RES);
3564 break;
3565 case VKI_DINFOIDENT:
3566 break;
3568 default:
3569 /* Not really anything to do since ioctl direction hints are hardly used
3570 on Solaris. */
3571 break;
3575 PRE(sys_fchownat)
3577 /* int fchownat(int fd, const char *path, uid_t owner, gid_t group,
3578 int flag); */
3580 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
3581 This is different from Linux, for example, where glibc sign-extends it. */
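   /* Because Solaris libc does not sign-extend the descriptor, the upper
      32 bits of ARG1 are not meaningful on a 64-bit guest; casting to Int
      discards them so that the comparison with VKI_AT_FDCWD and the
      ML_(fd_allowed) check below operate on the value the kernel sees.
      The other *at() wrappers below use the same idiom. */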
3582 Int fd = (Int) ARG1;
3584 PRINT("sys_fchownat ( %d, %#lx(%s), %ld, %ld, %lu )", fd,
3585 ARG2, (HChar *) ARG2, SARG3, SARG4, ARG5);
3586 PRE_REG_READ5(long, "fchownat", int, fd, const char *, path,
3587 vki_uid_t, owner, vki_gid_t, group, int, flag);
3589 if (ARG2)
3590 PRE_MEM_RASCIIZ("fchownat(path)", ARG2);
3592 /* Be strict but ignore fd for absolute path. */
3593 if (fd != VKI_AT_FDCWD
3594 && ML_(safe_to_deref)((void *) ARG2, 1)
3595 && ((HChar *) ARG2)[0] != '/'
3596 && !ML_(fd_allowed)(fd, "fchownat", tid, False))
3597 SET_STATUS_Failure(VKI_EBADF);
3600 PRE(sys_fdsync)
3602 /* int fdsync(int fd, int flag); */
3603 PRINT("sys_fdsync ( %ld, %ld )", SARG1, SARG2);
3604 PRE_REG_READ2(long, "fdsync", int, fd, int, flag);
3606 /* Be strict. */
3607 if (!ML_(fd_allowed)(ARG1, "fdsync", tid, False))
3608 SET_STATUS_Failure(VKI_EBADF);
3611 PRE(sys_execve)
3613 Int i, j;
3614 Addr arg_2_check;
3615 const char* str2 = "execve(argv)";
3616 const char* str3 = "execve(argv[0])";
3617 const char* str4 = "execve(argv[i])";
3618 /* This is a Solaris specific version of the generic pre-execve wrapper. */
3620 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3621 /* int execve(uintptr_t file, const char **argv, const char **envp,
3622 int flags); */
3623 PRINT("sys_execve ( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
3624 PRE_REG_READ4(long, "execve", uintptr_t, file, const char **, argv,
3625 const char **, envp, int, flags);
3627 #else
3629 /* int execve(const char *fname, const char **argv, const char **envp); */
3630 PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )",
3631 ARG1, (HChar *) ARG1, ARG2, ARG3);
3632 PRE_REG_READ3(long, "execve", const char *, file, const char **, argv,
3633 const char **, envp);
3634 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3636 Bool ARG1_is_fd = False;
3637 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3638 if (ARG4 & VKI_EXEC_DESCRIPTOR) {
3639 ARG1_is_fd = True;
3641 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
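   /* When VKI_EXEC_DESCRIPTOR is set, ARG1 carries an open file descriptor
      (fexecve-style) instead of a pathname; it is resolved back to a
      filename below so that the usual pre-exec checks can still be made. */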
3643 if (ARG1_is_fd == False)
3644 PRE_MEM_RASCIIZ("execve(filename)", ARG1);
3646 /* Erk. If the exec fails, then the following will have made a mess of
3647 things which makes it hard for us to continue. The right thing to do is
3648 piece everything together again in POST(execve), but that's close to
3649 impossible. Instead, we make an effort to check that the execve will
3650 work before actually doing it. */
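   /* Concretely, the checks below cover the filename (or descriptor) itself,
      the addressability of argv, argv[0] and envp, VG_(pre_exec_check) on
      the image, and the availability of the launcher when the child is to
      be traced.  Only after all of them pass do we commit to the real
      execve. */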
3652 const HChar *fname = (const HChar *) ARG1;
3653 if (ARG1_is_fd) {
3654 if (!ML_(fd_allowed)(ARG1, "execve", tid, False)) {
3655 SET_STATUS_Failure(VKI_EBADF);
3656 return;
3659 if (VG_(resolve_filename)(ARG1, &fname) == False) {
3660 SET_STATUS_Failure(VKI_EBADF);
3661 return;
3664 struct vg_stat stats;
3665 if (VG_(fstat)(ARG1, &stats) != 0) {
3666 SET_STATUS_Failure(VKI_EBADF);
3667 return;
3670 if (stats.nlink > 1)
3671 VG_(unimplemented)("Syswrap of execve where fd points to a hardlink.");
3674 arg_2_check = (Addr)ARG2;
3676 /* argv[] must not be NULL and must point to valid memory. */
3677 PRE_MEM_READ(str2, arg_2_check, sizeof(Addr));
3679 /* argv[0] must not be NULL and must be a valid string pointer. */
3680 if (ML_(safe_to_deref)((HChar **) (Addr)arg_2_check, sizeof(HChar *))) {
3681 Addr argv0 = *(Addr*)arg_2_check;
3682 PRE_MEM_RASCIIZ( str3, argv0 );
3683 /* The rest of argv can be NULL or a valid string pointer. */
3684 if (VG_(am_is_valid_for_client)(arg_2_check, sizeof(HChar), VKI_PROT_READ)) {
3685 arg_2_check += sizeof(HChar*);
3686 ML_(pre_argv_envp)( arg_2_check, tid, str2, str4 );
3688 } else {
3689 SET_STATUS_Failure(VKI_EFAULT);
3690 return;
3693 if (ARG3 != 0) {
3694 /* At least the terminating NULL must be addressable. */
3695 if (!ML_(safe_to_deref)((HChar **) (Addr)ARG3, sizeof(HChar *))) {
3696 SET_STATUS_Failure(VKI_EFAULT);
3697 return;
3699 ML_(pre_argv_envp)( ARG3, tid, "execve(envp)", "execve(envp[i])" );
3702 /* Check that the name at least begins in client-accessible storage. */
3703 if (ARG1_is_fd == False) {
3704 if ((fname == NULL) || !ML_(safe_to_deref)(fname, 1)) {
3705 SET_STATUS_Failure(VKI_EFAULT);
3706 return;
3710 /* Check that the args at least begin in client-accessible storage.
3711 Solaris does not allow an exec without any arguments specified.
3713 if (!ARG2 /* obviously bogus */ ||
3714 !VG_(am_is_valid_for_client)(ARG2, 1, VKI_PROT_READ)) {
3715 SET_STATUS_Failure(VKI_EFAULT);
3716 return;
3719 /* Debug-only printing. */
3720 if (0) {
3721 VG_(printf)("ARG1 = %#lx(%s)\n", ARG1, fname);
3722 if (ARG2) {
3723 Int q;
3724 HChar** vec = (HChar**)ARG2;
3726 VG_(printf)("ARG2 = ");
3727 for (q = 0; vec[q]; q++)
3728 VG_(printf)("%p(%s) ", vec[q], vec[q]);
3729 VG_(printf)("\n");
3731 else
3732 VG_(printf)("ARG2 = null\n");
3735 /* Decide whether or not we want to follow along. */
3736 /* Make 'child_argv' be a pointer to the child's arg vector (skipping the
3737 exe name) */
3738 const HChar **child_argv = (const HChar **) ARG2;
3739 if (child_argv[0] == NULL)
3740 child_argv = NULL;
3741 Bool trace_this_child = VG_(should_we_trace_this_child)(fname, child_argv);
3743 /* Do the important checks: it is a file, is executable, permissions are
3744 ok, etc. We allow setuid executables to run only when we are not
3745 simulating them, that is, when they are run natively. */
3746 Bool setuid_allowed = trace_this_child ? False : True;
3747 SysRes res = VG_(pre_exec_check)(fname, NULL, setuid_allowed);
3748 if (sr_isError(res)) {
3749 SET_STATUS_Failure(sr_Err(res));
3750 return;
3753 /* If we're tracing the child, and the launcher name looks bogus (possibly
3754 because launcher.c couldn't figure it out, see comments therein) then we
3755 have no option but to fail. */
3756 if (trace_this_child &&
3757 (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
3758 SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
3759 return;
3762 /* After this point, we can't recover if the execve fails. */
3763 VG_(debugLog)(1, "syswrap", "Exec of %s\n", fname);
3765 /* Terminate gdbserver if it is active. */
3766 if (VG_(clo_vgdb) != Vg_VgdbNo) {
3767 /* If the child will not be traced, we need to terminate gdbserver to
3768 clean up the gdbserver resources (e.g. the FIFO files). If the child will
3769 be traced, we also terminate gdbserver: the new Valgrind will start a
3770 fresh gdbserver after exec. */
3771 VG_(gdbserver)(0);
3774 /* Resistance is futile. Nuke all other threads. POSIX mandates this.
3775 (Really, nuke them all, since the new process will make its own new
3776 thread.) */
3777 VG_(nuke_all_threads_except)(tid, VgSrc_ExitThread);
3778 VG_(reap_threads)(tid);
3780 /* Set up the child's exe path. */
3781 const HChar *path = fname;
3782 const HChar *launcher_basename = NULL;
3783 if (trace_this_child) {
3784 /* We want to exec the launcher. Get its pre-remembered path. */
3785 path = VG_(name_of_launcher);
3786 /* VG_(name_of_launcher) should have been acquired by m_main at
3787 startup. */
3788 vg_assert(path);
3790 launcher_basename = VG_(strrchr)(path, '/');
3791 if (!launcher_basename || launcher_basename[1] == '\0')
3792 launcher_basename = path; /* hmm, very dubious */
3793 else
3794 launcher_basename++;
3797 /* Set up the child's environment.
3799 Remove the valgrind-specific stuff from the environment so the child
3800 doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
3801 unconditionally, since if we are tracing the child, the child valgrind
3802 will set up the appropriate client environment. Nb: we make a copy of
3803 the environment before trying to mangle it as it might be in read-only
3804 memory (bug #101881).
3806 Then, if tracing the child, set VALGRIND_LIB for it. */
3807 HChar **envp = NULL;
3808 if (ARG3 != 0) {
3809 envp = VG_(env_clone)((HChar**)ARG3);
3810 vg_assert(envp != NULL);
3811 VG_(env_remove_valgrind_env_stuff)(envp, True /*ro_strings*/, NULL);
3814 if (trace_this_child) {
3815 /* Set VALGRIND_LIB in ARG3 (the environment). */
3816 VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
3819 /* Set up the child's args. If not tracing it, they are simply ARG2.
3820 Otherwise, they are:
3822 [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..],
3824 except that the first VG_(args_for_valgrind_noexecpass) args are
3825 omitted. */
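   /* Rough illustration with made-up values: if Valgrind was started as
      "valgrind --tool=memcheck ./parent" and the client execs
      "/bin/ls -l /tmp", the vector assembled below comes out approximately
      as { "valgrind", "--tool=memcheck", "/bin/ls", "-l", "/tmp", NULL },
      minus any leading args dropped per VG_(args_for_valgrind_noexecpass). */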
3826 HChar **argv = NULL;
3827 if (!trace_this_child)
3828 argv = (HChar **) ARG2;
3829 else {
3830 Int tot_args;
3832 vg_assert(VG_(args_for_valgrind));
3833 vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
3834 vg_assert(VG_(args_for_valgrind_noexecpass)
3835 <= VG_(sizeXA)(VG_(args_for_valgrind)));
3837 /* How many args in total will there be? */
3838 /* launcher basename */
3839 tot_args = 1;
3840 /* V's args */
3841 tot_args += VG_(sizeXA)(VG_(args_for_valgrind));
3842 tot_args -= VG_(args_for_valgrind_noexecpass);
3843 /* name of client exe */
3844 tot_args++;
3845 /* args for client exe, skipping [0] */
3846 HChar **arg2copy = (HChar **) ARG2;
3847 if (arg2copy[0] != NULL)
3848 for (i = 1; arg2copy[i]; i++)
3849 tot_args++;
3850 /* allocate */
3851 argv = VG_(malloc)("syswrap.exec.5", (tot_args + 1) * sizeof(HChar*));
3852 /* copy */
3853 j = 0;
3854 argv[j++] = CONST_CAST(HChar *, launcher_basename);
3855 for (i = 0; i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
3856 if (i < VG_(args_for_valgrind_noexecpass))
3857 continue;
3858 argv[j++] = *(HChar**)VG_(indexXA)(VG_(args_for_valgrind), i);
3860 argv[j++] = CONST_CAST(HChar *, fname);
3861 if (arg2copy[0] != NULL)
3862 for (i = 1; arg2copy[i]; i++)
3863 argv[j++] = arg2copy[i];
3864 argv[j++] = NULL;
3865 /* check */
3866 vg_assert(j == tot_args + 1);
3869 /* Set the signal state up for exec.
3871 We need to set the real signal state to make sure the exec'd process
3872 gets SIG_IGN properly.
3874 Also set our real sigmask to match the client's sigmask so that the
3875 exec'd child will get the right mask. First we need to clear out any
3876 pending signals so they don't get delivered, which would confuse
3877 things.
3879 XXX This is a bug - the signals should remain pending, and be delivered
3880 to the new process after exec. There's also a race-condition, since if
3881 someone delivers us a signal between the sigprocmask and the execve,
3882 we'll still get the signal. Oh well.
3885 vki_sigset_t allsigs;
3886 vki_siginfo_t info;
3888 /* What this loop does: it queries SCSS (the signal state that the
3889 client _thinks_ the kernel is in) by calling VG_(do_sys_sigaction),
3890 and modifies the real kernel signal state accordingly. */
3891 for (i = 1; i < VG_(max_signal); i++) {
3892 vki_sigaction_fromK_t sa_f;
3893 vki_sigaction_toK_t sa_t;
3894 VG_(do_sys_sigaction)(i, NULL, &sa_f);
3895 VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
3896 VG_(sigaction)(i, &sa_t, NULL);
3899 VG_(sigfillset)(&allsigs);
3900 while (VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
3903 ThreadState *tst = VG_(get_ThreadState)(tid);
3904 VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
3907 /* Debug-only printing. */
3908 if (0) {
3909 HChar **cpp;
3910 VG_(printf)("exec: %s\n", path);
3911 for (cpp = argv; cpp && *cpp; cpp++)
3912 VG_(printf)("argv: %s\n", *cpp);
3913 if (0)
3914 for (cpp = envp; cpp && *cpp; cpp++)
3915 VG_(printf)("env: %s\n", *cpp);
3918 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3919 res = VG_(do_syscall4)(__NR_execve, (UWord) path, (UWord) argv,
3920 (UWord) envp, ARG4 & ~VKI_EXEC_DESCRIPTOR);
3921 #else
3922 res = VG_(do_syscall3)(__NR_execve, (UWord) path, (UWord) argv,
3923 (UWord) envp);
3924 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3925 SET_STATUS_from_SysRes(res);
3927 /* If we got here, then the execve failed. We've already made way too much
3928 of a mess to continue, so we have to abort. */
3929 vg_assert(FAILURE);
3930 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3931 if (ARG1_is_fd)
3932 VG_(message)(Vg_UserMsg, "execve(%ld, %#lx, %#lx, %lu) failed, "
3933 "errno %ld\n", SARG1, ARG2, ARG3, ARG4, ERR);
3934 else
3935 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx, %ld) failed, errno"
3936 " %lu\n", ARG1, (HChar *) ARG1, ARG2, ARG3, SARG4, ERR);
3937 #else
3938 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %lu\n",
3939 ARG1, (HChar *) ARG1, ARG2, ARG3, ERR);
3940 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3941 VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
3942 "execve() failing, so I'm dying.\n");
3943 VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
3944 "or work out how to recover.\n");
3945 VG_(exit)(101);
3946 /*NOTREACHED*/
3949 static void pre_mem_read_flock(ThreadId tid, struct vki_flock *lock)
3951 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3952 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3953 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3954 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3957 #if defined(VGP_x86_solaris)
3958 static void pre_mem_read_flock64(ThreadId tid, struct vki_flock64 *lock)
3960 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3961 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3962 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3963 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3965 #endif /* VGP_x86_solaris */
3967 PRE(sys_fcntl)
3969 /* int fcntl(int fildes, int cmd, ...); */
3971 switch (ARG2 /*cmd*/) {
3972 /* These ones ignore ARG3. */
3973 case VKI_F_GETFD:
3974 case VKI_F_GETFL:
3975 case VKI_F_GETXFL:
3976 PRINT("sys_fcntl ( %ld, %ld )", SARG1, SARG2);
3977 PRE_REG_READ2(long, "fcntl", int, fildes, int, cmd);
3978 break;
3980 /* These ones use ARG3 as "arg". */
3981 case VKI_F_DUPFD:
3982 case VKI_F_DUPFD_CLOEXEC:
3983 case VKI_F_SETFD:
3984 case VKI_F_SETFL:
3985 case VKI_F_DUP2FD:
3986 case VKI_F_BADFD:
3987 PRINT("sys_fcntl ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
3988 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd, int, arg);
3989 /* Check if a client program isn't going to poison any of V's output
3990 fds. */
3991 if (ARG2 == VKI_F_DUP2FD &&
3992 !ML_(fd_allowed)(ARG3, "fcntl(F_DUP2FD)", tid, False)) {
3993 SET_STATUS_Failure(VKI_EBADF);
3994 return;
3996 break;
3998 /* These ones use ARG3 as "native lock" (input only). */
3999 case VKI_F_SETLK:
4000 case VKI_F_SETLKW:
4001 case VKI_F_ALLOCSP:
4002 case VKI_F_FREESP:
4003 case VKI_F_SETLK_NBMAND:
4004 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4005 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4006 struct flock *, lock);
4007 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
4008 break;
4010 /* This one uses ARG3 as "native lock" (input&output). */
4011 case VKI_F_GETLK:
4012 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4013 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4014 struct flock *, lock);
4015 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
4016 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock));
4017 break;
4019 #if defined(VGP_x86_solaris)
4020 /* These ones use ARG3 as "transitional 64b lock" (input only). */
4021 case VKI_F_SETLK64:
4022 case VKI_F_SETLKW64:
4023 case VKI_F_ALLOCSP64:
4024 case VKI_F_FREESP64:
4025 case VKI_F_SETLK64_NBMAND:
4026 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4027 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4028 struct flock64 *, lock);
4029 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4030 break;
4032 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4033 case VKI_F_GETLK64:
4034 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4035 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4036 struct flock64 *, lock);
4037 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4038 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock64));
4039 break;
4040 #endif /* VGP_x86_solaris */
4042 /* These ones use ARG3 as "fshare". */
4043 case VKI_F_SHARE:
4044 case VKI_F_UNSHARE:
4045 case VKI_F_SHARE_NBMAND:
4046 PRINT("sys_fcntl[ARG3=='fshare'] ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4047 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4048 struct fshare *, sh);
4049 PRE_MEM_READ("fcntl(fshare)", ARG3, sizeof(struct vki_fshare));
4050 break;
4052 default:
4053 VG_(unimplemented)("Syswrap of the fcntl call with cmd %ld.", SARG2);
4054 /*NOTREACHED*/
4055 break;
4058 if (ARG2 == VKI_F_SETLKW
4059 #if defined(VGP_x86_solaris)
4060 || ARG2 == VKI_F_SETLKW64
4061 #endif /* VGP_x86_solaris */
4063 *flags |= SfMayBlock;
4065 /* Be strict. */
4066 if (!ML_(fd_allowed)(ARG1, "fcntl", tid, False))
4067 SET_STATUS_Failure(VKI_EBADF);
4070 POST(sys_fcntl)
4072 switch (ARG2 /*cmd*/) {
4073 case VKI_F_DUPFD:
4074 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD)", tid, True)) {
4075 VG_(close)(RES);
4076 SET_STATUS_Failure(VKI_EMFILE);
4077 } else if (VG_(clo_track_fds))
4078 ML_(record_fd_open_named)(tid, RES);
4079 break;
4081 case VKI_F_DUPFD_CLOEXEC:
4082 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD_CLOEXEC)", tid, True)) {
4083 VG_(close)(RES);
4084 SET_STATUS_Failure(VKI_EMFILE);
4085 } else if (VG_(clo_track_fds))
4086 ML_(record_fd_open_named)(tid, RES);
4087 break;
4089 case VKI_F_DUP2FD:
4090 if (!ML_(fd_allowed)(RES, "fcntl(F_DUP2FD)", tid, True)) {
4091 VG_(close)(RES);
4092 SET_STATUS_Failure(VKI_EMFILE);
4093 } else if (VG_(clo_track_fds))
4094 ML_(record_fd_open_named)(tid, RES);
4095 break;
4097 /* This one uses ARG3 as "native lock" (input&output). */
4098 case VKI_F_GETLK:
4099 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock));
4100 break;
4102 #if defined(VGP_x86_solaris)
4103 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4104 case VKI_F_GETLK64:
4105 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock64));
4106 break;
4107 #endif /* VGP_x86_solaris */
4109 default:
4110 break;
4114 PRE(sys_renameat)
4116 /* int renameat(int fromfd, const char *old, int tofd, const char *new); */
4118 /* Interpret the first and third arguments as 32-bit values even on 64-bit
4119 architecture. This is different from Linux, for example, where glibc
4120 sign-extends them. */
4121 Int fromfd = (Int) ARG1;
4122 Int tofd = (Int) ARG3;
4124 *flags |= SfMayBlock;
4125 PRINT("sys_renameat ( %d, %#lx(%s), %d, %#lx(%s) )", fromfd,
4126 ARG2, (HChar *) ARG2, tofd, ARG4, (HChar *) ARG4);
4127 PRE_REG_READ4(long, "renameat", int, fromfd, const char *, old,
4128 int, tofd, const char *, new);
4130 PRE_MEM_RASCIIZ("renameat(old)", ARG2);
4131 PRE_MEM_RASCIIZ("renameat(new)", ARG4);
4133 /* Be strict but ignore fromfd/tofd for absolute old/new. */
4134 if (fromfd != VKI_AT_FDCWD
4135 && ML_(safe_to_deref)((void *) ARG2, 1)
4136 && ((HChar *) ARG2)[0] != '/'
4137 && !ML_(fd_allowed)(fromfd, "renameat", tid, False)) {
4138 SET_STATUS_Failure(VKI_EBADF);
4140 if (tofd != VKI_AT_FDCWD
4141 && ML_(safe_to_deref)((void *) ARG4, 1)
4142 && ((HChar *) ARG4)[0] != '/'
4143 && !ML_(fd_allowed)(tofd, "renameat", tid, False)) {
4144 SET_STATUS_Failure(VKI_EBADF);
4148 PRE(sys_unlinkat)
4150 /* int unlinkat(int dirfd, const char *pathname, int flags); */
4152 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4153 This is different from Linux, for example, where glibc sign-extends it. */
4154 Int dfd = (Int) ARG1;
4156 *flags |= SfMayBlock;
4157 PRINT("sys_unlinkat ( %d, %#lx(%s), %ld )", dfd, ARG2, (HChar *) ARG2,
4158 SARG3);
4159 PRE_REG_READ3(long, "unlinkat", int, dirfd, const char *, pathname,
4160 int, flags);
4161 PRE_MEM_RASCIIZ("unlinkat(pathname)", ARG2);
4163 /* Be strict but ignore dfd for absolute pathname. */
4164 if (dfd != VKI_AT_FDCWD
4165 && ML_(safe_to_deref)((void *) ARG2, 1)
4166 && ((HChar *) ARG2)[0] != '/'
4167 && !ML_(fd_allowed)(dfd, "unlinkat", tid, False))
4168 SET_STATUS_Failure(VKI_EBADF);
4171 PRE(sys_fstatat)
4173 /* int fstatat(int fildes, const char *path, struct stat *buf,
4174 int flag); */
4176 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4177 This is different from Linux, for example, where glibc sign-extends it. */
4178 Int fd = (Int) ARG1;
4180 PRINT("sys_fstatat ( %d, %#lx(%s), %#lx, %ld )", fd, ARG2,
4181 (HChar *) ARG2, ARG3, SARG4);
4182 PRE_REG_READ4(long, "fstatat", int, fildes, const char *, path,
4183 struct stat *, buf, int, flag);
4184 if (ARG2) {
4185 /* Only test ARG2 if it isn't NULL. The kernel treats the NULL-case as
4186 fstat(fildes, buf). */
4187 PRE_MEM_RASCIIZ("fstatat(path)", ARG2);
4189 PRE_MEM_WRITE("fstatat(buf)", ARG3, sizeof(struct vki_stat));
4191 /* Be strict but ignore fildes for absolute path. */
4192 if (fd != VKI_AT_FDCWD
4193 && ML_(safe_to_deref)((void *) ARG2, 1)
4194 && ((HChar *) ARG2)[0] != '/'
4195 && !ML_(fd_allowed)(fd, "fstatat", tid, False))
4196 SET_STATUS_Failure(VKI_EBADF);
4199 POST(sys_fstatat)
4201 POST_MEM_WRITE(ARG3, sizeof(struct vki_stat));
4204 PRE(sys_openat)
4206 /* int openat(int fildes, const char *filename, int flags);
4207 int openat(int fildes, const char *filename, int flags, mode_t mode); */
4209 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4210 This is different from Linux, for example, where glibc sign-extends it. */
4211 Int fd = (Int) ARG1;
4213 if (ARG3 & VKI_O_CREAT) {
4214 /* 4-arg version */
4215 PRINT("sys_openat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2, (HChar *) ARG2,
4216 SARG3, SARG4);
4217 PRE_REG_READ4(long, "openat", int, fildes, const char *, filename,
4218 int, flags, vki_mode_t, mode);
4220 else {
4221 /* 3-arg version */
4222 PRINT("sys_openat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2,
4223 SARG3);
4224 PRE_REG_READ3(long, "openat", int, fildes, const char *, filename,
4225 int, flags);
4228 PRE_MEM_RASCIIZ("openat(filename)", ARG2);
4230 /* Be strict but ignore fildes for absolute pathname. */
4231 if (fd != VKI_AT_FDCWD
4232 && ML_(safe_to_deref)((void *) ARG2, 1)
4233 && ((HChar *) ARG2)[0] != '/'
4234 && !ML_(fd_allowed)(fd, "openat", tid, False)) {
4235 SET_STATUS_Failure(VKI_EBADF);
4236 return;
4239 if (ML_(handle_auxv_open)(status, (const HChar *) ARG2, ARG3))
4240 return;
4242 if (handle_psinfo_open(status, True /*use_openat*/, (const HChar *) ARG2,
4243 fd, ARG3, ARG4))
4244 return;
4246 #if defined(SOLARIS_PROC_CMDLINE)
4247 if (handle_cmdline_open(status, (const HChar *) ARG2))
4248 return;
4249 #endif /* SOLARIS_PROC_CMDLINE */
4251 *flags |= SfMayBlock;
4254 POST(sys_openat)
4256 if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
4257 VG_(close)(RES);
4258 SET_STATUS_Failure(VKI_EMFILE);
4260 else if (VG_(clo_track_fds))
4261 ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
4264 PRE(sys_tasksys)
4266 /* Kernel: long tasksys(int code, projid_t projid, uint_t flags,
4267 void *projidbuf, size_t pbufsz);
4269 switch (ARG1 /*code*/) {
4270 case 0:
4271 /* Libc: taskid_t settaskid(projid_t project, uint_t flags); */
4272 PRINT("sys_tasksys ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
4273 PRE_REG_READ3(long, SC2("tasksys", "settaskid"), int, code,
4274 vki_projid_t, projid, vki_uint_t, flags);
4275 break;
4276 case 1:
4277 /* Libc: taskid_t gettaskid(void); */
4278 PRINT("sys_tasksys ( %ld )", SARG1);
4279 PRE_REG_READ1(long, SC2("tasksys", "gettaskid"), int, code);
4280 break;
4281 case 2:
4282 /* Libc: projid_t getprojid(void); */
4283 PRINT("sys_tasksys ( %ld )", SARG1);
4284 PRE_REG_READ1(long, SC2("tasksys", "getprojid"), int, code);
4285 break;
4286 case 3:
4287 /* Libproject: size_t projlist(id_t *idbuf, size_t idbufsz); */
4288 PRINT("sys_tasksys ( %ld, %#lx, %lu )", SARG1, ARG4, ARG5);
4289 PRE_REG_READ3(long, SC2("tasksys", "projlist"), int, code,
4290 vki_id_t *, idbuf, vki_size_t, idbufsz);
4291 PRE_MEM_WRITE("tasksys(idbuf)", ARG4, ARG5);
4292 break;
4293 default:
4294 VG_(unimplemented)("Syswrap of the tasksys call with code %ld.", SARG1);
4295 /*NOTREACHED*/
4296 break;
4300 POST(sys_tasksys)
4302 switch (ARG1 /*code*/) {
4303 case 0:
4304 case 1:
4305 case 2:
4306 break;
4307 case 3:
4308 if ((ARG4 != 0) && (ARG5 != 0))
4309 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
4310 break;
4311 default:
4312 vg_assert(0);
4313 break;
4317 PRE(sys_lwp_park)
4319 /* Kernel: int lwp_park(int which, uintptr_t arg1, uintptr_t arg2);
4321 *flags |= SfMayBlock;
4322 switch (ARG1 /*which*/) {
4323 case 0:
4324 /* Libc: int lwp_park(timespec_t *timeout, id_t lwpid); */
4325 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4326 PRE_REG_READ3(long, SC2("lwp_park", "lwp_park"), int, which,
4327 timespec_t *, timeout, vki_id_t, lwpid);
4328 if (ARG2) {
4329 PRE_MEM_READ("lwp_park(timeout)", ARG2, sizeof(vki_timespec_t));
4330 /*PRE_MEM_WRITE("lwp_park(timeout)", ARG2,
4331 sizeof(vki_timespec_t));*/
4333 break;
4334 case 1:
4335 /* Libc: int lwp_unpark(id_t lwpid); */
4336 PRINT("sys_lwp_park ( %ld, %ld )", SARG1, SARG2);
4337 PRE_REG_READ2(long, SC2("lwp_park", "lwp_unpark"), int, which,
4338 vki_id_t, lwpid);
4339 break;
4340 case 2:
4341 /* Libc: int lwp_unpark_all(id_t *lwpid, int nids); */
4342 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4343 PRE_REG_READ3(long, SC2("lwp_park", "lwp_unpark_all"), int, which,
4344 id_t *, lwpid, int, nids);
4345 PRE_MEM_READ("lwp_park(lwpid)", ARG2, ARG3 * sizeof(vki_id_t));
4346 break;
4347 default:
4348 VG_(unimplemented)("Syswrap of the lwp_park call with which %ld.", SARG1);
4349 /*NOTREACHED*/
4350 break;
4354 POST(sys_lwp_park)
4356 switch (ARG1 /*which*/) {
4357 case 0:
4358 if (ARG2)
4359 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
4360 break;
4361 case 1:
4362 case 2:
4363 break;
4364 default:
4365 vg_assert(0);
4366 break;
4370 PRE(sys_sendfilev)
4372 /* Kernel: ssize_t sendfilev(int opcode, int fd,
4373 const struct sendfilevec *vec,
4374 int sfvcnt, size_t *xferred);
4376 PRINT("sys_sendfilev ( %ld, %ld, %#lx, %ld, %#lx )",
4377 SARG1, SARG2, ARG3, SARG4, ARG5);
4379 switch (ARG1 /*opcode*/) {
4380 case VKI_SENDFILEV:
4382 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4383 const struct vki_sendfilevec *, vec,
4384 int, sfvcnt, vki_size_t *, xferred);
4386 PRE_MEM_READ("sendfilev(vec)", ARG3,
4387 ARG4 * sizeof(struct vki_sendfilevec));
4388 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
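      /* A sendfilev() vector can mix sources: an entry whose sfv_fd is
         SFV_FD_SELF takes its data from memory, with sfv_off acting as a
         pointer and sfv_len as the byte count, while any other entry names
         an ordinary file descriptor.  Hence the split below between
         PRE_MEM_READ and the ML_(fd_allowed) check. */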
4390 struct vki_sendfilevec *vec = (struct vki_sendfilevec *) ARG3;
4391 if (ML_(safe_to_deref)(vec, ARG4 *
4392 sizeof(struct vki_sendfilevec))) {
4393 UInt i;
4394 for (i = 0; i < ARG4; i++) {
4395 HChar desc[35]; // large enough
4396 if (vec[i].sfv_fd == VKI_SFV_FD_SELF) {
4397 VG_(snprintf)(desc, sizeof(desc),
4398 "sendfilev(vec[%u].sfv_off)", i);
4399 PRE_MEM_READ(desc, vec[i].sfv_off, vec[i].sfv_len);
4400 } else {
4401 VG_(snprintf)(desc, sizeof(desc),
4402 "sendfilev(vec[%u].sfv_fd)", i);
4403 if (!ML_(fd_allowed)(vec[i].sfv_fd, desc, tid, False))
4404 SET_STATUS_Failure(VKI_EBADF);
4409 break;
4410 case VKI_SENDFILEV64:
4412 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4413 const struct vki_sendfilevec64 *, vec,
4414 int, sfvcnt, vki_size_t *, xferred);
4416 PRE_MEM_READ("sendfilev(vec)", ARG3,
4417 ARG4 * sizeof(struct vki_sendfilevec64));
4418 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4420 struct vki_sendfilevec64 *vec64 =
4421 (struct vki_sendfilevec64 *) ARG3;
4422 if (ML_(safe_to_deref)(vec64, ARG4 *
4423 sizeof(struct vki_sendfilevec64))) {
4424 UInt i;
4425 for (i = 0; i < ARG4; i++) {
4426 HChar desc[35]; // large enough
4427 if (vec64[i].sfv_fd == VKI_SFV_FD_SELF) {
4428 VG_(snprintf)(desc, sizeof(desc),
4429 "sendfilev(vec[%u].sfv_off)", i);
4430 PRE_MEM_READ(desc, vec64[i].sfv_off, vec64[i].sfv_len);
4431 } else {
4432 VG_(snprintf)(desc, sizeof(desc),
4433 "sendfilev(vec[%u].sfv_fd)", i);
4434 if (!ML_(fd_allowed)(vec64[i].sfv_fd, desc,
4435 tid, False))
4436 SET_STATUS_Failure(VKI_EBADF);
4441 break;
4442 default:
4443 VG_(unimplemented)("Syswrap of the sendfilev call with "
4444 "opcode %ld.", SARG1);
4445 /*NOTREACHED*/
4446 break;
4449 /* Be strict. */
4450 if (!ML_(fd_allowed)(ARG2, "sendfilev(fd)", tid, False))
4451 SET_STATUS_Failure(VKI_EBADF);
4453 *flags |= SfMayBlock;
4456 POST(sys_sendfilev)
4458 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
4461 #if defined(SOLARIS_LWP_NAME_SYSCALL)
4462 PRE(sys_lwp_name)
4464 /* int lwp_name(int opcode, id_t lwpid, char *name, size_t len); */
4465 PRINT("sys_lwp_name ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
4467 switch (ARG1 /*opcode*/) {
4468 case 0:
4469 /* lwp_setname */
4470 PRE_REG_READ3(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4471 char *, name);
4472 PRE_MEM_RASCIIZ("lwp_name(name)", ARG3);
4473 break;
4474 case 1:
4475 /* lwp_getname */
4476 PRE_REG_READ4(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4477 char *, name, vki_size_t, len);
4478 PRE_MEM_WRITE("lwp_name(name)", ARG3, ARG4);
4479 break;
4480 default:
4481 VG_(unimplemented)("Syswrap of the lwp_name call with opcode %ld.", SARG1);
4482 /*NOTREACHED*/
4483 break;
4487 POST(sys_lwp_name)
4489 switch (ARG1 /*opcode*/) {
4490 case 0:
4491 if (ARG3) { // Paranoia
4492 const HChar *new_name = (const HChar *) ARG3;
4493 ThreadState *tst = VG_(get_ThreadState)(tid);
4494 SizeT new_len = VG_(strlen)(new_name);
4496 /* Don't bother reusing the memory. This is a rare event. */
4497 tst->thread_name = VG_(realloc)("syswrap.lwp_name", tst->thread_name,
4498 new_len + 1);
4499 VG_(strcpy)(tst->thread_name, new_name);
4501 break;
4502 case 1:
4503 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4504 break;
4505 default:
4506 vg_assert(0);
4507 break;
4510 #endif /* SOLARIS_LWP_NAME_SYSCALL */
4512 PRE(sys_privsys)
4514 /* Kernel: int privsys(int code, priv_op_t op, priv_ptype_t type,
4515 void *buf, size_t bufsize, int itype);
4517 switch (ARG1 /*code*/) {
4518 case VKI_PRIVSYS_SETPPRIV:
4519 /* Libc: int setppriv(priv_op_t op, priv_ptype_t type,
4520 const priv_set_t *pset);
4522 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4523 ARG4, ARG5);
4524 PRE_REG_READ5(long, SC2("privsys", "setppriv"), int, code,
4525 vki_priv_op_t, op, vki_priv_ptype_t, type,
4526 const priv_set_t *, pset, vki_size_t, bufsize);
4527 PRE_MEM_READ("privsys(pset)", ARG4, ARG5);
4528 break;
4529 case VKI_PRIVSYS_GETPPRIV:
4530 /* Libc: int getppriv(priv_ptype_t type, priv_set_t *pset);
4531 priv_set_t *pset -> void *buf
4533 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4534 ARG4, ARG5);
4535 PRE_REG_READ5(long, SC2("privsys", "getppriv"), int, code,
4536 vki_priv_op_t, op, vki_priv_ptype_t, type, priv_set_t *, pset,
4537 vki_size_t, bufsize);
4538 PRE_MEM_WRITE("privsys(pset)", ARG4, ARG5);
4539 break;
4540 case VKI_PRIVSYS_GETIMPLINFO:
4541 /* Libc: int getprivinfo(priv_impl_info_t *buf, size_t bufsize);
4542 priv_impl_info_t *buf -> void *buf
4544 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4545 ARG4, ARG5);
4546 PRE_REG_READ5(long, SC2("privsys", "getprivinfo"), int, code,
4547 vki_priv_op_t, op, vki_priv_ptype_t, type,
4548 priv_impl_info_t *, buf, vki_size_t, bufsize);
4549 PRE_MEM_WRITE("privsys(buf)", ARG4, ARG5);
4550 break;
4551 case VKI_PRIVSYS_SETPFLAGS:
4552 /* Libc: int setpflags(uint_t flag, uint_t val);
4553 uint_t flag -> priv_op_t op
4554 uint_t val -> priv_ptype_t type
4556 PRINT("sys_privsys ( %ld, %lu, %lu )", SARG1, ARG2, ARG3);
4557 PRE_REG_READ3(long, SC2("privsys", "setpflags"), int, code,
4558 vki_uint_t, flag, vki_uint_t, val);
4559 break;
4560 case VKI_PRIVSYS_GETPFLAGS:
4561 /* Libc: uint_t getpflags(uint_t flag);
4562 uint_t flag -> priv_op_t op
4564 PRINT("sys_privsys ( %ld, %lu )", SARG1, ARG2);
4565 PRE_REG_READ2(long, SC2("privsys", "getpflags"), int, code,
4566 vki_uint_t, flag);
4567 break;
4568 case VKI_PRIVSYS_ISSETUGID:
4569 /* Libc: int issetugid(void); */
4570 PRINT("sys_privsys ( %ld )", SARG1);
4571 PRE_REG_READ1(long, SC2("privsys", "issetugid"), int, code);
4572 break;
4573 case VKI_PRIVSYS_PFEXEC_REG:
4574 /* Libc: int register_pfexec(int did);
4575 int did -> priv_op_t op
4577 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4578 PRE_REG_READ2(long, SC2("privsys", "register_pfexec"), int, code,
4579 int, did);
4580 break;
4581 case VKI_PRIVSYS_PFEXEC_UNREG:
4582 /* Libc: int unregister_pfexec(int did); */
4583 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4584 PRE_REG_READ2(long, SC2("privsys", "unregister_pfexec"), int, code,
4585 int, did);
4586 break;
4587 default:
4588 VG_(unimplemented)("Syswrap of the privsys call with code %ld.", SARG1);
4589 /*NOTREACHED*/
4590 break;
4593 /* Be strict. */
4594 if ((ARG1 == VKI_PRIVSYS_PFEXEC_REG ||
4595 ARG1 == VKI_PRIVSYS_PFEXEC_UNREG) &&
4596 !ML_(fd_allowed)(ARG2, "privsys", tid, False))
4597 SET_STATUS_Failure(VKI_EBADF);
4600 POST(sys_privsys)
4602 switch (ARG1 /*code*/) {
4603 case VKI_PRIVSYS_SETPPRIV:
4604 break;
4605 case VKI_PRIVSYS_GETPPRIV:
4606 POST_MEM_WRITE(ARG4, sizeof(vki_priv_set_t));
4607 break;
4608 case VKI_PRIVSYS_GETIMPLINFO:
4609 /* The kernel copies out data of size min(bufsize, privinfosize).
4610 Unfortunately, it does not seem to be possible to easily obtain the
4611 privinfosize value. The code below optimistically marks all ARG5
4612 bytes (aka bufsize) as written by the kernel. */
4613 POST_MEM_WRITE(ARG4, ARG5);
4614 break;
4615 case VKI_PRIVSYS_SETPFLAGS:
4616 case VKI_PRIVSYS_GETPFLAGS:
4617 case VKI_PRIVSYS_ISSETUGID:
4618 case VKI_PRIVSYS_PFEXEC_REG:
4619 case VKI_PRIVSYS_PFEXEC_UNREG:
4620 break;
4621 default:
4622 vg_assert(0);
4623 break;
4627 PRE(sys_ucredsys)
4629 /* Kernel: int ucredsys(int code, int obj, void *buf); */
4630 PRINT("sys_ucredsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4632 switch (ARG1 /*code*/) {
4633 case VKI_UCREDSYS_UCREDGET:
4634 /* Libc: ucred_t *ucred_get(pid_t pid); */
4635 PRE_REG_READ3(long, SC2("ucredsys", "ucredget"), int, code,
4636 vki_pid_t, pid, vki_ucred_t *, buf);
4637 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4638 break;
4640 case VKI_UCREDSYS_GETPEERUCRED:
4641 /* Libc: int getpeerucred(int fd, ucred_t **ucred); */
4642 PRE_REG_READ3(long, SC2("ucredsys", "getpeerucred"), int, code,
4643 int, fd, vki_ucred_t *, buf);
4644 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4646 /* Be strict. */
4647 if (!ML_(fd_allowed)(ARG2, "ucredsys", tid, False))
4648 SET_STATUS_Failure(VKI_EBADF);
4649 break;
4651 default:
4652 VG_(unimplemented)("Syswrap of the ucredsys call with code %ld.", SARG1);
4653 /*NOTREACHED*/
4654 break;
4658 POST(sys_ucredsys)
4660 switch (ARG1 /*code*/) {
4661 case VKI_UCREDSYS_UCREDGET:
4662 case VKI_UCREDSYS_GETPEERUCRED:
4663 vg_assert(ARG3 != 0);
4664 POST_MEM_WRITE(ARG3, ((vki_ucred_t *) ARG3)->uc_size);
4665 break;
4667 default:
4668 vg_assert(0);
4669 break;
4673 PRE(sys_sysfs)
4675 /* Kernel: int sysfs(int opcode, long a1, long a2); */
4676 PRINT("sys_sysfs ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
4678 switch (ARG1 /*opcode*/) {
4679 case VKI_GETFSIND:
4680 /* Libc: int sysfs(int opcode, const char *fsname); */
4681 PRE_REG_READ2(long, SC2("sysfs", "getfsind"), int, opcode,
4682 const char *, fsname);
4683 PRE_MEM_RASCIIZ("sysfs(fsname)", ARG2);
4684 break;
4685 case VKI_GETFSTYP:
4686 /* Libc: int sysfs(int opcode, int fs_index, char *buf); */
4687 PRE_REG_READ3(long, SC2("sysfs", "getfstyp"), int, opcode,
4688 int, fs_index, char *, buf);
4689 PRE_MEM_WRITE("sysfs(buf)", ARG3, VKI_FSTYPSZ + 1);
4690 break;
4691 case VKI_GETNFSTYP:
4692 /* Libc: int sysfs(int opcode); */
4693 PRE_REG_READ1(long, SC2("sysfs", "getnfstyp"), int, opcode);
4694 break;
4695 default:
4696 VG_(unimplemented)("Syswrap of the sysfs call with opcode %ld.", SARG1);
4697 /*NOTREACHED*/
4698 break;
4702 POST(sys_sysfs)
4704 switch (ARG1 /*opcode*/) {
4705 case VKI_GETFSIND:
4706 case VKI_GETNFSTYP:
4707 break;
4708 case VKI_GETFSTYP:
4709 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4710 break;
4711 default:
4712 vg_assert(0);
4713 break;
4718 PRE(sys_getmsg)
4720 /* int getmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4721 int *flagsp); */
4722 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4723 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4724 *flags |= SfMayBlock;
4725 PRINT("sys_getmsg ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
4726 PRE_REG_READ4(long, "getmsg", int, fildes, struct vki_strbuf *, ctlptr,
4727 struct vki_strbuf *, dataptr, int *, flagsp);
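   /* For getmsg() the strbuf fields have fixed directions: maxlen and buf
      are inputs (buffer capacity and location) while len is an output,
      which is why the field checks below mix PRE_FIELD_READ and
      PRE_FIELD_WRITE. */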
4728 if (ctrlptr) {
4729 PRE_FIELD_READ("getmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
4730 PRE_FIELD_WRITE("getmsg(ctrlptr->len)", ctrlptr->len);
4731 PRE_FIELD_READ("getmsg(ctrlptr->buf)", ctrlptr->buf);
4732 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4733 && ctrlptr->maxlen > 0)
4734 PRE_MEM_WRITE("getmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4735 ctrlptr->maxlen);
4737 if (dataptr) {
4738 PRE_FIELD_READ("getmsg(dataptr->maxlen)", dataptr->maxlen);
4739 PRE_FIELD_WRITE("getmsg(dataptr->len)", dataptr->len);
4740 PRE_FIELD_READ("getmsg(dataptr->buf)", dataptr->buf);
4741 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4742 && dataptr->maxlen > 0)
4743 PRE_MEM_WRITE("getmsg(dataptr->buf)", (Addr)dataptr->buf,
4744 dataptr->maxlen);
4746 PRE_MEM_READ("getmsg(flagsp)", ARG4, sizeof(int));
4747 /*PRE_MEM_WRITE("getmsg(flagsp)", ARG4, sizeof(int));*/
4749 /* Be strict. */
4750 if (!ML_(fd_allowed)(ARG1, "getmsg", tid, False))
4751 SET_STATUS_Failure(VKI_EBADF);
4754 POST(sys_getmsg)
4756 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4757 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4759 if (ctrlptr && ctrlptr->len > 0)
4760 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
4761 if (dataptr && dataptr->len > 0)
4762 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
4763 POST_MEM_WRITE(ARG4, sizeof(int));
4766 PRE(sys_putmsg)
4768 /* int putmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4769 int flags); */
4770 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4771 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4772 *flags |= SfMayBlock;
4773 PRINT("sys_putmsg ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
4774 PRE_REG_READ4(long, "putmsg", int, fildes, struct vki_strbuf *, ctrlptr,
4775 struct vki_strbuf *, dataptr, int, flags);
4776 if (ctrlptr) {
4777 PRE_FIELD_READ("putmsg(ctrlptr->len)", ctrlptr->len);
4778 PRE_FIELD_READ("putmsg(ctrlptr->buf)", ctrlptr->buf);
4779 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4780 && ctrlptr->len > 0)
4781 PRE_MEM_READ("putmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4782 ctrlptr->len);
4784 if (dataptr) {
4785 PRE_FIELD_READ("putmsg(dataptr->len)", dataptr->len);
4786 PRE_FIELD_READ("putmsg(dataptr->buf)", dataptr->buf);
4787 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4788 && dataptr->len > 0)
4789 PRE_MEM_READ("putmsg(dataptr->buf)", (Addr)dataptr->buf,
4790 dataptr->len);
4793 /* Be strict. */
4794 if (!ML_(fd_allowed)(ARG1, "putmsg", tid, False))
4795 SET_STATUS_Failure(VKI_EBADF);
4798 PRE(sys_lstat)
4800 /* int lstat(const char *path, struct stat *buf); */
4801 /* Note: We could use the generic sys_newlstat wrapper here, but the 'new'
4802 in its name is rather confusing in the Solaris context, so we provide
4803 our own wrapper. */
4804 PRINT("sys_lstat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
4805 PRE_REG_READ2(long, "lstat", const char *, path, struct stat *, buf);
4807 PRE_MEM_RASCIIZ("lstat(path)", ARG1);
4808 PRE_MEM_WRITE("lstat(buf)", ARG2, sizeof(struct vki_stat));
4811 POST(sys_lstat)
4813 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
4816 PRE(sys_sigprocmask)
4818 /* int sigprocmask(int how, const sigset_t *set, sigset_t *oset); */
4819 PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4820 PRE_REG_READ3(long, "sigprocmask",
4821 int, how, vki_sigset_t *, set, vki_sigset_t *, oset);
4822 if (ARG2)
4823 PRE_MEM_READ("sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
4824 if (ARG3)
4825 PRE_MEM_WRITE("sigprocmask(oset)", ARG3, sizeof(vki_sigset_t));
4827 /* Be safe. */
4828 if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_sigset_t)))) {
4829 SET_STATUS_Failure(VKI_EFAULT);
4831 if (ARG3 && !ML_(safe_to_deref((void*)ARG3, sizeof(vki_sigset_t)))) {
4832 SET_STATUS_Failure(VKI_EFAULT);
4835 if (!FAILURE)
4836 SET_STATUS_from_SysRes(
4837 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, (vki_sigset_t*)ARG2,
4838 (vki_sigset_t*)ARG3)
4841 if (SUCCESS)
4842 *flags |= SfPollAfter;
4845 POST(sys_sigprocmask)
4847 if (ARG3)
4848 POST_MEM_WRITE(ARG3, sizeof(vki_sigset_t));
4851 PRE(sys_sigsuspend)
4853 *flags |= SfMayBlock;
4855 /* int sigsuspend(const sigset_t *set); */
4856 PRINT("sys_sigsuspend ( %#lx )", ARG1);
4857 PRE_REG_READ1(long, "sigsuspend", vki_sigset_t *, set);
4858 PRE_MEM_READ("sigsuspend(set)", ARG1, sizeof(vki_sigset_t));
4860 /* Be safe. */
4861 if (ARG1 && ML_(safe_to_deref((void *) ARG1, sizeof(vki_sigset_t)))) {
4862 VG_(sigdelset)((vki_sigset_t *) ARG1, VG_SIGVGKILL);
4863 /* We cannot mask VG_SIGVGKILL, as otherwise this thread would not
4864 be killable by VG_(nuke_all_threads_except).
4865 We thus silently ignore the user request to mask this signal.
4866 Note that this is similar to what is done for e.g.
4867 sigprocmask (see m_signals.c calculate_SKSS_from_SCSS). */
4871 PRE(sys_sigaction)
4873 /* int sigaction(int signal, const struct sigaction *act,
4874 struct sigaction *oact); */
4875 PRINT("sys_sigaction ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4876 PRE_REG_READ3(long, "sigaction", int, signal,
4877 const struct sigaction *, act, struct sigaction *, oact);
4879 /* Note that on Solaris, vki_sigaction_toK_t and vki_sigaction_fromK_t are
4880 both typedefs of 'struct sigaction'. */
4882 if (ARG2) {
4883 vki_sigaction_toK_t *sa = (vki_sigaction_toK_t*)ARG2;
4884 PRE_FIELD_READ("sigaction(act->sa_flags)", sa->sa_flags);
4885 PRE_FIELD_READ("sigaction(act->sa_handler)", sa->ksa_handler);
4886 PRE_FIELD_READ("sigaction(act->sa_mask)", sa->sa_mask);
4888 if (ARG3)
4889 PRE_MEM_WRITE("sigaction(oact)", ARG3, sizeof(vki_sigaction_fromK_t));
4891 /* Be safe. */
4892 if (ARG2 && !ML_(safe_to_deref((void*)ARG2,
4893 sizeof(vki_sigaction_toK_t)))) {
4894 SET_STATUS_Failure(VKI_EFAULT);
4896 if (ARG3 && !ML_(safe_to_deref((void*)ARG3,
4897 sizeof(vki_sigaction_fromK_t)))) {
4898 SET_STATUS_Failure(VKI_EFAULT);
4901 if (!FAILURE)
4902 SET_STATUS_from_SysRes(
4903 VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t*)ARG2,
4904 (vki_sigaction_fromK_t*)ARG3));
4907 POST(sys_sigaction)
4909 if (ARG3)
4910 POST_MEM_WRITE(ARG3, sizeof(vki_sigaction_fromK_t));
4913 PRE(sys_sigpending)
4915 /* int sigpending(int flag, sigset_t *setp); */
4916 PRINT("sys_sigpending ( %ld, %#lx )", SARG1, ARG2);
4917 PRE_REG_READ2(long, "sigpending", int, flag, sigset_t *, setp);
4918 PRE_MEM_WRITE("sigpending(setp)", ARG2, sizeof(vki_sigset_t));
4921 POST(sys_sigpending)
4923 POST_MEM_WRITE(ARG2, sizeof(vki_sigset_t));
4926 PRE(sys_getsetcontext)
4928 /* Kernel: int getsetcontext(int flag, void *arg) */
4929 ThreadState *tst = VG_(get_ThreadState)(tid);
4930 PRINT("sys_getsetcontext ( %ld, %#lx )", SARG1, ARG2);
4931 switch (ARG1 /*flag*/) {
4932 case VKI_GETCONTEXT:
4933 /* Libc: int getcontext(ucontext_t *ucp); */
4934 PRE_REG_READ2(long, SC2("getsetcontext", "getcontext"), int, flag,
4935 ucontext_t *, ucp);
4936 PRE_MEM_WRITE("getsetcontext(ucp)", ARG2, sizeof(vki_ucontext_t));
4938 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4939 SET_STATUS_Failure(VKI_EFAULT);
4940 return;
4942 VG_(save_context)(tid, (vki_ucontext_t*)ARG2, Vg_CoreSysCall);
4943 SET_STATUS_Success(0);
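      /* Note that the context was synthesised entirely from the guest state
         by VG_(save_context) above; since the status is already set here,
         the request never reaches the kernel. */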
4944 break;
4945 case VKI_SETCONTEXT:
4946 /* Libc: int setcontext(const ucontext_t *ucp); */
4947 PRE_REG_READ2(long, SC2("getsetcontext", "setcontext"), int, flag,
4948 const ucontext_t *, ucp);
4950 if (!ARG2) {
4951 /* Setting NULL context causes thread exit. */
4952 tst->exitreason = VgSrc_ExitThread;
4953 tst->os_state.exitcode = 0;
4954 SET_STATUS_Success(0);
4955 return;
4958 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4959 SET_STATUS_Failure(VKI_EFAULT);
4960 return;
4963 VG_(restore_context)(tid, (vki_ucontext_t*)ARG2,
4964 Vg_CoreSysCall, False/*esp_is_thrptr*/);
4965 /* Tell the driver not to update the guest state with the "result". */
4966 *flags |= SfNoWriteResult;
4967 /* Check to see if any signals arose as a result of this. */
4968 *flags |= SfPollAfter;
4970 /* Check if this is a possible return from a signal handler. */
4971 VG_(sigframe_return)(tid, (vki_ucontext_t*)ARG2);
4973 SET_STATUS_Success(0);
4974 break;
4975 case VKI_GETUSTACK:
4976 /* Libc: int getustack(stack_t **spp); */
4977 PRE_REG_READ2(long, SC2("getsetcontext", "getustack"), int, flag,
4978 stack_t **, spp);
4979 PRE_MEM_WRITE("getsetcontext(spp)", ARG2, sizeof(vki_stack_t*));
4981 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t*)))) {
4982 SET_STATUS_Failure(VKI_EFAULT);
4983 return;
4986 *(vki_stack_t**)ARG2 = tst->os_state.ustack;
4987 POST_MEM_WRITE(ARG2, sizeof(vki_stack_t*));
4988 SET_STATUS_Success(0);
4989 break;
4990 case VKI_SETUSTACK:
4992 /* Libc: int setustack(stack_t *sp); */
4993 PRE_REG_READ2(long, SC2("getsetcontext", "setustack"), int, flag,
4994 stack_t *, sp);
4996 /* The kernel does not read the stack data immediately, but it may read
4997 them later, so it is better to make sure the data are defined. */
4998 PRE_MEM_READ("getsetcontext_setustack(sp)", ARG2, sizeof(vki_stack_t));
5000 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
5001 SET_STATUS_Failure(VKI_EFAULT);
5002 return;
5005 vki_stack_t *old_stack = tst->os_state.ustack;
5006 tst->os_state.ustack = (vki_stack_t*)ARG2;
5008 /* The thread is setting the ustack pointer. It is a good time to get
5009 information about its stack. */
5010 if (tst->os_state.ustack->ss_flags == 0) {
5011 /* If the sanity check of ss_flags passed, set the stack. */
5012 set_stack(tid, tst->os_state.ustack);
5014 if ((old_stack == NULL) && (tid > 1)) {
5015 /* New thread creation is now completed. Inform the tool. */
5016 VG_TRACK(pre_thread_first_insn, tid);
5020 SET_STATUS_Success(0);
5022 break;
5023 default:
5024 VG_(unimplemented)("Syswrap of the context call with flag %ld.", SARG1);
5025 /*NOTREACHED*/
5026 break;
5030 PRE(sys_fchmodat)
5032 /* int fchmodat(int fd, const char *path, mode_t mode, int flag); */
5034 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5035 This is different from Linux, for example, where glibc sign-extends it. */
5036 Int fd = (Int) ARG1;
5038 PRINT("sys_fchmodat ( %d, %#lx(%s), %ld, %ld )",
5039 fd, ARG2, (HChar *) ARG2, SARG3, SARG4);
5040 PRE_REG_READ4(long, "fchmodat",
5041 int, fd, const char *, path, vki_mode_t, mode, int, flag);
5043 if (ARG2)
5044 PRE_MEM_RASCIIZ("fchmodat(path)", ARG2);
5046 /* Be strict but ignore fd for absolute path. */
5047 if (fd != VKI_AT_FDCWD
5048 && ML_(safe_to_deref)((void *) ARG2, 1)
5049 && ((HChar *) ARG2)[0] != '/'
5050 && !ML_(fd_allowed)(fd, "fchmodat", tid, False))
5051 SET_STATUS_Failure(VKI_EBADF);
5054 PRE(sys_mkdirat)
5056 /* int mkdirat(int fd, const char *path, mode_t mode); */
5058 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5059 This is different from Linux, for example, where glibc sign-extends it. */
5060 Int fd = (Int) ARG1;
5062 *flags |= SfMayBlock;
5063 PRINT("sys_mkdirat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2, SARG3);
5064 PRE_REG_READ3(long, "mkdirat", int, fd, const char *, path,
5065 vki_mode_t, mode);
5066 PRE_MEM_RASCIIZ("mkdirat(path)", ARG2);
5068 /* Be strict but ignore fd for absolute path. */
5069 if (fd != VKI_AT_FDCWD
5070 && ML_(safe_to_deref)((void *) ARG2, 1)
5071 && ((HChar *) ARG2)[0] != '/'
5072 && !ML_(fd_allowed)(fd, "mkdirat", tid, False))
5073 SET_STATUS_Failure(VKI_EBADF);
5076 static void do_statvfs_post(struct vki_statvfs *stats, ThreadId tid)
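   /* The fields are marked individually rather than with a single
      POST_MEM_WRITE over the whole struct, presumably because the
      f_basetype and f_fstr arrays are only defined up to their NUL
      terminator and any reserved padding is left untouched by the kernel. */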
5078 POST_FIELD_WRITE(stats->f_bsize);
5079 POST_FIELD_WRITE(stats->f_frsize);
5080 POST_FIELD_WRITE(stats->f_blocks);
5081 POST_FIELD_WRITE(stats->f_bfree);
5082 POST_FIELD_WRITE(stats->f_bavail);
5083 POST_FIELD_WRITE(stats->f_files);
5084 POST_FIELD_WRITE(stats->f_ffree);
5085 POST_FIELD_WRITE(stats->f_favail);
5086 POST_FIELD_WRITE(stats->f_fsid);
5087 POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
5088 POST_FIELD_WRITE(stats->f_flag);
5089 POST_FIELD_WRITE(stats->f_namemax);
5090 POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
5093 PRE(sys_statvfs)
5095 /* int statvfs(const char *path, struct statvfs *buf); */
5096 *flags |= SfMayBlock;
5097 PRINT("sys_statvfs ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
5098 PRE_REG_READ2(long, "statvfs", const char *, path,
5099 struct vki_statvfs *, buf);
5100 PRE_MEM_RASCIIZ("statvfs(path)", ARG1);
5101 PRE_MEM_WRITE("statvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5104 POST(sys_statvfs)
5106 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
5109 PRE(sys_fstatvfs)
5111 /* int fstatvfs(int fd, struct statvfs *buf); */
5112 *flags |= SfMayBlock;
5113 PRINT("sys_fstatvfs ( %ld, %#lx )", SARG1, ARG2);
5114 PRE_REG_READ2(long, "fstatvfs", int, fd, struct vki_statvfs *, buf);
5115 PRE_MEM_WRITE("fstatvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5117 /* Be strict. */
5118 if (!ML_(fd_allowed)(ARG1, "fstatvfs", tid, False))
5119 SET_STATUS_Failure(VKI_EBADF);
5122 POST(sys_fstatvfs)
5124 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
5127 PRE(sys_nfssys)
5129 /* int nfssys(enum nfssys_op opcode, void *arg); */
5130 *flags |= SfMayBlock;
5131 PRINT("sys_nfssys ( %ld, %#lx )", SARG1, ARG2);
5133 switch (ARG1 /*opcode*/) {
5134 case VKI_NFS_REVAUTH:
5135 PRE_REG_READ2(long, SC2("nfssys", "nfs_revauth"), int, opcode,
5136 struct vki_nfs_revauth_args *, args);
5137 PRE_MEM_READ("nfssys(arg)", ARG2,
5138 sizeof(struct vki_nfs_revauth_args));
5139 break;
5140 default:
5141 VG_(unimplemented)("Syswrap of the nfssys call with opcode %ld.", SARG1);
5142 /*NOTREACHED*/
5143 break;
5147 POST(sys_nfssys)
5149 switch (ARG1 /*opcode*/) {
5150 case VKI_NFS_REVAUTH:
5151 break;
5152 default:
5153 vg_assert(0);
5154 break;
5158 PRE(sys_waitid)
5160 /* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); */
5161 *flags |= SfMayBlock;
5162 PRINT("sys_waitid( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
5163 PRE_REG_READ4(long, "waitid", vki_idtype_t, idtype, vki_id_t, id,
5164 siginfo_t *, infop, int, options);
5165 PRE_MEM_WRITE("waitid(infop)", ARG3, sizeof(vki_siginfo_t));
5168 POST(sys_waitid)
5170 POST_MEM_WRITE(ARG3, sizeof(vki_siginfo_t));
5173 PRE(sys_sigsendsys)
5175 /* int sigsendsys(procset_t *psp, int sig); */
5176 PRINT("sys_sigsendsys( %#lx, %ld )", ARG1, SARG2);
5177 PRE_REG_READ2(long, "sigsendsys", vki_procset_t *, psp, int, signal);
5178 PRE_MEM_READ("sigsendsys(psp)", ARG1, sizeof(vki_procset_t));
5180 if (!ML_(client_signal_OK)(ARG1)) {
5181 SET_STATUS_Failure(VKI_EINVAL);
5183 if (!ML_(safe_to_deref)((void *) ARG1, sizeof(vki_procset_t))) {
5184 SET_STATUS_Failure(VKI_EFAULT);
5187 /* Exit early if there are problems. */
5188 if (FAILURE)
5189 return;
5191 vki_procset_t *psp = (vki_procset_t *) ARG1;
5192 switch (psp->p_op) {
5193 case VKI_POP_AND:
5194 break;
5195 default:
5196 VG_(unimplemented)("Syswrap of the sigsendsys call with op %u.",
5197 psp->p_op);
5200 UInt pid;
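   /* A procset that targets a single process is normally encoded with
      P_PID on one side and P_ALL on the other (in either order); those are
      the only two combinations handled here. */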
5201 if ((psp->p_lidtype == VKI_P_PID) && (psp->p_ridtype == VKI_P_ALL)) {
5202 pid = psp->p_lid;
5203 } else if ((psp->p_lidtype == VKI_P_ALL) && (psp->p_ridtype == VKI_P_PID)) {
5204 pid = psp->p_rid;
5205 } else {
5206 VG_(unimplemented)("Syswrap of the sigsendsys call with lidtype %u and "
5207 "ridtype %u.", psp->p_lidtype, psp->p_ridtype);
5210 if (VG_(clo_trace_signals))
5211 VG_(message)(Vg_DebugMsg, "sigsendsys: sending signal to process %u\n",
5212 pid);
5214 /* Handle SIGKILL specially. */
5215 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(pid, -1)) {
5216 SET_STATUS_Success(0);
5217 return;
5220 /* Check to see if this gave us a pending signal. */
5221 *flags |= SfPollAfter;
5224 #if defined(SOLARIS_UTIMESYS_SYSCALL)
5225 PRE(sys_utimesys)
5227 /* Kernel: int utimesys(int code, uintptr_t arg1, uintptr_t arg2,
5228 uintptr_t arg3, uintptr_t arg4);
5231 switch (ARG1 /*code*/) {
5232 case 0:
5233 /* Libc: int futimens(int fd, const timespec_t times[2]); */
5234 PRINT("sys_utimesys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
5235 PRE_REG_READ3(long, "utimesys", int, code, int, fd,
5236 const vki_timespec_t *, times);
5237 if (ARG3)
5238 PRE_MEM_READ("utimesys(times)", ARG3, 2 * sizeof(vki_timespec_t));
5240 /* Be strict. */
5241 if (!ML_(fd_allowed)(ARG2, "utimesys", tid, False))
5242 SET_STATUS_Failure(VKI_EBADF);
5243 break;
5244 case 1:
5246 /* Libc: int utimensat(int fd, const char *path,
5247 const timespec_t times[2], int flag);
5250 /* Interpret the second argument as 32-bit value even on 64-bit
5251 architecture. This is different from Linux, for example, where glibc
5252 sign-extends it. */
5253 Int fd = (Int) ARG2;
5255 PRINT("sys_utimesys ( %ld, %d, %#lx(%s), %#lx, %ld )",
5256 SARG1, fd, ARG3, (HChar *) ARG3, ARG4, SARG5);
5257 PRE_REG_READ5(long, "utimesys", int, code, int, fd, const char *, path,
5258 const vki_timespec_t *, times, int, flag);
5259 if (ARG3)
5260 PRE_MEM_RASCIIZ("utimesys(path)", ARG3);
5261 if (ARG4)
5262 PRE_MEM_READ("utimesys(times)", ARG4, 2 * sizeof(vki_timespec_t));
5264 /* Be strict but ignore fd for absolute path. */
5265 if (fd != VKI_AT_FDCWD
5266 && ML_(safe_to_deref)((void *) ARG3, 1)
5267 && ((HChar *) ARG3)[0] != '/'
5268 && !ML_(fd_allowed)(fd, "utimesys", tid, False))
5269 SET_STATUS_Failure(VKI_EBADF);
5270 break;
5272 default:
5273 VG_(unimplemented)("Syswrap of the utimesys call with code %ld.", SARG1);
5274 /*NOTREACHED*/
5275 break;
5278 #endif /* SOLARIS_UTIMESYS_SYSCALL */
5280 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
5281 PRE(sys_utimensat)
5283 /* int utimensat(int fd, const char *path, const timespec_t times[2],
5284 int flag);
5287 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5288 This is different from Linux, for example, where glibc sign-extends it. */
5289 Int fd = (Int) ARG1;
5291 PRINT("sys_utimensat ( %d, %#lx(%s), %#lx, %ld )",
5292 fd, ARG2, (HChar *) ARG2, ARG3, SARG4);
5293 PRE_REG_READ4(long, "utimensat", int, fd, const char *, path,
5294 const vki_timespec_t *, times, int, flag);
5295 if (ARG2)
5296 PRE_MEM_RASCIIZ("utimensat(path)", ARG2);
5297 if (ARG3)
5298 PRE_MEM_READ("utimensat(times)", ARG3, 2 * sizeof(vki_timespec_t));
5300 /* Be strict but ignore fd for absolute path. */
5301 if (fd != VKI_AT_FDCWD
5302 && ML_(safe_to_deref)((void *) ARG2, 1)
5303 && ((HChar *) ARG2)[0] != '/'
5304 && !ML_(fd_allowed)(fd, "utimensat", tid, False))
5305 SET_STATUS_Failure(VKI_EBADF);
5307 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
5309 PRE(sys_sigresend)
5311 /* int sigresend(int signal, siginfo_t *siginfo, sigset_t *mask); */
5312 /* Sends a signal to the calling thread, the mask parameter specifies a new
5313 signal mask. */
5315 /* Static (const) mask accessible from outside of this function. */
5316 static vki_sigset_t block_all;
5318 PRINT("sys_sigresend( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
5319 PRE_REG_READ3(long, "sigresend", int, signal, vki_siginfo_t *, siginfo,
5320 vki_sigset_t *, mask);
5322 if (ARG2)
5323 PRE_MEM_READ("sigresend(siginfo)", ARG2, sizeof(vki_siginfo_t));
5324 PRE_MEM_WRITE("sigresend(mask)", ARG3, sizeof(vki_sigset_t));
5326 /* Check the signal and mask. */
5327 if (!ML_(client_signal_OK)(ARG1)) {
5328 SET_STATUS_Failure(VKI_EINVAL);
5330 if (!ML_(safe_to_deref)((void*)ARG3, sizeof(vki_sigset_t))) {
5331 SET_STATUS_Failure(VKI_EFAULT);
5334 /* Exit early if there are problems. */
5335 if (FAILURE)
5336 return;
5338 /* Save the requested mask to unused ARG4. */
5339 ARG4 = ARG3;
5341 /* Fake the requested sigmask with a block-all mask. If the syscall
5342 succeeds then we will block "all" signals for a few instructions (in
5343 syscall-x86-solaris.S) but the correct mask will be almost instantly set
5344 again by a call to sigprocmask (also in syscall-x86-solaris.S). If the
5345 syscall fails then the mask is not changed, so everything is ok too. */
5346 VG_(sigfillset)(&block_all);
5347 ARG3 = (UWord)&block_all;
5349 /* Check to see if this gave us a pending signal. */
5350 *flags |= SfPollAfter;
5352 if (VG_(clo_trace_signals))
5353 VG_(message)(Vg_DebugMsg, "sigresend: resending signal %lu\n", ARG1);
5355 /* Handle SIGKILL specially. */
5356 if (ARG1 == VKI_SIGKILL && ML_(do_sigkill)(tid, -1)) {
5357 SET_STATUS_Success(0);
5358 return;
5361 /* Ask to handle this syscall via the slow route, since that's the only one
5362 that sets tst->status to VgTs_WaitSys. If the result of doing the
5363 syscall is an immediate run of async_signalhandler() in m_signals.c,
5364 then we need the thread to be properly tidied away. */
5365 *flags |= SfMayBlock;
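/* A sketch of the resulting sequence for a successful sigresend, assuming
   the syscall stubs behave as described in the comments above:
      1. the kernel queues the signal for the calling thread and installs
         the block-all mask passed in ARG3;
      2. the assembly stub (syscall-*-solaris.S) promptly restores the real
         mask with a sigprocmask() call;
      3. POST(sys_sigresend) below updates Valgrind's own record of the
         thread's signal mask from the value saved in ARG4. */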
5368 POST(sys_sigresend)
5370 /* The syscall succeeded, set the requested mask. */
5371 VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, (vki_sigset_t*)ARG4, NULL);
5373 if (VG_(clo_trace_signals))
5374 VG_(message)(Vg_DebugMsg, "sigresend: resent signal %lu\n", ARG1);
5377 static void mem_priocntlsys_parm_ok(ThreadId tid, Bool pre, Bool reade,
5378 vki_pc_vaparm_t *parm)
5380 if (reade)
5381 return;
5383 if (pre)
5384 PRE_FIELD_WRITE("priocntlsys(parm)", parm->pc_parm);
5385 else
5386 POST_FIELD_WRITE(parm->pc_parm);
5389 static void mem_priocntlsys_parm(ThreadId tid, Bool pre, Bool reade,
5390 const HChar *clname,
5391 vki_pc_vaparm_t *parm)
5393 /* This function is used to handle the PC_SETXPARMS and PC_GETXPARMS
5394 parameters. In the case of PC_SETXPARMS, the code below merely checks
5395 if all parameters are scalar; PRE_MEM_READ() for these parameters is
5396 already done by the PC_SETXPARMS handler in PRE(sys_priocntlsys).
5398 A caller of this function is responsible for checking that clname and
5399 &parm->key can be dereferenced. */
5401 if (VG_STREQ(clname, "RT")) {
5402 switch (parm->pc_key) {
5403 case VKI_RT_KY_PRI:
5404 case VKI_RT_KY_TQSECS:
5405 case VKI_RT_KY_TQNSECS:
5406 case VKI_RT_KY_TQSIG:
5407 /* Scalar values that are stored directly in pc_parm. */
5408 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5409 return;
5412 else if (VG_STREQ(clname, "TS")) {
5413 switch (parm->pc_key) {
5414 case VKI_TS_KY_UPRILIM:
5415 case VKI_TS_KY_UPRI:
5416 /* Scalar values that are stored directly in pc_parm. */
5417 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5418 return;
5421 else if (VG_STREQ(clname, "IA")) {
5422 switch (parm->pc_key) {
5423 case VKI_IA_KY_UPRILIM:
5424 case VKI_IA_KY_UPRI:
5425 case VKI_IA_KY_MODE:
5426 /* Scalar values that are stored directly in pc_parm. */
5427 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5428 return;
5431 else if (VG_STREQ(clname, "FSS")) {
5432 switch (parm->pc_key) {
5433 case VKI_FSS_KY_UPRILIM:
5434 case VKI_FSS_KY_UPRI:
5435 /* Scalar values that are stored directly in pc_parm. */
5436 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5437 return;
5440 else if (VG_STREQ(clname, "FX")) {
5441 switch (parm->pc_key) {
5442 case VKI_FX_KY_UPRILIM:
5443 case VKI_FX_KY_UPRI:
5444 case VKI_FX_KY_TQSECS:
5445 case VKI_FX_KY_TQNSECS:
5446 /* Scalar values that are stored directly in pc_parm. */
5447 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5448 return;
5451 else {
5452 /* Unknown class. */
5453 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5454 clname);
5455 /*NOTREACHED*/
5458 /* The class is known but pc_key is unknown. */
5459 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s "
5460 "and pc_key=%d.", clname, parm->pc_key);
5461 /*NOTREACHED*/
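/* For orientation, a hedged sketch of what libc typically passes down for
   PC_GETXPARMS (the exact packing is libc's business and is not relied upon
   here): the varargs of priocntl(2) end up in a pc_vaparms_t such as

      pc_vaparms_t vp;
      vp.pc_vaparmscnt       = 1;
      vp.pc_parms[0].pc_key  = PC_KY_CLNAME;             (ask for class name)
      vp.pc_parms[0].pc_parm = (uintptr_t) clname_buf;   (caller's buffer)

   which is why the wrappers below have to treat pc_parm either as a scalar
   value or as a pointer, depending on the class and key. */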
5464 PRE(sys_priocntlsys)
5466 /* long priocntlsys(int pc_version, procset_t *psp, int cmd, caddr_t arg,
5467 caddr_t arg2); */
5469 if (ARG1 != 1) {
5470 /* Only the first version of priocntlsys is supported by the code below.
5472 VG_(unimplemented)("Syswrap of the priocntlsys where pc_version=%lu.",
5473 ARG1);
5474 /*NOTREACHED*/
5477 PRINT("sys_priocntlsys ( %ld, %#lx, %ld, %#lx, %#lx )", SARG1, ARG2, SARG3,
5478 ARG4, ARG5);
5479 PRE_REG_READ5(long, "priocntlsys", int, pc_version, procset_t *, psp,
5480 int, cmd, void *, arg, void *, arg2);
5482 switch (ARG3 /*cmd*/) {
5483 case VKI_PC_GETCID:
5484 if (ARG4) {
5485 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5486 PRE_MEM_RASCIIZ("priocntlsys(clname)", (Addr)info->pc_clname);
5487 /* The next line says that the complete pcinfo_t structure can be
5488 written, but this actually isn't true for pc_clname which is
5489 always only read. */
5490 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5492 break;
5493 case VKI_PC_GETCLINFO:
5494 if (ARG4) {
5495 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5496 PRE_FIELD_READ("priocntlsys(cid)", info->pc_cid);
5497 /* The next line says that the complete pcinfo_t structure can be
5498 written, but this actually isn't true for pc_cid which is
5499 always only read. */
5500 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5502 break;
5503 case VKI_PC_SETPARMS:
5504 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5505 /* The next line says that the complete pcparms_t structure is read
5506 which is never actually true (we are too pessimistic here).
5507 Unfortunately we can't do better because we don't know what
5508 process class is involved. */
5509 PRE_MEM_READ("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5510 break;
5511 case VKI_PC_GETPARMS:
5512 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5513 PRE_MEM_WRITE("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5514 break;
5515 case VKI_PC_GETPRIRANGE:
5517 vki_pcpri_t *pcpri = (vki_pcpri_t*)ARG4;
5518 PRE_FIELD_READ("priocntlsys(cid)", pcpri->pc_cid);
5520 PRE_MEM_WRITE("priocntlsys(pri)", ARG4, sizeof(vki_pcpri_t));
5521 break;
5522 case VKI_PC_DONICE:
5523 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5525 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5526 PRE_FIELD_READ("priocntlsys(op)", nicee->pc_op);
5527 if (ML_(safe_to_deref)(&nicee->pc_op, sizeof(nicee->pc_op))) {
5528 switch (nicee->pc_op) {
5529 case VKI_PC_GETNICE:
5530 PRE_FIELD_WRITE("priocntlsys(val)", nicee->pc_val);
5531 break;
5532 case VKI_PC_SETNICE:
5533 PRE_FIELD_READ("priocntlsys(val)", nicee->pc_val);
5534 break;
5535 default:
5536 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5537 "cmd=PC_DONICE and pc_op=%d", nicee->pc_op);
5538 /*NOTREACHED*/
5539 break;
5543 break;
5544 case VKI_PC_SETXPARMS:
5545 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5546 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5547 if (ARG5) {
5548 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5549 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5550 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5551 sizeof(parms->pc_vaparmscnt))) {
5552 vki_uint_t i;
5553 PRE_MEM_READ("priocntlsys(parms)", (Addr)parms->pc_parms,
5554 parms->pc_vaparmscnt * sizeof(parms->pc_parms[0]));
5555 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5556 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5557 if (ML_(safe_to_deref)(parm, sizeof(*parm)) &&
5558 ML_(safe_to_deref)((void*)ARG4, 1))
5559 mem_priocntlsys_parm(tid, True /*pre*/, True /*read*/,
5560 (HChar*)ARG4, parm);
5564 break;
5565 case VKI_PC_GETXPARMS:
5566 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5567 if (ARG4)
5568 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5569 if (ARG5) {
5570 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5571 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5572 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5573 sizeof(parms->pc_vaparmscnt))) {
5574 vki_uint_t i;
5575 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5576 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5577 PRE_MEM_READ("priocntlsys(parms)", (Addr)&parm->pc_key,
5578 parms->pc_vaparmscnt * sizeof(parm->pc_key));
5579 if (ML_(safe_to_deref)(&parm->pc_key,
5580 sizeof(parm->pc_key))) {
5581 /* First handle PC_KY_CLNAME, then class specific keys.
5582 Note that PC_KY_CLNAME can be used only with
5583 ARG4==NULL && parms->pc_vaparmscnt==1. We are not so
5584 strict here and handle this special case as a regular
5585 one which makes the code simpler. */
5586 if (parm->pc_key == VKI_PC_KY_CLNAME)
5587 PRE_MEM_WRITE("priocntlsys(clname)", parm->pc_parm,
5588 VKI_PC_CLNMSZ);
5589 else if (ARG4 && ML_(safe_to_deref)((void*)ARG4, 1))
5590 mem_priocntlsys_parm(tid, True /*pre*/,
5591 False /*read*/, (HChar*)ARG4,
5592 parm);
5597 break;
5598 case VKI_PC_SETDFLCL:
5599 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5600 break;
5601 case VKI_PC_GETDFLCL:
5602 if (ARG4) {
5603 /* GETDFLCL writes to the ARG4 buffer only if ARG4 isn't NULL. Also
5604 note that if ARG4 is NULL then the syscall succeeds. */
5605 PRE_MEM_WRITE("priocntlsys(clname)", ARG4, VKI_PC_CLNMSZ);
5607 break;
5608 case VKI_PC_DOPRIO:
5609 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5611 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5612 PRE_FIELD_READ("priocntlsys(op)", prio->pc_op);
5613 if (ML_(safe_to_deref)(&prio->pc_op, sizeof(prio->pc_op))) {
5614 switch (prio->pc_op) {
5615 case VKI_PC_GETPRIO:
5616 PRE_FIELD_WRITE("priocntlsys(cid)", prio->pc_cid);
5617 PRE_FIELD_WRITE("priocntlsys(val)", prio->pc_val);
5618 break;
5619 case VKI_PC_SETPRIO:
5620 PRE_FIELD_READ("priocntlsys(cid)", prio->pc_cid);
5621 PRE_FIELD_READ("priocntlsys(val)", prio->pc_val);
5622 break;
5623 default:
5624 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5625 "cmd=PC_DOPRIO and pc_op=%d", prio->pc_op);
5626 /*NOTREACHED*/
5627 break;
5631 break;
5632 case VKI_PC_ADMIN:
5633 default:
5634 VG_(unimplemented)("Syswrap of the priocntlsys call with cmd %ld.", SARG3);
5635 /*NOTREACHED*/
5636 break;
5640 static void post_mem_write_priocntlsys_clinfo(ThreadId tid,
5641 const HChar *clname, Addr clinfo)
5643 if (VG_STREQ(clname, "RT"))
5644 POST_MEM_WRITE(clinfo, sizeof(vki_rtinfo_t));
5645 else if (VG_STREQ(clname, "TS"))
5646 POST_MEM_WRITE(clinfo, sizeof(vki_tsinfo_t));
5647 else if (VG_STREQ(clname, "IA"))
5648 POST_MEM_WRITE(clinfo, sizeof(vki_iainfo_t));
5649 else if (VG_STREQ(clname, "FSS"))
5650 POST_MEM_WRITE(clinfo, sizeof(vki_fssinfo_t));
5651 else if (VG_STREQ(clname, "FX"))
5652 POST_MEM_WRITE(clinfo, sizeof(vki_fxinfo_t));
5653 else if (VG_STREQ(clname, "SDC")) {
5654 /* Relax. */
5656 else {
5657 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5658 clname);
5659 /*NOTREACHED*/
5663 POST(sys_priocntlsys)
5665 switch (ARG3 /*cmd*/) {
5666 case VKI_PC_GETCID:
5667 if (ARG4) {
5668 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5669 POST_FIELD_WRITE(info->pc_cid);
5670 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5671 (Addr)&info->pc_clinfo);
5673 break;
5674 case VKI_PC_GETCLINFO:
5675 if (ARG4) {
5676 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5677 POST_MEM_WRITE((Addr)info->pc_clname,
5678 VG_(strlen)((HChar*)info->pc_clname) + 1);
5679 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5680 (Addr)&info->pc_clinfo);
5682 break;
5683 case VKI_PC_SETPARMS:
5684 /* Relax. */
5685 break;
5686 case VKI_PC_GETPARMS:
5687 /* The next line says that the complete pcparms_t structure is
5688 written which is never actually true (we are too optimistic here).
5689 Unfortunately we can't do better because we don't know what
5690 process class is involved. */
5691 POST_MEM_WRITE(ARG4, sizeof(vki_pcparms_t));
5692 break;
5693 case VKI_PC_GETPRIRANGE:
5694 POST_MEM_WRITE(ARG4, sizeof(vki_pcpri_t));
5695 break;
5696 case VKI_PC_DONICE:
5698 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5699 if (nicee->pc_op == VKI_PC_GETNICE)
5700 POST_FIELD_WRITE(nicee->pc_val);
5702 break;
5703 case VKI_PC_SETXPARMS:
5704 /* Relax. */
5705 break;
5706 case VKI_PC_GETXPARMS:
5708 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5709 vki_uint_t i;
5710 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5711 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5712 if (parm->pc_key == VKI_PC_KY_CLNAME)
5713 POST_MEM_WRITE(parm->pc_parm,
5714 VG_(strlen)((HChar*)(Addr)parm->pc_parm) + 1);
5715 else if (ARG4)
5716 mem_priocntlsys_parm(tid, False /*pre*/, False /*read*/,
5717 (HChar*)ARG4, parm);
5720 break;
5721 case VKI_PC_SETDFLCL:
5722 /* Relax. */
5723 break;
5724 case VKI_PC_GETDFLCL:
5725 if (ARG4)
5726 POST_MEM_WRITE(ARG4, VG_(strlen)((HChar*)ARG4) + 1);
5727 break;
5728 case VKI_PC_DOPRIO:
5730 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5731 if (prio->pc_op == VKI_PC_GETPRIO) {
5732 POST_FIELD_WRITE(prio->pc_cid);
5733 POST_FIELD_WRITE(prio->pc_val);
5736 break;
5737 case VKI_PC_ADMIN:
5738 default:
5739 vg_assert(0);
5740 break;
5744 PRE(sys_pathconf)
5746 /* long pathconf(const char *path, int name); */
5747 PRINT("sys_pathconf ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
5748 PRE_REG_READ2(long, "pathconf", const char *, path, int, name);
5749 PRE_MEM_RASCIIZ("pathconf(path)", ARG1);
5752 PRE(sys_mmap)
5754 /* void *mmap(void *addr, size_t len, int prot, int flags,
5755 int fildes, off_t off); */
5756 SysRes r;
5757 OffT offset;
5759 /* Stay sane. */
5760 vg_assert(VKI_PAGE_SIZE == 4096);
5761 vg_assert(sizeof(offset) == sizeof(ARG6));
5763 PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx )",
5764 ARG1, ARG2, ARG3, ARG4, SARG5, ARG6);
5765 PRE_REG_READ6(long, "mmap", void *, start, vki_size_t, length,
5766 int, prot, int, flags, int, fd, vki_off_t, offset);
5768 /* Make sure that if off < 0 then it's passed correctly to the generic mmap
5769 wrapper. */
5770 offset = *(OffT*)&ARG6;
5772 r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
5773 SET_STATUS_from_SysRes(r);
5776 #if defined(SOLARIS_UUIDSYS_SYSCALL)
5777 PRE(sys_uuidsys)
5779 /* int uuidsys(struct uuid *uuid); */
5780 PRINT("sys_uuidsys ( %#lx )", ARG1);
5781 PRE_REG_READ1(long, "uuidsys", struct vki_uuid *, uuid);
5782 PRE_MEM_WRITE("uuidsys(uuid)", ARG1, sizeof(struct vki_uuid));
5785 POST(sys_uuidsys)
5787 POST_MEM_WRITE(ARG1, sizeof(struct vki_uuid));
5789 #endif /* SOLARIS_UUIDSYS_SYSCALL */
5791 /* Syscall mmapobj emulation. Processes ELF program headers
5792 and maps them into the correct place in memory. Not an easy task, though.
5793 ELF program header of PT_LOAD/PT_SUNWBSS type specifies:
5794 o p_vaddr - actually a memory offset
5795 o p_memsz - total segment size, including text, data and BSS
5796 o p_filesz - file-based segment size mapping (includes only text and data);
5797 p_memsz - p_filesz is the size of BSS
5798 o p_offset - offset into the ELF file where the file-based mapping starts
5800 Several problematic areas to cover here:
5801 1. p_offset can contain a value which is not page-aligned. In that case
5802 we mmap a part of the file prior to p_offset to make the start address
5803 page-aligned.
5804 2. Partially unused page after the file-based mapping must be zeroed.
5805 3. The first mapping is flagged with MR_HDR_ELF and needs to contain
5806 the ELF header. This information is used and verified by the dynamic
5807 linker (ld.so.1). */
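/* A worked example with hypothetical numbers (illustrative only, assuming a
   4K page size): a PT_LOAD header with p_offset=0x1830, p_vaddr=0x2830,
   p_filesz=0x2000 and p_memsz=0x3000 is handled as follows.  p_offset is not
   page-aligned, so the file mapping is extended downwards by 0x830 bytes
   (point 1 above) to start on a page boundary.  p_memsz - p_filesz = 0x1000
   bytes of BSS follow the file-backed part: the tail of the last file-backed
   page is zeroed explicitly (point 2 above) and any remaining whole pages
   are provided by an anonymous, implicitly zeroed mapping. */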
5808 static SysRes mmapobj_process_phdrs(ThreadId tid, Int fd,
5809 vki_mmapobj_result_t *storage,
5810 vki_uint_t *elements,
5811 const VKI_ESZ(Ehdr) *ehdr,
5812 const VKI_ESZ(Phdr) *phdrs)
5814 #define ADVANCE_PHDR(ehdr, phdr) \
5815 (const VKI_ESZ(Phdr) *) ((const HChar *) (phdr) + (ehdr)->e_phentsize)
5817 SysRes res;
5818 Int i;
5819 Int first_segment_idx = -1;
5820 UInt idx;
5821 UInt segments = 0; /* loadable segments */
5822 Addr start_addr = 0;
5823 Addr end_addr = 0;
5824 Addr elfbrk = 0;
5825 SizeT max_align = VKI_PAGE_SIZE;
5827 /* 1. First pass over phdrs - determine number, span and max alignment. */
5828 const VKI_ESZ(Phdr) *phdr = phdrs;
5829 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5830 /* Skip this header if no memory is requested. */
5831 if (phdr->p_memsz == 0)
5832 continue;
5834 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5835 Off64T offset = 0;
5837 if (VG_(clo_trace_syscalls))
5838 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
5839 "program header #%u: addr=%#lx type=%#lx "
5840 "prot=%#lx memsz=%#lx filesz=%#lx file "
5841 "offset=%#lx\n", idx, phdr->p_vaddr,
5842 (UWord) phdr->p_type, (UWord) phdr->p_flags,
5843 phdr->p_memsz, phdr->p_filesz, phdr->p_offset);
5845 if (segments == 0) {
5846 first_segment_idx = idx;
5848 if (phdr->p_filesz == 0) {
5849 VG_(unimplemented)("Syswrap of the mmapobj call with the first "
5850 "loadable ELF program header specifying "
5851 "p_filesz == 0");
5852 /*NOTREACHED*/
5853 return res;
5856 /* Address of the first segment must be either NULL or within the
5857 first page. */
5858 if ((ehdr->e_type == VKI_ET_DYN) &&
5859 ((phdr->p_vaddr & VKI_PAGEMASK) != 0)) {
5860 if (VG_(clo_trace_syscalls))
5861 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5862 "ELF program header #%u does not land on "
5863 "the first page (vaddr=%#lx)\n", idx,
5864 phdr->p_vaddr);
5865 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5868 start_addr = phdr->p_vaddr;
5869 /* The first segment is mapped from the beginning of the file (to
5870 include also the ELF header), so include this memory as well.
5871 Later on we flag this mapping with MR_HDR_ELF. */
5872 offset = phdr->p_offset;
5875 if (phdr->p_align > 1) {
5876 if ((phdr->p_vaddr % phdr->p_align) !=
5877 (phdr->p_offset % phdr->p_align)) {
5878 if (VG_(clo_trace_syscalls))
5879 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5880 "ELF program header #%u does not have "
5881 "congruent offset and vaddr (vaddr=%#lx "
5882 "file offset=%#lx align=%#lx)\n", idx,
5883 phdr->p_vaddr, phdr->p_offset,
5884 phdr->p_align);
5885 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5889 if (phdr->p_vaddr < end_addr) {
5890 if (VG_(clo_trace_syscalls))
5891 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5892 "ELF program header #%u specifies overlaping "
5893 "address (vaddr=%#lx end_addr=%#lx)\n",
5894 idx, phdr->p_vaddr, end_addr);
5895 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5898 end_addr = elfbrk = phdr->p_vaddr + phdr->p_memsz + offset;
5899 end_addr = VG_PGROUNDUP(end_addr);
5900 if (phdr->p_align > max_align) {
5901 max_align = phdr->p_align;
5904 segments += 1;
5908 /* Alignment check - it should be power of two. */
5909 if ((max_align & (max_align - 1)) != 0) {
5910 if (VG_(clo_trace_syscalls))
5911 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5912 "is not a power of 2 (%#lx)\n", max_align);
5913 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5915 vg_assert(max_align >= VKI_PAGE_SIZE);
5917 #if defined(VGP_x86_solaris)
5918 if (max_align > VKI_UINT_MAX) {
5919 if (VG_(clo_trace_syscalls))
5920 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5921 "for 32-bit ELF is >32-bits (%#lx)\n", max_align);
5922 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5924 #endif /* VGP_x86_solaris */
5926 if (segments == 0) {
5927 if (VG_(clo_trace_syscalls))
5928 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5929 "to map (0 loadable segments)");
5930 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5933 vg_assert(end_addr >= start_addr);
5934 SizeT span = end_addr - start_addr;
5935 if (span == 0) {
5936 if (VG_(clo_trace_syscalls))
5937 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5938 "to map (%u loadable segments spanning 0 bytes)\n",
5939 segments);
5940 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5942 vg_assert(first_segment_idx >= 0);
5944 if (segments > *elements) {
5945 if (VG_(clo_trace_syscalls))
5946 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: too many "
5947 "segments (%u)\n", segments);
5948 return VG_(mk_SysRes_Error)(VKI_E2BIG);
5951 if (VG_(clo_trace_syscalls))
5952 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: there "
5953 "are %u loadable segments spanning %#lx bytes; max "
5954 "align is %#lx\n", segments, span, max_align);
5956 /* Now get the aspacemgr oraculum advisory.
5957 Later on we mmap file-based and BSS mappings into this address space area
5958 as required and leave the holes unmapped. */
5959 if (ehdr->e_type == VKI_ET_DYN) {
5960 MapRequest mreq = {MAlign, max_align, span};
5961 Bool ok;
5962 start_addr = VG_(am_get_advisory)(&mreq, True /* forClient */, &ok);
5963 if (!ok) {
5964 if (VG_(clo_trace_syscalls))
5965 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5966 "failed to reserve address space of %#lx bytes "
5967 "with alignment %#lx\n", span, max_align);
5968 return VG_(mk_SysRes_Error)(VKI_ENOMEM);
5970 vg_assert(VG_ROUNDUP(start_addr, max_align) == start_addr);
5972 if (VG_(clo_trace_syscalls))
5973 VG_(debugLog)(2, "syswrap-solaris", "PRE(sys_mmapobj): address space "
5974 "reserved at: vaddr=%#lx size=%#lx\n",
5975 start_addr, span);
5976 } else {
5977 vg_assert(ehdr->e_type == VKI_ET_EXEC);
5978 /* ET_EXEC uses fixed mappings. Will be checked when processing phdrs. */
5981 /* This is an utterly ugly hack: the aspacemgr assumes that only one
5982 segment is added at a time. However, we add multiple segments here, so
5983 AM_SANITY_CHECK inside the aspacemgr can easily fail. We want to
5984 prevent that, thus we disable these checks. The scheduler will check the
5985 aspacemgr sanity after the syscall. */
5986 UInt sanity_level = VG_(clo_sanity_level);
5987 VG_(clo_sanity_level) = 1;
5989 /* 2. Second pass over phdrs - map the program headers and fill in
5990 the mmapobj_result_t array. */
5991 phdr = phdrs;
5992 *elements = 0;
5993 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5994 /* Skip this header if no memory is requested. */
5995 if (phdr->p_memsz == 0)
5996 continue;
5998 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5999 UInt prot = 0;
6000 if (phdr->p_flags & VKI_PF_R)
6001 prot |= VKI_PROT_READ;
6002 if (phdr->p_flags & VKI_PF_W)
6003 prot |= VKI_PROT_WRITE;
6004 if (phdr->p_flags & VKI_PF_X)
6005 prot |= VKI_PROT_EXEC;
6007 vki_mmapobj_result_t *mrp = &storage[*elements];
6008 mrp->mr_msize = phdr->p_memsz;
6009 mrp->mr_fsize = phdr->p_filesz;
6010 mrp->mr_offset = 0;
6011 mrp->mr_prot = prot;
6012 mrp->mr_flags = 0;
6013 Off64T file_offset = phdr->p_offset;
6014 if (idx == first_segment_idx) {
6015 mrp->mr_flags = VKI_MR_HDR_ELF;
6016 if (ehdr->e_type == VKI_ET_DYN) {
6017 if (phdr->p_offset > 0) {
6018 /* Include the ELF header into the first segment.
6019 This means we ignore p_offset from the program header
6020 and map from file offset 0. */
6021 mrp->mr_msize += phdr->p_offset;
6022 mrp->mr_fsize += phdr->p_offset;
6023 file_offset = 0;
6025 } else {
6026 vg_assert(ehdr->e_type == VKI_ET_EXEC);
6027 start_addr = phdr->p_vaddr;
6031 /* p_vaddr is absolute for ET_EXEC, and relative for ET_DYN. */
6032 mrp->mr_addr = (vki_caddr_t) phdr->p_vaddr;
6033 if (ehdr->e_type == VKI_ET_DYN) {
6034 mrp->mr_addr += start_addr;
6037 SizeT page_offset = (Addr) mrp->mr_addr & VKI_PAGEOFFSET;
6038 if (page_offset > 0) {
6039 vg_assert(file_offset >= page_offset);
6040 /* Mapping address does not start at the beginning of a page.
6041 Therefore include some bytes before to make it page aligned. */
6042 mrp->mr_addr -= page_offset;
6043 mrp->mr_msize += page_offset;
6044 mrp->mr_offset = page_offset;
6045 file_offset -= page_offset;
6047 SizeT file_size = mrp->mr_fsize + mrp->mr_offset;
6048 if (VG_(clo_trace_syscalls))
6049 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
6050 "mmapobj result #%u: addr=%#lx msize=%#lx "
6051 "fsize=%#lx mr_offset=%#lx prot=%#x flags=%#x\n",
6052 *elements, (Addr) mrp->mr_addr,
6053 (UWord) mrp->mr_msize, (UWord) mrp->mr_fsize,
6054 (UWord) mrp->mr_offset, mrp->mr_prot,
6055 mrp->mr_flags);
6057 UInt flags = VKI_MAP_PRIVATE | VKI_MAP_FIXED;
6058 if ((mrp->mr_prot & (VKI_PROT_WRITE | VKI_PROT_EXEC)) ==
6059 VKI_PROT_EXEC) {
6060 flags |= VKI_MAP_TEXT;
6061 } else {
6062 flags |= VKI_MAP_INITDATA;
6065 /* Determine if there will be a partially unused page after the file-based
6066 mapping. If so, then we need to zero it explicitly afterwards. */
6067 Addr mapping_end = (Addr) mrp->mr_addr + file_size;
6068 SizeT zeroed_size = VG_PGROUNDUP(mapping_end) - mapping_end;
6069 Bool mprotect_needed = False;
6070 if ((zeroed_size > 0) && ((prot & VKI_PROT_WRITE) == 0)) {
6071 prot |= VKI_PROT_WRITE;
6072 mprotect_needed = True;
6075 if (ehdr->e_type == VKI_ET_EXEC) {
6076 /* Now check if the requested address space is available. */
6077 if (!VG_(am_is_free_or_resvn)((Addr) mrp->mr_addr, mrp->mr_msize)) {
6078 if (VG_(clo_trace_syscalls))
6079 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6080 "requested segment at %#lx with size of "
6081 "%#lx bytes is not available\n",
6082 (Addr) mrp->mr_addr, (UWord) mrp->mr_msize);
6083 res = VG_(mk_SysRes_Error)(VKI_EADDRINUSE);
6084 goto mmap_error;
6088 if (file_size > 0) {
6089 res = VG_(am_mmap_file_fixed_client_flags)((Addr) mrp->mr_addr,
6090 file_size, prot, flags, fd, file_offset);
6091 if (sr_isError(res)) {
6092 if (VG_(clo_trace_syscalls))
6093 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6094 "mmap failed: addr=%#lx size=%#lx prot=%#x "
6095 "flags=%#x fd=%d file offset=%#llx\n",
6096 (Addr) mrp->mr_addr, file_size,
6097 prot, flags, fd, (unsigned long long)file_offset);
6098 goto mmap_error;
6101 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6102 "segment: vaddr=%#lx size=%#lx prot=%#x "
6103 "flags=%#x fd=%d file offset=%#llx\n",
6104 (Addr) mrp->mr_addr, file_size, mrp->mr_prot,
6105 flags, fd, (unsigned long long)file_offset);
6108 if (zeroed_size > 0) {
6109 /* Now zero out the end of the partially used page. */
6110 VG_(memset)((void *) mapping_end, 0, zeroed_size);
6111 if (mprotect_needed) {
6112 prot &= ~VKI_PROT_WRITE;
6113 res = VG_(do_syscall3)(SYS_mprotect, (Addr) mrp->mr_addr,
6114 file_size, prot);
6115 if (sr_isError(res)) {
6116 if (VG_(clo_trace_syscalls))
6117 VG_(debugLog)(3, "syswrap-solaris",
6118 "mmapobj_process_phdrs: mprotect failed: "
6119 "addr=%#lx size=%#lx prot=%#x",
6120 (Addr) mrp->mr_addr, file_size, prot);
6121 /* Mapping for this segment was already established. */
6122 idx += 1;
6123 goto mmap_error;
6128 if (file_size > 0) {
6129 ML_(notify_core_and_tool_of_mmap)((Addr) mrp->mr_addr, file_size,
6130 prot, flags, fd, file_offset);
6133 /* Page(s) after the mapping backed by the file are part of BSS.
6134 They need to be mmap'ed over with the correct flags and will be
6135 implicitly zeroed. */
6136 mapping_end = VG_PGROUNDUP(mrp->mr_addr + mrp->mr_msize);
6137 Addr page_end = VG_PGROUNDUP(mrp->mr_addr + file_size);
6138 vg_assert(mapping_end >= page_end);
6139 zeroed_size = mapping_end - page_end;
6140 if (zeroed_size > 0) {
6141 flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS;
6142 res = VG_(am_mmap_anon_fixed_client)(page_end, zeroed_size, prot);
6143 if (sr_isError(res)) {
6144 if (VG_(clo_trace_syscalls))
6145 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6146 "mmap_anon failed: addr=%#lx size=%#lx "
6147 "prot=%#x\n", page_end, zeroed_size, prot);
6148 idx += 1; /* mapping for this segment was already established */
6149 goto mmap_error;
6152 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6153 "anonymous segment (BSS): vaddr=%#lx size=%#lx "
6154 "prot=%#x\n", page_end, zeroed_size, prot);
6155 ML_(notify_core_and_tool_of_mmap)(page_end, zeroed_size,
6156 prot, flags, -1, 0);
6159 VG_(di_notify_mmap)((Addr) mrp->mr_addr, False /*allow_SkFileV*/, fd);
6161 *elements += 1;
6162 vg_assert(*elements <= segments);
6166 if ((ehdr->e_type == VKI_ET_EXEC) && (!brk_segment_established)) {
6167 vg_assert(VG_(brk_base) == VG_(brk_limit));
6168 vg_assert(VG_(brk_base) == -1);
6169 VG_(brk_base) = VG_(brk_limit) = elfbrk;
6171 if (!VG_(setup_client_dataseg)()) {
6172 VG_(umsg)("Cannot map memory to initialize brk segment in thread #%u "
6173 "at %#lx\n", tid, VG_(brk_base));
6174 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
6175 goto mmap_error;
6178 VG_(track_client_dataseg)(tid);
6181 /* Restore VG_(clo_sanity_level). The scheduler will perform the aspacemgr
6182 sanity check after the syscall. */
6183 VG_(clo_sanity_level) = sanity_level;
6185 return VG_(mk_SysRes_Success)(0);
6187 mmap_error:
6188 for (i = idx - 1; i > 0; i--) {
6189 Bool discard_translations;
6190 Addr addr = (Addr) storage[i].mr_addr;
6192 VG_(am_munmap_client)(&discard_translations, addr, storage[i].mr_msize);
6193 ML_(notify_core_and_tool_of_munmap)(addr, storage[i].mr_msize);
6195 *elements = 0;
6196 return res;
6198 #undef ADVANCE_PHDR
6201 static SysRes mmapobj_interpret(ThreadId tid, Int fd,
6202 vki_mmapobj_result_t *storage,
6203 vki_uint_t *elements)
6205 SysRes res;
6207 struct vg_stat stats;
6208 if (VG_(fstat)(fd, &stats) != 0) {
6209 return VG_(mk_SysRes_Error)(VKI_EBADF);
6212 if (stats.size < sizeof(VKI_ESZ(Ehdr))) {
6213 if (VG_(clo_trace_syscalls))
6214 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: insufficient "
6215 "file size (%lld)\n", stats.size);
6216 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6219 /* Align the header buffer appropriately. */
6220 vki_ulong_t lheader[sizeof(VKI_ESZ(Ehdr)) / sizeof(vki_ulong_t) + 1];
6221 HChar *header = (HChar *) &lheader;
6223 res = VG_(pread)(fd, header, sizeof(VKI_ESZ(Ehdr)), 0);
6224 if (sr_isError(res)) {
6225 if (VG_(clo_trace_syscalls))
6226 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6227 "header failed\n");
6228 return res;
6229 } else if (sr_Res(res) != sizeof(VKI_ESZ(Ehdr))) {
6230 if (VG_(clo_trace_syscalls))
6231 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6232 "header failed - only %lu bytes out of %lu\n",
6233 sr_Res(res), (UWord) sizeof(VKI_ESZ(Ehdr)));
6234 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6237 /* Verify file type is ELF. */
6238 if ((header[VKI_EI_MAG0] != VKI_ELFMAG0) ||
6239 (header[VKI_EI_MAG1] != VKI_ELFMAG1) ||
6240 (header[VKI_EI_MAG2] != VKI_ELFMAG2) ||
6241 (header[VKI_EI_MAG3] != VKI_ELFMAG3)) {
6242 if (VG_(clo_trace_syscalls))
6243 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6244 "missing magic\n");
6245 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6248 if (header[VKI_EI_CLASS] != VG_ELF_CLASS) {
6249 if (VG_(clo_trace_syscalls))
6250 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF class "
6251 "mismatch (%d vs %d)\n", header[VKI_EI_CLASS],
6252 VG_ELF_CLASS);
6253 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6256 VKI_ESZ(Ehdr) *ehdr = (VKI_ESZ(Ehdr) *) header;
6257 if ((ehdr->e_type != VKI_ET_EXEC) && (ehdr->e_type != VKI_ET_DYN)) {
6258 VG_(unimplemented)("Syswrap of the mmapobj call with ELF type %u.",
6259 ehdr->e_type);
6260 /*NOTREACHED*/
6261 return res;
6264 if (ehdr->e_phnum == VKI_PN_XNUM) {
6265 VG_(unimplemented)("Syswrap of the mmapobj call with number of ELF "
6266 "program headers == PN_XNUM");
6267 /*NOTREACHED*/
6268 return res;
6271 /* Check alignment. */
6272 #if defined(VGP_x86_solaris)
6273 if (!VG_IS_4_ALIGNED(ehdr->e_phentsize)) {
6274 #elif defined(VGP_amd64_solaris)
6275 if (!VG_IS_8_ALIGNED(ehdr->e_phentsize)) {
6276 #else
6277 # error "Unknown platform"
6278 #endif
6279 if (VG_(clo_trace_syscalls))
6280 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6281 "phentsize not aligned properly (%u)\n",
6282 ehdr->e_phentsize);
6283 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6286 SizeT phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
6287 if (phdrs_size == 0) {
6288 if (VG_(clo_trace_syscalls))
6289 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: no ELF "
6290 "program headers\n");
6291 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6294 VKI_ESZ(Phdr) *phdrs = VG_(malloc)("syswrap.mi.1", phdrs_size);
6295 res = VG_(pread)(fd, phdrs, phdrs_size, ehdr->e_phoff);
6296 if (sr_isError(res)) {
6297 if (VG_(clo_trace_syscalls))
6298 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6299 "program headers failed\n");
6300 VG_(free)(phdrs);
6301 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6302 } else if (sr_Res(res) != phdrs_size) {
6303 if (VG_(clo_trace_syscalls))
6304 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6305 "program headers failed - only %lu bytes out of %lu\n",
6306 sr_Res(res), phdrs_size);
6307 VG_(free)(phdrs);
6308 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6311 if (VG_(clo_trace_syscalls))
6312 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_interpret: %u ELF "
6313 "program headers with total size of %lu bytes\n",
6314 ehdr->e_phnum, phdrs_size);
6316 /* Now process the program headers. */
6317 res = mmapobj_process_phdrs(tid, fd, storage, elements, ehdr, phdrs);
6318 VG_(free)(phdrs);
6319 return res;
6322 PRE(sys_mmapobj)
6324 /* int mmapobj(int fd, uint_t flags, mmapobj_result_t *storage,
6325 uint_t *elements, void *arg); */
6326 PRINT("sys_mmapobj ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6327 ARG4, ARG5);
6328 PRE_REG_READ5(long, "mmapobj", int, fd, vki_uint_t, flags,
6329 mmapobj_result_t *, storage, uint_t *, elements,
6330 void *, arg);
6332 PRE_MEM_READ("mmapobj(elements)", ARG4, sizeof(vki_uint_t));
6333 /*PRE_MEM_WRITE("mmapobj(elements)", ARG4, sizeof(vki_uint_t));*/
6334 if (ML_(safe_to_deref)((void*)ARG4, sizeof(vki_uint_t))) {
6335 vki_uint_t *u = (vki_uint_t*)ARG4;
6336 PRE_MEM_WRITE("mmapobj(storage)", ARG3,
6337 *u * sizeof(vki_mmapobj_result_t));
6340 if (ARG2 & VKI_MMOBJ_PADDING)
6341 PRE_MEM_READ("mmapobj(arg)", ARG5, sizeof(vki_size_t));
6343 /* Be strict. */
6344 if (!ML_(fd_allowed)(ARG1, "mmapobj", tid, False)) {
6345 SET_STATUS_Failure(VKI_EBADF);
6346 return;
6349 /* We cannot advise mmapobj about desired address(es). Unfortunately
6350 the kernel places mappings from mmapobj at the end of the process address
6351 space, defeating memcheck's optimized fast 2-level array algorithm.
6352 So we need to emulate what mmapobj does in the kernel. */
6354 /* Sanity check on parameters. */
6355 if ((ARG2 & ~VKI_MMOBJ_ALL_FLAGS) != 0) {
6356 SET_STATUS_Failure(VKI_EINVAL);
6357 return;
6360 if (!ML_(safe_to_deref)((void *) ARG4, sizeof(vki_uint_t))) {
6361 SET_STATUS_Failure(VKI_EFAULT);
6362 return;
6364 vki_uint_t *elements = (vki_uint_t *) ARG4;
6366 if (*elements > 0) {
6367 if (!ML_(safe_to_deref)((void *) ARG3,
6368 *elements * sizeof(vki_mmapobj_result_t))) {
6369 SET_STATUS_Failure(VKI_EFAULT);
6370 return;
6374 /* For now, only MMOBJ_INTERPRET is supported, without MMOBJ_PADDING. */
6375 if (ARG2 != VKI_MMOBJ_INTERPRET) {
6376 VG_(unimplemented)("Syswrap of the mmapobj call with flags %lu.", ARG2);
6377 /*NOTREACHED*/
6378 return;
6381 SysRes res = mmapobj_interpret(tid, (Int) ARG1,
6382 (vki_mmapobj_result_t *) ARG3, elements);
6383 SET_STATUS_from_SysRes(res);
6385 if (!sr_isError(res)) {
6386 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
6388 UInt idx;
6389 for (idx = 0; idx < *(vki_uint_t *) ARG4; idx++) {
6390 vki_mmapobj_result_t *mrp = &((vki_mmapobj_result_t *) ARG3)[idx];
6391 POST_FIELD_WRITE(mrp->mr_addr);
6392 POST_FIELD_WRITE(mrp->mr_msize);
6393 POST_FIELD_WRITE(mrp->mr_fsize);
6394 POST_FIELD_WRITE(mrp->mr_prot);
6395 POST_FIELD_WRITE(mrp->mr_flags);
6396 POST_FIELD_WRITE(mrp->mr_offset);
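/* For context, a hedged sketch of typical client usage of mmapobj(2) with
   MMOBJ_INTERPRET (roughly how ld.so.1/libc are expected to call it; the
   buffer size of 32 and the library path are hypothetical):

      mmapobj_result_t storage[32];
      uint_t elements = 32;
      int fd = open("/usr/lib/libfoo.so.1", O_RDONLY);
      if (mmapobj(fd, MMOBJ_INTERPRET, storage, &elements, NULL) == 0) {
         ... storage[0 .. elements-1] now describe the established mappings ...
      }

   The wrapper above emulates exactly this case and calls VG_(unimplemented)
   for anything else. */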
6401 PRE(sys_memcntl)
6403 /* int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
6404 int attr, int mask); */
6405 PRINT("sys_memcntl ( %#lx, %#lx, %ld, %#lx, %#lx, %#lx )", ARG1, ARG2,
6406 SARG3, ARG4, ARG5, ARG6);
6407 PRE_REG_READ6(long, "memcntl", void *, addr, vki_size_t, len, int, cmd,
6408 void *, arg, int, attr, int, mask);
6410 if (ARG3 != VKI_MC_LOCKAS && ARG3 != VKI_MC_UNLOCKAS &&
6411 !ML_(valid_client_addr)(ARG1, ARG2, tid, "memcntl")) {
6412 /* MC_LOCKAS and MC_UNLOCKAS work on the complete address space thus we
6413 don't check the address range validity if these commands are
6414 requested. */
6415 SET_STATUS_Failure(VKI_ENOMEM);
6416 return;
6419 if (ARG3 == VKI_MC_HAT_ADVISE)
6420 PRE_MEM_READ("memcntl(arg)", ARG4, sizeof(struct vki_memcntl_mha));
6423 PRE(sys_getpmsg)
6425 /* int getpmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
6426 int *bandp, int *flagsp); */
6427 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6428 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6429 *flags |= SfMayBlock;
6430 PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6431 ARG4, ARG5);
6432 PRE_REG_READ5(long, "getpmsg", int, fildes, struct vki_strbuf *, ctlptr,
6433 struct vki_strbuf *, dataptr, int *, bandp, int *, flagsp);
6434 if (ctrlptr) {
6435 PRE_FIELD_READ("getpmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
6436 PRE_FIELD_WRITE("getpmsg(ctrlptr->len)", ctrlptr->len);
6437 PRE_FIELD_READ("getpmsg(ctrlptr->buf)", ctrlptr->buf);
6438 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6439 && ctrlptr->maxlen > 0)
6440 PRE_MEM_WRITE("getpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6441 ctrlptr->maxlen);
6443 if (dataptr) {
6444 PRE_FIELD_READ("getpmsg(dataptr->maxlen)", dataptr->maxlen);
6445 PRE_FIELD_WRITE("getpmsg(dataptr->len)", dataptr->len);
6446 PRE_FIELD_READ("getpmsg(dataptr->buf)", dataptr->buf);
6447 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6448 && dataptr->maxlen > 0)
6449 PRE_MEM_WRITE("getpmsg(dataptr->buf)", (Addr)dataptr->buf,
6450 dataptr->maxlen);
6452 PRE_MEM_READ("getpmsg(bandp)", ARG4, sizeof(int));
6453 /*PRE_MEM_WRITE("getpmsg(bandp)", ARG4, sizeof(int));*/
6454 PRE_MEM_READ("getpmsg(flagsp)", ARG5, sizeof(int));
6455 /*PRE_MEM_WRITE("getpmsg(flagsp)", ARG5, sizeof(int));*/
6457 /* Be strict. */
6458 if (!ML_(fd_allowed)(ARG1, "getpmsg", tid, False))
6459 SET_STATUS_Failure(VKI_EBADF);
6462 POST(sys_getpmsg)
6464 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6465 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6467 if (ctrlptr && ctrlptr->len > 0)
6468 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
6469 if (dataptr && dataptr->len > 0)
6470 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
6471 POST_MEM_WRITE(ARG4, sizeof(int));
6472 POST_MEM_WRITE(ARG5, sizeof(int));
6475 PRE(sys_putpmsg)
6477 /* int putpmsg(int fildes, const struct strbuf *ctlptr,
6478 const struct strbuf *dataptr, int band, int flags); */
6479 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6480 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6481 *flags |= SfMayBlock;
6482 PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
6483 SARG5);
6484 PRE_REG_READ5(long, "putpmsg", int, fildes, struct vki_strbuf *, ctrlptr,
6485 struct vki_strbuf *, dataptr, int, band, int, flags);
6486 if (ctrlptr) {
6487 PRE_FIELD_READ("putpmsg(ctrlptr->len)", ctrlptr->len);
6488 PRE_FIELD_READ("putpmsg(ctrlptr->buf)", ctrlptr->buf);
6489 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6490 && ctrlptr->len > 0)
6491 PRE_MEM_READ("putpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6492 ctrlptr->len);
6494 if (dataptr) {
6495 PRE_FIELD_READ("putpmsg(dataptr->len)", dataptr->len);
6496 PRE_FIELD_READ("putpmsg(dataptr->buf)", dataptr->buf);
6497 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6498 && dataptr->len > 0)
6499 PRE_MEM_READ("putpmsg(dataptr->buf)", (Addr)dataptr->buf,
6500 dataptr->len);
6503 /* Be strict. */
6504 if (!ML_(fd_allowed)(ARG1, "putpmsg", tid, False))
6505 SET_STATUS_Failure(VKI_EBADF);
6508 #if defined(SOLARIS_OLD_SYSCALLS)
6509 PRE(sys_rename)
6511 /* int rename(const char *from, const char *to); */
6513 *flags |= SfMayBlock;
6514 PRINT("sys_rename ( %#lx(%s), %#lx(%s) )",
6515 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2);
6516 PRE_REG_READ2(long, "rename", const char *, from, const char *, to);
6518 PRE_MEM_RASCIIZ("rename(from)", ARG1);
6519 PRE_MEM_RASCIIZ("rename(to)", ARG2);
6521 #endif /* SOLARIS_OLD_SYSCALLS */
6523 PRE(sys_uname)
6525 /* int uname(struct utsname *name); */
6526 PRINT("sys_uname ( %#lx )", ARG1);
6527 PRE_REG_READ1(long, "uname", struct vki_utsname *, name);
6528 PRE_MEM_WRITE("uname(name)", ARG1, sizeof(struct vki_utsname));
6531 POST(sys_uname)
6533 struct vki_utsname *name = (struct vki_utsname *) ARG1;
6534 POST_MEM_WRITE((Addr) name->sysname, VG_(strlen)(name->sysname) + 1);
6535 POST_MEM_WRITE((Addr) name->nodename, VG_(strlen)(name->nodename) + 1);
6536 POST_MEM_WRITE((Addr) name->release, VG_(strlen)(name->release) + 1);
6537 POST_MEM_WRITE((Addr) name->version, VG_(strlen)(name->version) + 1);
6538 POST_MEM_WRITE((Addr) name->machine, VG_(strlen)(name->machine) + 1);
6541 PRE(sys_setegid)
6543 /* int setegid(gid_t egid); */
6544 PRINT("sys_setegid ( %ld )", SARG1);
6545 PRE_REG_READ1(long, "setegid", vki_gid_t, egid);
6548 PRE(sys_sysconfig)
6550 /* long sysconf(int name); */
6551 PRINT("sys_sysconfig ( %ld )", SARG1);
6552 PRE_REG_READ1(long, "sysconf", int, name);
6554 if (ARG1 == VKI_CONFIG_OPEN_FILES)
6555 SET_STATUS_Success(VG_(fd_soft_limit));
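/* Presumably the CONFIG_OPEN_FILES interception above exists because
   Valgrind keeps its own file descriptors above the client's soft limit;
   reporting the soft limit here keeps the client out of that reserved range.
   (This is an inference from the usual fd policy, not something stated in
   this file.) */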
6558 PRE(sys_systeminfo)
6560 /* int sysinfo(int command, char *buf, long count); */
6561 PRINT("sys_systeminfo ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
6562 PRE_REG_READ3(long, "sysinfo", int, command, char *, buf, long, count);
6564 switch (ARG1 /*command*/) {
6565 case VKI_SI_SYSNAME:
6566 case VKI_SI_HOSTNAME:
6567 case VKI_SI_RELEASE:
6568 case VKI_SI_VERSION:
6569 case VKI_SI_MACHINE:
6570 case VKI_SI_ARCHITECTURE:
6571 case VKI_SI_HW_SERIAL:
6572 case VKI_SI_HW_PROVIDER:
6573 case VKI_SI_SRPC_DOMAIN:
6574 case VKI_SI_PLATFORM:
6575 case VKI_SI_ISALIST:
6576 case VKI_SI_DHCP_CACHE:
6577 case VKI_SI_ARCHITECTURE_32:
6578 case VKI_SI_ARCHITECTURE_64:
6579 case VKI_SI_ARCHITECTURE_K:
6580 case VKI_SI_ARCHITECTURE_NATIVE:
6581 PRE_MEM_WRITE("sysinfo(buf)", ARG2, ARG3);
6582 break;
6584 case VKI_SI_SET_HOSTNAME:
6585 case VKI_SI_SET_SRCP_DOMAIN:
6586 PRE_MEM_RASCIIZ("sysinfo(buf)", ARG2);
6587 break;
6589 default:
6590 VG_(unimplemented)("Syswrap of the sysinfo call with command %ld.", SARG1);
6591 /*NOTREACHED*/
6592 break;
6596 POST(sys_systeminfo)
6598 if (ARG1 != VKI_SI_SET_HOSTNAME && ARG1 != VKI_SI_SET_SRCP_DOMAIN)
6599 POST_MEM_WRITE(ARG2, MIN(RES, ARG3));
6602 PRE(sys_seteuid)
6604 /* int seteuid(uid_t euid); */
6605 PRINT("sys_seteuid ( %ld )", SARG1);
6606 PRE_REG_READ1(long, "seteuid", vki_uid_t, euid);
6609 PRE(sys_forksys)
6611 /* int64_t forksys(int subcode, int flags); */
6612 Int fds[2];
6613 Int res;
6614 PRINT("sys_forksys ( %ld, %ld )", SARG1, SARG2);
6615 PRE_REG_READ2(long, "forksys", int, subcode, int, flags);
6617 if (ARG1 == 1) {
6618 /* Support for forkall() requires changes to the big lock processing
6619 which are not yet implemented. */
6620 VG_(unimplemented)("Support for forkall().");
6621 /*NOTREACHED*/
6622 return;
6625 if (ARG1 != 0 && ARG1 != 2) {
6626 VG_(unimplemented)("Syswrap of the forksys call where subcode=%ld.",
6627 SARG1);
6628 /*NOTREACHED*/
6631 if (ARG1 == 2) {
6632 /* vfork() is requested. Translate it to a normal fork() but work around
6633 a problem with posix_spawn() which relies on the real vfork()
6634 behaviour. See a description in vg_preloaded.c for details. */
6635 res = VG_(pipe)(fds);
6636 vg_assert(res == 0);
6638 vg_assert(fds[0] != fds[1]);
6640 /* Move to Valgrind fds and set close-on-exec flag on both of them (done
6641 by VG_(safe_fd)). */
6642 fds[0] = VG_(safe_fd)(fds[0]);
6643 fds[1] = VG_(safe_fd)(fds[1]);
6644 vg_assert(fds[0] != fds[1]);
6646 vg_assert(VG_(vfork_fildes_addr) != NULL);
6647 vg_assert(*VG_(vfork_fildes_addr) == -1);
6648 *VG_(vfork_fildes_addr) = fds[0];
6651 VG_(do_atfork_pre)(tid);
6652 SET_STATUS_from_SysRes(VG_(do_syscall2)(__NR_forksys, 0, ARG2));
6654 if (!SUCCESS) {
6655 /* vfork */
6656 if (ARG1 == 2) {
6657 VG_(close)(fds[0]);
6658 VG_(close)(fds[1]);
6661 return;
6664 if (RESHI) {
6665 VG_(do_atfork_child)(tid);
6667 /* vfork */
6668 if (ARG1 == 2)
6669 VG_(close)(fds[1]);
6671 # if defined(SOLARIS_PT_SUNDWTRACE_THRP)
6672 /* The kernel can map a new page as scratch space for the DTrace fasttrap
6673 provider. There is no way we can directly get its address - it's all
6674 private to the kernel. Fish it the slow way. */
6675 Addr addr;
6676 SizeT size;
6677 UInt prot;
6678 Bool found = VG_(am_search_for_new_segment)(&addr, &size, &prot);
6679 if (found) {
6680 VG_(debugLog)(1, "syswrap-solaris", "PRE(forksys), new segment: "
6681 "vaddr=%#lx, size=%#lx, prot=%#x\n", addr, size, prot);
6682 vg_assert(prot == (VKI_PROT_READ | VKI_PROT_EXEC));
6683 vg_assert(size == VKI_PAGE_SIZE);
6684 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, VKI_MAP_ANONYMOUS,
6685 -1, 0);
6687 /* Note: We don't notify the debuginfo reader about this mapping
6688 because there is no debug information stored in this segment. */
6690 # endif /* SOLARIS_PT_SUNDWTRACE_THRP */
6692 else {
6693 VG_(do_atfork_parent)(tid);
6695 /* Print information about the fork. */
6696 PRINT(" fork: process %d created child %d\n", VG_(getpid)(),
6697 (Int)RES);
6699 /* vfork */
6700 if (ARG1 == 2) {
6701 /* Wait for the child to finish (exec or exit). */
6702 UChar w;
6704 VG_(close)(fds[0]);
6706 res = VG_(read)(fds[1], &w, 1);
6707 if (res == 1)
6708 SET_STATUS_Failure(w);
6709 VG_(close)(fds[1]);
6711 *VG_(vfork_fildes_addr) = -1;
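/* A hedged summary of the pipe protocol used above (the authoritative
   description lives in vg_preloaded.c): the child's end of the bidirectional
   pipe is published through *VG_(vfork_fildes_addr), while the parent blocks
   in VG_(read) until the child either reports a failed exec by writing a
   single status byte (turned into SET_STATUS_Failure above) or closes the
   descriptor on a successful exec or exit, in which case the read returns 0
   and the original fork result stands. */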
6716 #if defined(SOLARIS_GETRANDOM_SYSCALL)
6717 PRE(sys_getrandom)
6719 /* int getrandom(void *buf, size_t buflen, uint_t flags); */
6720 PRINT("sys_getrandom ( %#lx, %lu, %lu )", ARG1, ARG2, ARG3);
6721 PRE_REG_READ3(long, "getrandom", void *, buf, vki_size_t, buflen,
6722 vki_uint_t, flags);
6723 PRE_MEM_WRITE("getrandom(buf)", ARG1, ARG2);
6726 POST(sys_getrandom)
6728 POST_MEM_WRITE(ARG1, RES);
6730 #endif /* SOLARIS_GETRANDOM_SYSCALL */
6732 PRE(sys_sigtimedwait)
6734 /* int sigtimedwait(const sigset_t *set, siginfo_t *info,
6735 const timespec_t *timeout); */
6736 *flags |= SfMayBlock;
6737 PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
6738 PRE_REG_READ3(long, "sigtimedwait", vki_sigset_t *, set,
6739 vki_siginfo_t *, info, vki_timespec_t *, timeout);
6740 PRE_MEM_READ("sigtimewait(set)", ARG1, sizeof(vki_sigset_t));
6741 if (ARG2)
6742 PRE_MEM_WRITE("sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t));
6743 if (ARG3)
6744 PRE_MEM_READ("sigtimedwait(timeout)", ARG3, sizeof(vki_timespec_t));
6747 POST(sys_sigtimedwait)
6749 if (ARG2)
6750 POST_MEM_WRITE(ARG2, sizeof(vki_siginfo_t));
6753 PRE(sys_yield)
6755 /* void yield(void); */
6756 *flags |= SfMayBlock;
6757 PRINT("sys_yield ( )");
6758 PRE_REG_READ0(long, "yield");
6761 PRE(sys_lwp_sema_post)
6763 /* int lwp_sema_post(lwp_sema_t *sema); */
6764 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6765 *flags |= SfMayBlock;
6766 PRINT("sys_lwp_sema_post ( %#lx )", ARG1);
6767 PRE_REG_READ1(long, "lwp_sema_post", lwp_sema_t *, sema);
6769 PRE_FIELD_READ("lwp_sema_post(sema->type)", sema->vki_sema_type);
6770 PRE_FIELD_READ("lwp_sema_post(sema->count)", sema->vki_sema_count);
6771 /*PRE_FIELD_WRITE("lwp_sema_post(sema->count)", sema->vki_sema_count);*/
6772 PRE_FIELD_READ("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);
6773 /*PRE_FIELD_WRITE("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);*/
6776 POST(sys_lwp_sema_post)
6778 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6779 POST_FIELD_WRITE(sema->vki_sema_count);
6780 POST_FIELD_WRITE(sema->vki_sema_waiters);
6783 PRE(sys_lwp_sema_trywait)
6785 /* int lwp_sema_trywait(lwp_sema_t *sema); */
6786 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6787 PRINT("sys_lwp_sema_trywait ( %#lx )", ARG1);
6788 PRE_REG_READ1(long, "lwp_sema_trywait", lwp_sema_t *, sema);
6790 PRE_FIELD_READ("lwp_sema_trywait(sema->type)", sema->vki_sema_type);
6791 PRE_FIELD_READ("lwp_sema_trywait(sema->count)", sema->vki_sema_count);
6792 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->count)", sema->vki_sema_count);*/
6793 PRE_FIELD_READ("lwp_sema_trywait(sema->waiters)", sema->vki_sema_waiters);
6794 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->waiters)",
6795 sema->vki_sema_waiters);*/
6798 POST(sys_lwp_sema_trywait)
6800 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6801 POST_FIELD_WRITE(sema->vki_sema_count);
6802 POST_FIELD_WRITE(sema->vki_sema_waiters);
6805 PRE(sys_lwp_detach)
6807 /* int lwp_detach(id_t lwpid); */
6808 PRINT("sys_lwp_detach ( %ld )", SARG1);
6809 PRE_REG_READ1(long, "lwp_detach", vki_id_t, lwpid);
6812 PRE(sys_modctl)
6814 /* int modctl(int cmd, uintptr_t a1, uintptr_t a2, uintptr_t a3,
6815 uintptr_t a4, uintptr_t a5); */
6816 *flags |= SfMayBlock;
6818 switch (ARG1 /*cmd*/) {
6819 case VKI_MODLOAD:
6820 /* int modctl_modload(int use_path, char *filename, int *rvp); */
6821 PRINT("sys_modctl ( %ld, %lu, %#lx(%s), %#lx )",
6822 SARG1, ARG2, ARG3, (HChar *) ARG3, ARG4);
6823 PRE_REG_READ4(long, SC2("modctl", "modload"),
6824 int, cmd, int, use_path, char *, filename, int *, rvp);
6825 PRE_MEM_RASCIIZ("modctl(filaneme)", ARG3);
6826 if (ARG4 != 0) {
6827 PRE_MEM_WRITE("modctl(rvp)", ARG4, sizeof(int *));
6829 break;
6830 case VKI_MODUNLOAD:
6831 /* int modctl_modunload(modid_t id); */
6832 PRINT("sys_modctl ( %ld, %ld )", SARG1, SARG2);
6833 PRE_REG_READ2(long, SC2("modctl", "modunload"),
6834 int, cmd, vki_modid_t, id);
6835 break;
6836 case VKI_MODINFO: {
6837 /* int modctl_modinfo(modid_t id, struct modinfo *umodi); */
6838 PRINT("sys_modctl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
6839 PRE_REG_READ3(long, SC2("modctl", "modinfo"),
6840 int, cmd, vki_modid_t, id, struct modinfo *, umodi);
6842 struct vki_modinfo *umodi = (struct vki_modinfo *) ARG3;
6843 PRE_FIELD_READ("modctl(umodi->mi_info)", umodi->mi_info);
6844 PRE_FIELD_READ("modctl(umodi->mi_id)", umodi->mi_id);
6845 PRE_FIELD_READ("modctl(umodi->mi_nextid)", umodi->mi_nextid);
6846 PRE_MEM_WRITE("modctl(umodi)", ARG3, sizeof(struct vki_modinfo));
6847 break;
6850 # if defined(SOLARIS_MODCTL_MODNVL)
6851 case VKI_MODNVL_DEVLINKSYNC:
6852 /* int modnvl_devlinksync(sysnvl_op_t a1, uintptr_t a2, uintptr_t a3,
6853 uintptr_t a4); */
6854 switch (ARG2 /*op*/) {
6856 # if defined(HAVE_SYS_SYSNVL_H)
6857 case VKI_SYSNVL_OP_GET:
6858 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6859 int, cmd, sysnvl_op_t, a1, char *, bufp,
6860 uint64_t *, buflenp, uint64_t *, genp);
6861 # else
6862 case VKI_MODCTL_NVL_OP_GET:
6863 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6864 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6865 uint64_t *, buflenp, uint64_t *, genp);
6866 # endif /* HAVE_SYS_SYSNVL_H */
6868 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx, %#lx )",
6869 SARG1, ARG2, ARG3, ARG4, ARG5);
6870 PRE_MEM_WRITE("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6871 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6872 if (ARG3 != 0) {
6873 PRE_MEM_WRITE("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6876 if (ARG5 != 0) {
6877 PRE_MEM_WRITE("modctl(genp)", ARG5, sizeof(vki_uint64_t));
6879 break;
6881 # if defined(HAVE_SYS_SYSNVL_H)
6882 case VKI_SYSNVL_OP_UPDATE:
6883 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6884 int, cmd, sysnvl_op_t, a1, char *, bufp,
6885 uint64_t *, buflenp);
6886 # else
6887 case VKI_MODCTL_NVL_OP_UPDATE:
6888 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6889 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6890 uint64_t *, buflenp);
6891 # endif /* HAVE_SYS_SYSNVL_H */
6893 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
6894 PRE_MEM_READ("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6895 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6896 PRE_MEM_READ("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6898 break;
6900 default:
6901 VG_(unimplemented)("Syswrap of the modctl call with command "
6902 "MODNVL_DEVLINKSYNC and op %ld.", ARG2);
6903 /*NOTREACHED*/
6904 break;
6906 break;
6908 case VKI_MODDEVINFO_CACHE_TS:
6909 /* int modctl_devinfo_cache_ts(uint64_t *utsp); */
6910 PRINT("sys_modctl ( %ld, %#lx )", SARG1, ARG2);
6911 PRE_REG_READ2(long, SC2("modctl", "moddevinfo_cache_ts"),
6912 int, cmd, uint64_t *, utsp);
6913 PRE_MEM_WRITE("modctl(utsp)", ARG2, sizeof(vki_uint64_t));
6914 break;
6915 # endif /* SOLARIS_MODCTL_MODNVL */
6917 default:
6918 VG_(unimplemented)("Syswrap of the modctl call with command %ld.", SARG1);
6919 /*NOTREACHED*/
6920 break;
6924 POST(sys_modctl)
6926 switch (ARG1 /*cmd*/) {
6927 case VKI_MODLOAD:
6928 if (ARG4 != 0) {
6929 POST_MEM_WRITE(ARG4, sizeof(int *));
6931 break;
6932 case VKI_MODUNLOAD:
6933 break;
6934 case VKI_MODINFO:
6935 POST_MEM_WRITE(ARG3, sizeof(struct vki_modinfo));
6936 break;
6937 # if defined(SOLARIS_MODCTL_MODNVL)
6938 case VKI_MODNVL_DEVLINKSYNC:
6939 switch (ARG2 /*op*/) {
6941 # if defined(HAVE_SYS_SYSNVL_H)
6942 case VKI_SYSNVL_OP_GET:
6943 # else
6944 case VKI_MODCTL_NVL_OP_GET:
6945 # endif /* HAVE_SYS_SYSNVL_H */
6947 POST_MEM_WRITE(ARG4, sizeof(vki_uint64_t));
6948 if (ARG3 != 0) {
6949 POST_MEM_WRITE(ARG3, *(vki_uint64_t *) ARG4);
6951 if (ARG5 != 0) {
6952 POST_MEM_WRITE(ARG5, sizeof(vki_uint64_t));
6954 break;
6956 # if defined(HAVE_SYS_SYSNVL_H)
6957 case VKI_SYSNVL_OP_UPDATE:
6958 # else
6959 case VKI_MODCTL_NVL_OP_UPDATE:
6960 # endif /* HAVE_SYS_SYSNVL_H */
6961 break;
6963 default:
6964 vg_assert(0);
6965 break;
6967 break;
6968 case VKI_MODDEVINFO_CACHE_TS:
6969 POST_MEM_WRITE(ARG2, sizeof(vki_uint64_t));
6970 break;
6971 # endif /* SOLARIS_MODCTL_MODNVL */
6973 default:
6974 vg_assert(0);
6975 break;
6979 PRE(sys_fchroot)
6981 /* int fchroot(int fd); */
6982 PRINT("sys_fchroot ( %ld )", SARG1);
6983 PRE_REG_READ1(long, "fchroot", int, fd);
6985 /* Be strict. */
6986 if (!ML_(fd_allowed)(ARG1, "fchroot", tid, False))
6987 SET_STATUS_Failure(VKI_EBADF);
6990 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
6991 PRE(sys_system_stats)
6993 /* void system_stats(int flag); */
6994 PRINT("sys_system_stats ( %ld )", SARG1);
6995 PRE_REG_READ1(void, "system_stats", int, flag);
6997 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
6999 PRE(sys_gettimeofday)
7001 /* Kernel: int gettimeofday(struct timeval *tp); */
7002 PRINT("sys_gettimeofday ( %#lx )", ARG1);
7003 PRE_REG_READ1(long, "gettimeofday", struct timeval *, tp);
7004 if (ARG1)
7005 PRE_timeval_WRITE("gettimeofday(tp)", ARG1);
7008 POST(sys_gettimeofday)
7010 if (ARG1)
7011 POST_timeval_WRITE(ARG1);
7014 PRE(sys_lwp_create)
7016 /* int lwp_create(ucontext_t *ucp, int flags, id_t *new_lwp) */
7018 ThreadId ctid;
7019 ThreadState *ptst;
7020 ThreadState *ctst;
7021 Addr stack;
7022 SysRes res;
7023 vki_ucontext_t uc;
7024 Bool tool_informed = False;
7026 PRINT("sys_lwp_create ( %#lx, %lu, %#lx )", ARG1, ARG2, ARG3);
7027 PRE_REG_READ3(long, "lwp_create", ucontext_t *, ucp, int, flags,
7028 id_t *, new_lwp);
7030 if (ARG3 != 0)
7031 PRE_MEM_WRITE("lwp_create(new_lwp)", ARG3, sizeof(vki_id_t));
7033 /* If we can't deref ucontext_t then we can't do anything. */
7034 if (!ML_(safe_to_deref)((void*)ARG1, sizeof(vki_ucontext_t))) {
7035 SET_STATUS_Failure(VKI_EINVAL);
7036 return;
7039 ctid = VG_(alloc_ThreadState)();
7040 ptst = VG_(get_ThreadState)(tid);
7041 ctst = VG_(get_ThreadState)(ctid);
7043 /* Stay sane. */
7044 vg_assert(VG_(is_running_thread)(tid));
7045 vg_assert(VG_(is_valid_tid)(ctid));
7047 stack = ML_(allocstack)(ctid);
7048 if (!stack) {
7049 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
7050 goto out;
7053 /* First inherit parent's guest state */
7054 ctst->arch.vex = ptst->arch.vex;
7055 ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
7056 ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
7058 /* Set up some values. */
7059 ctst->os_state.parent = tid;
7060 ctst->os_state.threadgroup = ptst->os_state.threadgroup;
7061 ctst->sig_mask = ptst->sig_mask;
7062 ctst->tmp_sig_mask = ptst->sig_mask;
7064 /* No stack definition should be present at this point. The stack will be
7065 set up later by libc via a setustack() call (the getsetcontext syscall). */
7066 ctst->client_stack_highest_byte = 0;
7067 ctst->client_stack_szB = 0;
7068 vg_assert(ctst->os_state.stk_id == NULL_STK_ID);
7070 /* Inform a tool that a new thread is created. This has to be done before
7071 any other core->tool event is sent. */
7072 vg_assert(VG_(owns_BigLock_LL)(tid));
7073 VG_TRACK(pre_thread_ll_create, tid, ctid);
7074 tool_informed = True;
7076 #if defined(VGP_x86_solaris)
7077 /* Set up the GDT (this has to be done before calling
7078 VG_(restore_context)()). */
7079 ML_(setup_gdt)(&ctst->arch.vex);
7080 #elif defined(VGP_amd64_solaris)
7081 /* Nothing to do. */
7082 #else
7083 # error "Unknown platform"
7084 #endif
7086 /* Now set up the new thread according to ucontext_t. */
7087 VG_(restore_context)(ctid, (vki_ucontext_t*)ARG1, Vg_CoreSysCall,
7088 True/*esp_is_thrptr*/);
7090 /* Set up V thread (this also tells the kernel to block all signals in the
7091 thread). */
7092 ML_(setup_start_thread_context)(ctid, &uc);
7094 /* Actually create the new thread. */
7095 res = VG_(do_syscall3)(__NR_lwp_create, (UWord)&uc, ARG2, ARG3);
7097 if (!sr_isError(res)) {
7098 if (ARG3 != 0)
7099 POST_MEM_WRITE(ARG3, sizeof(vki_id_t));
7100 if (ARG2 & VKI_LWP_DAEMON)
7101 ctst->os_state.daemon_thread = True;
7104 out:
7105 if (sr_isError(res)) {
7106 if (tool_informed) {
7107 /* Tell a tool the thread exited in a hurry. */
7108 VG_TRACK(pre_thread_ll_exit, ctid);
7111 /* lwp_create failed. */
7112 VG_(cleanup_thread)(&ctst->arch);
7113 ctst->status = VgTs_Empty;
7116 SET_STATUS_from_SysRes(res);
7119 PRE(sys_lwp_exit)
7121 /* void syslwp_exit(); */
7122 ThreadState *tst = VG_(get_ThreadState)(tid);
7123 PRINT("sys_lwp_exit ( )");
7124 PRE_REG_READ0(long, "lwp_exit");
7126 /* Set the thread's status to be exiting, then claim that the syscall
7127 succeeded. */
7128 tst->exitreason = VgSrc_ExitThread;
7129 tst->os_state.exitcode = 0;
7130 SET_STATUS_Success(0);
7133 PRE(sys_lwp_suspend)
7135 /* int lwp_suspend(id_t lwpid); */
7136 ThreadState *tst = VG_(get_ThreadState)(tid);
7137 PRINT("sys_lwp_suspend ( %ld )", SARG1);
7138 PRE_REG_READ1(long, "lwp_suspend", vki_id_t, lwpid);
7140 if (ARG1 == tst->os_state.lwpid) {
7141 /* Set the SfMayBlock flag only when the currently running thread is the
7142 one being suspended. If the flag were also set when suspending other
7143 threads, a thread holding the_BigLock could end up suspended and
7144 Valgrind would hang. */
7145 *flags |= SfMayBlock;
7149 PRE(sys_lwp_continue)
7151 /* int lwp_continue(id_t target_lwp); */
7152 PRINT("sys_lwp_continue ( %ld )", SARG1);
7153 PRE_REG_READ1(long, "lwp_continue", vki_id_t, target_lwp);
7156 static void
7157 do_lwp_sigqueue(const HChar *syscall_name, UWord target_lwp, UWord signo,
7158 SyscallStatus *status, UWord *flags)
7160 if (!ML_(client_signal_OK)(signo)) {
7161 SET_STATUS_Failure(VKI_EINVAL);
7162 return;
7165 /* Check to see if this gave us a pending signal. */
7166 *flags |= SfPollAfter;
7168 if (VG_(clo_trace_signals))
7169 VG_(message)(Vg_DebugMsg, "%s: sending signal %lu to thread %lu\n",
7170 syscall_name, signo, target_lwp);
7172 /* If we're sending SIGKILL, check to see if the target is one of our
7173 threads and handle it specially. */
7174 if (signo == VKI_SIGKILL && ML_(do_sigkill)(target_lwp, -1)) {
7175 SET_STATUS_Success(0);
7176 return;
7179 /* Ask to handle this syscall via the slow route, since that's the only one
7180 that sets tst->status to VgTs_WaitSys. If the result of doing the
7181 syscall is an immediate run of async_signalhandler() in m_signals.c,
7182 then we need the thread to be properly tidied away. */
7183 *flags |= SfMayBlock;
7186 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
7187 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
7188 PRE(sys_lwp_sigqueue)
7190 /* int lwp_sigqueue(pid_t target_pid, id_t target_lwp, int signal,
7191 void *value, int si_code, timespec_t *timeout);
7193 PRINT("sys_lwp_sigqueue ( %ld, %ld, %ld, %#lx, %ld, %#lx )",
7194 SARG1, SARG2, SARG3, ARG4, SARG5, ARG6);
7195 PRE_REG_READ6(long, "lwp_sigqueue", vki_pid_t, target_pid,
7196 vki_id_t, target_lwp, int, signal, void *, value, int, si_code,
7197 vki_timespec_t *, timeout);
7199 if (ARG6)
7200 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG6, sizeof(vki_timespec_t));
7202 if ((ARG1 == 0) || (ARG1 == VG_(getpid)())) {
7203 do_lwp_sigqueue("lwp_sigqueue", ARG2, ARG3, status, flags);
7204 } else {
7205 /* Signal is sent to a different process. */
7206 if (VG_(clo_trace_signals))
7207 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sending signal %ld to "
7208 "process %ld, thread %ld\n", SARG3, SARG1, SARG2);
7209 *flags |= SfMayBlock;
7213 POST(sys_lwp_sigqueue)
7215 if (VG_(clo_trace_signals))
7216 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to process %ld, "
7217 "thread %ld\n", SARG3, SARG1, SARG2);
7220 #else
7222 PRE(sys_lwp_sigqueue)
7224 /* int lwp_sigqueue(id_t target_lwp, int signal, void *value,
7225 int si_code, timespec_t *timeout);
7227 PRINT("sys_lwp_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
7228 SARG1, SARG2, ARG3, SARG4, ARG5);
7229 PRE_REG_READ5(long, "lwp_sigqueue", vki_id_t, target_lwp, int, signal,
7230 void *, value, int, si_code, vki_timespec_t *, timeout);
7232 if (ARG5)
7233 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
7235 do_lwp_sigqueue("lwp_sigqueue", ARG1, ARG2, status, flags);
7238 POST(sys_lwp_sigqueue)
7240 if (VG_(clo_trace_signals))
7241 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %lu to thread %lu\n",
7242 ARG2, ARG1);
7246 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID */
7248 #else
7250 PRE(sys_lwp_kill)
7252 /* int lwp_kill(id_t target_lwp, int signal); */
7253 PRINT("sys_lwp_kill ( %ld, %ld )", SARG1, SARG2);
7254 PRE_REG_READ2(long, "lwp_kill", vki_id_t, target_lwp, int, signal);
7256 do_lwp_sigqueue("lwp_kill", ARG1, ARG2, status, flags);
7259 POST(sys_lwp_kill)
7261 if (VG_(clo_trace_signals))
7262 VG_(message)(Vg_DebugMsg, "lwp_kill: sent signal %lu to thread %lu\n",
7263 ARG2, ARG1);
7265 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
7267 PRE(sys_lwp_self)
7269 /* id_t lwp_self(void); */
7270 PRINT("sys_lwp_self ( )");
7271 PRE_REG_READ0(long, "lwp_self");
7274 PRE(sys_lwp_sigmask)
7276 /* int64_t lwp_sigmask(int how, uint_t bits0, uint_t bits1, uint_t bits2,
7277 uint_t bits3); */
7278 vki_sigset_t sigset;
7279 PRINT("sys_lwp_sigmask ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
7280 ARG4, ARG5);
7281 PRE_REG_READ5(long, "lwp_sigmask", int, how, vki_uint_t, bits0,
7282 vki_uint_t, bits1, vki_uint_t, bits2, vki_uint_t, bits3);
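/* The new mask arrives as four 32-bit words in registers; reassemble them
   into a vki_sigset_t and route it through VG_(do_sys_sigprocmask)() so the
   usual mask policing is applied. */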
7284 sigset.__sigbits[0] = ARG2;
7285 sigset.__sigbits[1] = ARG3;
7286 sigset.__sigbits[2] = ARG4;
7287 sigset.__sigbits[3] = ARG5;
7289 SET_STATUS_from_SysRes(
7290 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, &sigset, NULL)
7293 if (SUCCESS)
7294 *flags |= SfPollAfter;
7297 PRE(sys_lwp_private)
7299 /* int lwp_private(int cmd, int which, uintptr_t base); */
7300 ThreadState *tst = VG_(get_ThreadState)(tid);
7301 Int supported_base, supported_sel;
7302 PRINT("sys_lwp_private ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7303 PRE_REG_READ3(long, "lwp_private", int, cmd, int, which,
7304 uintptr_t, base);
7306 /* Note: Only the %gs base is currently supported on x86 and the %fs base
7307 on amd64. Support for the %fs base on x86 and for the %gs base on amd64
7308 should be added. Anything else is probably a client program error. */
7309 #if defined(VGP_x86_solaris)
7310 supported_base = VKI_LWP_GSBASE;
7311 supported_sel = VKI_LWPGS_SEL;
7312 #elif defined(VGP_amd64_solaris)
7313 supported_base = VKI_LWP_FSBASE;
7314 supported_sel = 0;
7315 #else
7316 #error "Unknown platform"
7317 #endif
7318 if (ARG2 != supported_base) {
7319 VG_(unimplemented)("Syswrap of the lwp_private call where which=%ld.",
7320 SARG2);
7321 /*NOTREACHED*/
7324 switch (ARG1 /*cmd*/) {
7325 case VKI_LWP_SETPRIVATE:
7326 #if defined(VGP_x86_solaris)
7327 tst->os_state.thrptr = ARG3;
7328 ML_(update_gdt_lwpgs)(tid);
7329 #elif defined(VGP_amd64_solaris)
7330 tst->arch.vex.guest_FS_CONST = ARG3;
7331 #else
7332 #error "Unknown platform"
7333 #endif
7334 SET_STATUS_Success(supported_sel);
7335 break;
7336 case VKI_LWP_GETPRIVATE:
7338 int thrptr;
7339 #if defined(VGP_x86_solaris)
7340 thrptr = tst->os_state.thrptr;
7341 #elif defined(VGP_amd64_solaris)
7342 thrptr = tst->arch.vex.guest_FS_CONST;
7343 #else
7344 #error "Unknown platform"
7345 #endif
7347 if (thrptr == 0) {
7348 SET_STATUS_Failure(VKI_EINVAL);
7349 return;
7352 #if defined(VGP_x86_solaris)
7353 if (tst->arch.vex.guest_GS != supported_sel) {
7354 SET_STATUS_Failure(VKI_EINVAL);
7355 return;
7357 #elif defined(VGP_amd64_solaris)
7358 /* Valgrind on amd64 does not allow the %gs register to be changed, so
7359 there is no need to check here that guest_GS is equal to
7360 supported_sel. */
7361 #else
7362 #error "Unknown platform"
7363 #endif
7365 PRE_MEM_WRITE("lwp_private(base)", ARG3, sizeof(Addr));
7366 if (!ML_(safe_to_deref((void*)ARG3, sizeof(Addr)))) {
7367 SET_STATUS_Failure(VKI_EFAULT);
7368 return;
7370 *(Addr*)ARG3 = thrptr;
7371 POST_MEM_WRITE((Addr)ARG3, sizeof(Addr));
7372 SET_STATUS_Success(0);
7373 break;
7375 default:
7376 VG_(unimplemented)("Syswrap of the lwp_private call where cmd=%ld.",
7377 SARG1);
7378 /*NOTREACHED*/
7379 break;
7383 PRE(sys_lwp_wait)
7385 /* int lwp_wait(id_t lwpid, id_t *departed); */
7386 *flags |= SfMayBlock;
7387 PRINT("sys_lwp_wait ( %ld, %#lx )", SARG1, ARG2);
7388 PRE_REG_READ2(long, "lwp_wait", vki_id_t, lwpid, vki_id_t *, departed);
7389 if (ARG2)
7390 PRE_MEM_WRITE("lwp_wait(departed)", ARG2, sizeof(vki_id_t));
7393 POST(sys_lwp_wait)
7395 POST_MEM_WRITE(ARG2, sizeof(vki_id_t));
7398 PRE(sys_lwp_mutex_wakeup)
7400 /* int lwp_mutex_wakeup(lwp_mutex_t *lp, int release_all); */
7401 *flags |= SfMayBlock;
7402 PRINT("sys_lwp_mutex_wakeup ( %#lx, %ld )", ARG1, SARG2);
7403 PRE_REG_READ2(long, "lwp_mutex_wakeup", vki_lwp_mutex_t *, lp,
7404 int, release_all);
7405 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7406 PRE_FIELD_READ("lwp_mutex_wakeup(lp->mutex_type)", lp->vki_mutex_type);
7407 PRE_FIELD_WRITE("lwp_mutex_wakeup(lp->mutex_waiters)",
7408 lp->vki_mutex_waiters);
7411 POST(sys_lwp_mutex_wakeup)
7413 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7414 POST_FIELD_WRITE(lp->vki_mutex_waiters);
7417 PRE(sys_lwp_cond_wait)
7419 /* int lwp_cond_wait(lwp_cond_t *cvp, lwp_mutex_t *mp, timespec_t *tsp,
7420 int check_park); */
7421 *flags |= SfMayBlock;
7422 PRINT("sys_lwp_cond_wait( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
7423 PRE_REG_READ4(long, "lwp_cond_wait", vki_lwp_cond_t *, cvp,
7424 vki_lwp_mutex_t *, mp, vki_timespec_t *, tsp, int, check_park);
7426 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7427 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7428 PRE_FIELD_READ("lwp_cond_wait(cvp->type)", cvp->vki_cond_type);
7429 PRE_FIELD_READ("lwp_cond_wait(cvp->waiters_kernel)",
7430 cvp->vki_cond_waiters_kernel);
7431 PRE_FIELD_READ("lwp_cond_wait(mp->mutex_type)", mp->vki_mutex_type);
7432 PRE_FIELD_WRITE("lwp_cond_wait(mp->mutex_waiters)", mp->vki_mutex_waiters);
7433 if (ARG3 != 0)
7434 PRE_MEM_READ("lwp_cond_wait(tsp)", ARG3, sizeof(vki_timespec_t));
7437 POST(sys_lwp_cond_wait)
7439 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7440 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7441 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7442 POST_FIELD_WRITE(mp->vki_mutex_waiters);
7443 if (ARG3 != 0)
7444 POST_MEM_WRITE(ARG3, sizeof(vki_timespec_t));
7447 PRE(sys_lwp_cond_signal)
7449 /* int lwp_cond_signal(lwp_cond_t *cvp); */
7450 *flags |= SfMayBlock;
7451 PRINT("sys_lwp_cond_signal( %#lx )", ARG1);
7452 PRE_REG_READ1(long, "lwp_cond_signal", vki_lwp_cond_t *, cvp);
7454 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7455 PRE_FIELD_READ("lwp_cond_signal(cvp->type)", cvp->vki_cond_type);
7456 PRE_FIELD_READ("lwp_cond_signal(cvp->waiters_kernel)",
7457 cvp->vki_cond_waiters_kernel);
7460 POST(sys_lwp_cond_signal)
7462 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7463 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7466 PRE(sys_lwp_cond_broadcast)
7468 /* int lwp_cond_broadcast(lwp_cond_t *cvp); */
7469 *flags |= SfMayBlock;
7470 PRINT("sys_lwp_cond_broadcast ( %#lx )", ARG1);
7471 PRE_REG_READ1(long, "lwp_cond_broadcast", vki_lwp_cond_t *, cvp);
7473 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7474 PRE_FIELD_READ("lwp_cond_broadcast(cvp->type)", cvp->vki_cond_type);
7475 PRE_FIELD_READ("lwp_cond_broadcast(cvp->waiters_kernel)",
7476 cvp->vki_cond_waiters_kernel);
7477 /*PRE_FIELD_WRITE("lwp_cond_broadcast(cvp->waiters_kernel)",
7478 cvp->vki_cond_waiters_kernel);*/
7481 POST(sys_lwp_cond_broadcast)
7483 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7484 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7487 PRE(sys_pread)
7489 /* ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset); */
7490 *flags |= SfMayBlock;
7491 PRINT("sys_pread ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7492 PRE_REG_READ4(long, "pread", int, fildes, void *, buf,
7493 vki_size_t, nbyte, vki_off_t, offset);
7494 PRE_MEM_WRITE("pread(buf)", ARG2, ARG3);
7496 /* Be strict. */
7497 if (!ML_(fd_allowed)(ARG1, "pread", tid, False))
7498 SET_STATUS_Failure(VKI_EBADF);
7501 POST(sys_pread)
7503 POST_MEM_WRITE(ARG2, RES);
7506 PRE(sys_pwrite)
7508 /* ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
7509 off_t offset); */
7510 *flags |= SfMayBlock;
7511 PRINT("sys_pwrite ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7512 PRE_REG_READ4(long, "pwrite", int, fildes, const void *, buf,
7513 vki_size_t, nbyte, vki_off_t, offset);
7514 PRE_MEM_READ("pwrite(buf)", ARG2, ARG3);
7516 /* Be strict. */
7517 if (!ML_(fd_allowed)(ARG1, "pwrite", tid, False))
7518 SET_STATUS_Failure(VKI_EBADF);
7521 PRE(sys_getpagesizes)
7523 /* int getpagesizes(int legacy, size_t *buf, int nelem); */
7524 PRINT("sys_getpagesizes ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7525 PRE_REG_READ3(long, "getpagesizes", int, legacy, size_t *, buf,
7526 int, nelem);
7527 if (ARG2)
7528 PRE_MEM_WRITE("getpagesizes(buf)", ARG2, ARG3 * sizeof(vki_size_t));
7531 POST(sys_getpagesizes)
7533 if (ARG2)
7534 POST_MEM_WRITE(ARG2, RES * sizeof(vki_size_t));
7537 PRE(sys_lgrpsys)
7539 /* Kernel: int lgrpsys(int subcode, long ia, void *ap); */
7540 switch (ARG1 /*subcode*/) {
7541 case VKI_LGRP_SYS_MEMINFO:
7542 PRINT("sys_lgrpsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7543 PRE_REG_READ3(long, SC2("lgrpsys", "meminfo"), int, subcode,
7544 int, addr_count, vki_meminfo_t *, minfo);
7545 PRE_MEM_READ("lgrpsys(minfo)", ARG3, sizeof(vki_meminfo_t));
7547 if (ML_(safe_to_deref)((vki_meminfo_t *) ARG3, sizeof(vki_meminfo_t))) {
7548 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7549 PRE_MEM_READ("lgrpsys(minfo->mi_inaddr)",
7550 (Addr) minfo->mi_inaddr, SARG2 * sizeof(vki_uint64_t));
7551 PRE_MEM_READ("lgrpsys(minfo->mi_info_req)", (Addr) minfo->mi_info_req,
7552 minfo->mi_info_count * sizeof(vki_uint_t));
7553 PRE_MEM_WRITE("lgrpsys(minfo->mi_outdata)", (Addr) minfo->mi_outdata,
7554 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7555 PRE_MEM_WRITE("lgrpsys(minfo->mi_validity)",
7556 (Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7558 break;
7559 case VKI_LGRP_SYS_GENERATION:
7560 /* Liblgrp: lgrp_gen_t lgrp_generation(lgrp_view_t view); */
7561 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7562 PRE_REG_READ2(long, SC2("lgrpsys", "generation"), int, subcode,
7563 vki_lgrp_view_t, view);
7564 break;
7565 case VKI_LGRP_SYS_VERSION:
7566 /* Liblgrp: int lgrp_version(int version); */
7567 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7568 PRE_REG_READ2(long, SC2("lgrpsys", "version"), int, subcode,
7569 int, version);
7570 break;
7571 case VKI_LGRP_SYS_SNAPSHOT:
7572 /* Liblgrp: int lgrp_snapshot(void *buf, size_t bufsize); */
7573 PRINT("sys_lgrpsys ( %ld, %lu, %#lx )", SARG1, ARG2, ARG3);
7574 PRE_REG_READ3(long, SC2("lgrpsys", "snapshot"), int, subcode,
7575 vki_size_t, bufsize, void *, buf);
7576 PRE_MEM_WRITE("lgrpsys(buf)", ARG3, ARG2);
7577 break;
7578 default:
7579 VG_(unimplemented)("Syswrap of the lgrpsys call with subcode %ld.",
7580 SARG1);
7581 /*NOTREACHED*/
7582 break;
7586 POST(sys_lgrpsys)
7588 switch (ARG1 /*subcode*/) {
7589 case VKI_LGRP_SYS_MEMINFO:
7591 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7592 POST_MEM_WRITE((Addr) minfo->mi_outdata,
7593 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7594 POST_MEM_WRITE((Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7596 break;
7597 case VKI_LGRP_SYS_GENERATION:
7598 case VKI_LGRP_SYS_VERSION:
7599 break;
7600 case VKI_LGRP_SYS_SNAPSHOT:
7601 POST_MEM_WRITE(ARG3, RES);
7602 break;
7603 default:
7604 vg_assert(0);
7605 break;
7609 PRE(sys_rusagesys)
7611 /* Kernel: int rusagesys(int code, void *arg1, void *arg2,
7612 void *arg3, void *arg4); */
7613 switch (ARG1 /*code*/) {
7614 case VKI__RUSAGESYS_GETRUSAGE:
7615 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7616 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7617 /* Libc: int getrusage(int who, struct rusage *r_usage); */
7618 PRINT("sys_rusagesys ( %ld, %#lx )", SARG1, ARG2);
7619 PRE_REG_READ2(long, SC2("rusagesys", "getrusage"), int, code,
7620 struct vki_rusage *, r_usage);
7621 PRE_MEM_WRITE("rusagesys(r_usage)", ARG2, sizeof(struct vki_rusage));
7622 break;
7624 case VKI__RUSAGESYS_GETVMUSAGE:
7625 /* Libc: int getvmusage(uint_t flags, time_t age,
7626 vmusage_t *buf, size_t *nres); */
7627 PRINT("sys_rusagesys ( %ld, %lu, %ld, %#lx, %#lx )",
7628 SARG1, ARG2, SARG3, ARG4, ARG5);
7629 PRE_REG_READ5(long, SC2("rusagesys", "getvmusage"), int, code,
7630 vki_uint_t, flags, vki_time_t, age,
7631 vki_vmusage_t *, buf, vki_size_t *, nres);
7632 PRE_MEM_READ("rusagesys(nres)", ARG5, sizeof(vki_size_t));
7633 /* PRE_MEM_WRITE("rusagesys(nres)", ARG5, sizeof(vki_size_t)); */
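/* *nres is an in/out parameter: on entry it holds the capacity of buf in
   vmusage_t records, on return the number of records produced (handled in
   the POST wrapper), hence buf is sized from *nres here. */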
7635 if (ML_(safe_to_deref)((void *) ARG5, sizeof(vki_size_t))) {
7636 vki_size_t *nres = (vki_size_t *) ARG5;
7637 PRE_MEM_WRITE("rusagesys(buf)", ARG4,
7638 *nres * sizeof(vki_vmusage_t));
7640 *flags |= SfMayBlock;
7641 break;
7643 default:
7644 VG_(unimplemented)("Syswrap of the rusagesys call with code %ld.", SARG1);
7645 /*NOTREACHED*/
7646 break;
7650 POST(sys_rusagesys)
7652 switch (ARG1 /*code*/) {
7653 case VKI__RUSAGESYS_GETRUSAGE:
7654 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7655 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7656 POST_MEM_WRITE(ARG2, sizeof(struct vki_rusage));
7657 break;
7658 case VKI__RUSAGESYS_GETVMUSAGE:
7660 vki_size_t *nres = (vki_size_t *) ARG5;
7661 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
7662 POST_MEM_WRITE(ARG4, *nres * sizeof(vki_vmusage_t));
7664 break;
7665 default:
7666 vg_assert(0);
7667 break;
7671 PRE(sys_port)
7673 /* Kernel: int64_t portfs(int opcode, uintptr_t a0, uintptr_t a1,
7674 uintptr_t a2, uintptr_t a3, uintptr_t a4); */
7675 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7676 *flags |= SfMayBlock;
7677 switch (opcode) {
7678 case VKI_PORT_CREATE:
7679 PRINT("sys_port ( %ld )", SARG1);
7680 PRE_REG_READ1(long, SC2("port", "create"), int, opcode);
7681 break;
7682 case VKI_PORT_ASSOCIATE:
7683 case VKI_PORT_DISSOCIATE:
7684 PRINT("sys_port ( %ld, %ld, %ld, %#lx, %ld, %#lx )", SARG1, SARG2, SARG3,
7685 ARG4, SARG5, ARG6);
7686 if (opcode == VKI_PORT_ASSOCIATE) {
7687 PRE_REG_READ6(long, SC2("port", "associate"), int, opcode, int, a0,
7688 int, a1, uintptr_t, a2, int, a3, void *, a4);
7690 else {
7691 PRE_REG_READ6(long, SC2("port", "dissociate"), int, opcode, int, a0,
7692 int, a1, uintptr_t, a2, int, a3, void *, a4);
7695 switch (ARG3 /*source*/) {
7696 case VKI_PORT_SOURCE_FD:
7697 if (!ML_(fd_allowed)(ARG4, "port", tid, False)) {
7698 SET_STATUS_Failure(VKI_EBADF);
7700 break;
7701 case VKI_PORT_SOURCE_FILE:
7703 struct vki_file_obj *fo = (struct vki_file_obj *)ARG4;
7704 PRE_MEM_READ("port(file_obj)", ARG4, sizeof(struct vki_file_obj));
7705 if (ML_(safe_to_deref)(&fo->fo_name, sizeof(fo->fo_name)))
7706 PRE_MEM_RASCIIZ("port(file_obj->fo_name)", (Addr)fo->fo_name);
7708 break;
7709 default:
7710 VG_(unimplemented)("Syswrap of the port_associate/dissociate call "
7711 "type %ld.", SARG3);
7712 /*NOTREACHED*/
7713 break;
7715 break;
7716 case VKI_PORT_SEND:
7717 PRINT("sys_port ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
7718 PRE_REG_READ4(long, SC2("port", "send"), int, opcode, int, a0, int, a1,
7719 void *, a2);
7720 break;
7721 case VKI_PORT_SENDN:
7722 PRINT("sys_port ( %ld, %#lx, %#lx, %lu, %lx, %#lx)", SARG1, ARG2, ARG3,
7723 ARG4, ARG5, ARG6);
7724 PRE_REG_READ6(long, SC2("port", "sendn"), int, opcode, int *, a0,
7725 int *, a1, vki_uint_t, a2, int, a3, void *, a4);
7726 PRE_MEM_READ("port(ports)", ARG2, ARG4 * sizeof(int));
7727 PRE_MEM_WRITE("port(errors)", ARG3, ARG4 * sizeof(int));
7728 break;
7729 case VKI_PORT_GET:
7730 PRINT("sys_port ( %ld, %ld, %#lx, %ld, %ld, %#lx )", SARG1, SARG2, ARG3,
7731 SARG4, SARG5, ARG6);
7732 PRE_REG_READ6(long, SC2("port", "get"), int, opcode, int, a0,
7733 port_event_t *, a1, vki_time_t, a2, long, a3,
7734 timespec_t *, a4);
7735 PRE_MEM_WRITE("port(uevp)", ARG3, sizeof(vki_port_event_t));
7736 break;
7737 case VKI_PORT_GETN:
7738 PRINT("sys_port ( %ld, %ld, %#lx, %lu, %lu, %#lx )", SARG1, SARG2, ARG3,
7739 ARG4, ARG5, ARG6);
7740 PRE_REG_READ6(long, SC2("port", "getn"), int, opcode, int, a0,
7741 port_event_t *, a1, vki_uint_t, a2, vki_uint_t, a3,
7742 timespec_t *, a4);
7743 if (ARG6)
7744 PRE_MEM_READ("port(timeout)", ARG6, sizeof(vki_timespec_t));
7745 PRE_MEM_WRITE("port(uevp)", ARG3, ARG4 * sizeof(vki_port_event_t));
7746 break;
7747 case VKI_PORT_ALERT:
7748 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, SARG4,
7749 ARG5);
7750 PRE_REG_READ5(long, SC2("port", "alert"), int, opcode, int, a0, int, a1,
7751 int, a2, void *, a3);
7752 break;
7753 case VKI_PORT_DISPATCH:
7754 // FIXME: check order: SARG2, SARG1 or SARG1, SARG2 ??
7755 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx, %#lx )", SARG2, SARG1, SARG3,
7756 SARG4, ARG5, ARG6);
7757 PRE_REG_READ6(long, SC2("port", "dispatch"), int, opcode, int, a0,
7758 int, a1, int, a2, uintptr_t, a3, void *, a4);
7759 break;
7760 default:
7761 VG_(unimplemented)("Syswrap of the port call with opcode %ld.", SARG1);
7762 /*NOTREACHED*/
7763 break;
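/* For every opcode except PORT_CREATE (which creates the port) and
   PORT_SENDN (whose a0 is a list of port fds), a0/ARG2 is the event port
   file descriptor, hence the common check below. */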
7766 /* Be strict. */
7767 if ((opcode != VKI_PORT_CREATE && opcode != VKI_PORT_SENDN) &&
7768 !ML_(fd_allowed)(ARG2, "port", tid, False))
7769 SET_STATUS_Failure(VKI_EBADF);
7772 POST(sys_port)
7774 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7775 switch (opcode) {
7776 case VKI_PORT_CREATE:
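/* PORT_CREATE returns a fresh file descriptor, so treat it like open():
   refuse it if it falls into Valgrind's reserved range and record it when
   --track-fds is in effect. */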
7777 if (!ML_(fd_allowed)(RES, "port", tid, True)) {
7778 VG_(close)(RES);
7779 SET_STATUS_Failure(VKI_EMFILE);
7781 else if (VG_(clo_track_fds))
7782 ML_(record_fd_open_named)(tid, RES);
7783 break;
7784 case VKI_PORT_ASSOCIATE:
7785 case VKI_PORT_DISSOCIATE:
7786 case VKI_PORT_SEND:
7787 break;
7788 case VKI_PORT_SENDN:
7789 if (RES != ARG4) {
7790 /* If there is any error then the whole errors area is written. */
7791 POST_MEM_WRITE(ARG3, ARG4 * sizeof(int));
7793 break;
7794 case VKI_PORT_GET:
7795 POST_MEM_WRITE(ARG3, sizeof(vki_port_event_t));
7796 break;
7797 case VKI_PORT_GETN:
7798 POST_MEM_WRITE(ARG3, RES * sizeof(vki_port_event_t));
7799 break;
7800 case VKI_PORT_ALERT:
7801 case VKI_PORT_DISPATCH:
7802 break;
7803 default:
7804 VG_(unimplemented)("Syswrap of the port call with opcode %lu.", ARG1);
7805 /*NOTREACHED*/
7806 break;
7810 PRE(sys_pollsys)
7812 /* int pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeout,
7813 sigset_t *set); */
7814 UWord i;
7815 struct vki_pollfd *ufds = (struct vki_pollfd *)ARG1;
7817 *flags |= SfMayBlock | SfPostOnFail;
7819 PRINT("sys_pollsys ( %#lx, %lu, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
7820 PRE_REG_READ4(long, "poll", pollfd_t *, fds, vki_nfds_t, nfds,
7821 timespec_t *, timeout, sigset_t *, set);
7823 for (i = 0; i < ARG2; i++) {
7824 vki_pollfd_t *u = &ufds[i];
7825 PRE_FIELD_READ("poll(ufds.fd)", u->fd);
7826 if (ML_(safe_to_deref)(&ufds[i].fd, sizeof(ufds[i].fd)) && ufds[i].fd >= 0) {
7827 PRE_FIELD_READ("poll(ufds.events)", u->events);
7829 PRE_FIELD_WRITE("poll(ufds.revents)", u->revents);
7832 if (ARG3)
7833 PRE_MEM_READ("poll(timeout)", ARG3, sizeof(vki_timespec_t));
7835 if (ARG4) {
7836 PRE_MEM_READ("poll(set)", ARG4, sizeof(vki_sigset_t));
7838 const vki_sigset_t *guest_sigmask = (vki_sigset_t *) ARG4;
7839 if (!ML_(safe_to_deref)(guest_sigmask, sizeof(vki_sigset_t))) {
7840 ARG4 = 1; /* Something recognisable to POST() hook. */
7841 } else {
7842 vki_sigset_t *vg_sigmask =
7843 VG_(malloc)("syswrap.pollsys.1", sizeof(vki_sigset_t));
7844 ARG4 = (Addr) vg_sigmask;
7845 *vg_sigmask = *guest_sigmask;
7846 VG_(sanitize_client_sigmask)(vg_sigmask);
7851 POST(sys_pollsys)
7853 vg_assert(SUCCESS || FAILURE);
7855 if (SUCCESS && (RES >= 0)) {
7856 UWord i;
7857 vki_pollfd_t *ufds = (vki_pollfd_t*)ARG1;
7858 for (i = 0; i < ARG2; i++)
7859 POST_FIELD_WRITE(ufds[i].revents);
7862 if ((ARG4 != 0) && (ARG4 != 1)) {
7863 VG_(free)((vki_sigset_t *) ARG4);
7867 PRE(sys_labelsys)
7869 /* Kernel: int labelsys(int op, void *a1, void *a2, void *a3,
7870 void *a4, void *a5); */
7872 switch (ARG1 /*op*/) {
7873 case VKI_TSOL_SYSLABELING:
7874 /* Libc: int is_system_labeled(void); */
7875 PRINT("sys_labelsys ( %ld )", SARG1);
7876 PRE_REG_READ1(long, SC2("labelsys", "syslabeling"), int, op);
7877 break;
7879 case VKI_TSOL_TNRH:
7880 /* Libtsnet: int tnrh(int cmd, tsol_rhent_t *buf); */
7881 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7882 PRE_REG_READ3(long, SC2("labelsys", "tnrh"), int, op, int, cmd,
7883 vki_tsol_rhent_t *, buf);
7884 if (ARG2 != VKI_TNDB_FLUSH)
7885 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_rhent_t));
7886 break;
7888 case VKI_TSOL_TNRHTP:
7889 /* Libtsnet: int tnrhtp(int cmd, tsol_tpent_t *buf); */
7890 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7891 PRE_REG_READ3(long, SC2("labelsys", "tnrhtp"), int, op, int, cmd,
7892 vki_tsol_tpent_t *, buf);
7893 if (ARG2 != VKI_TNDB_FLUSH)
7894 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_tpent_t));
7895 break;
7897 case VKI_TSOL_TNMLP:
7898 /* Libtsnet: int tnmlp(int cmd, tsol_mlpent_t *buf); */
7899 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7900 PRE_REG_READ3(long, SC2("labelsys", "tnmlp"), int, op, int, cmd,
7901 vki_tsol_mlpent_t *, buf);
7902 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_mlpent_t));
7903 break;
7905 case VKI_TSOL_GETLABEL:
7906 /* Libtsol: int getlabel(const char *path, bslabel_t *label); */
7907 PRINT("sys_labelsys ( %ld, %#lx(%s), %#lx )",
7908 SARG1, ARG2, (HChar *) ARG2, ARG3);
7909 PRE_REG_READ3(long, SC2("labelsys", "getlabel"), int, op,
7910 const char *, path, vki_bslabel_t *, label);
7911 PRE_MEM_RASCIIZ("labelsys(path)", ARG2);
7912 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7913 break;
7915 case VKI_TSOL_FGETLABEL:
7916 /* Libtsol: int fgetlabel(int fd, bslabel_t *label); */
7917 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7918 PRE_REG_READ3(long, SC2("labelsys", "fgetlabel"), int, op,
7919 int, fd, vki_bslabel_t *, label);
7920 /* Be strict. */
7921 if (!ML_(fd_allowed)(ARG2, "labelsys(fgetlabel)", tid, False))
7922 SET_STATUS_Failure(VKI_EBADF);
7923 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7924 break;
7926 #if defined(SOLARIS_TSOL_CLEARANCE)
7927 case VKI_TSOL_GETCLEARANCE:
7928 /* Libtsol: int getclearance(bslabel_t *clearance); */
7929 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7930 PRE_REG_READ2(long, SC2("labelsys", "getclearance"), int, op,
7931 vki_bslabel_t *, clearance);
7932 PRE_MEM_WRITE("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7933 break;
7935 case VKI_TSOL_SETCLEARANCE:
7936 /* Libtsol: int setclearance(bslabel_t *clearance); */
7937 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7938 PRE_REG_READ2(long, SC2("labelsys", "setclearance"), int, op,
7939 vki_bslabel_t *, clearance);
7940 PRE_MEM_READ("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7941 break;
7942 #endif /* SOLARIS_TSOL_CLEARANCE */
7944 default:
7945 VG_(unimplemented)("Syswrap of the labelsys call with op %ld.", SARG1);
7946 /*NOTREACHED*/
7947 break;
7951 POST(sys_labelsys)
7953 switch (ARG1 /*op*/) {
7954 case VKI_TSOL_SYSLABELING:
7955 break;
7957 case VKI_TSOL_TNRH:
7958 switch (ARG2 /*cmd*/) {
7959 case VKI_TNDB_LOAD:
7960 case VKI_TNDB_DELETE:
7961 case VKI_TNDB_FLUSH:
7962 break;
7963 #if defined(SOLARIS_TNDB_GET_TNIP)
7964 case TNDB_GET_TNIP:
7965 #endif /* SOLARIS_TNDB_GET_TNIP */
7966 case VKI_TNDB_GET:
7967 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_rhent_t));
7968 break;
7969 default:
7970 vg_assert(0);
7971 break;
7973 break;
7975 case VKI_TSOL_TNRHTP:
7976 switch (ARG2 /*cmd*/) {
7977 case VKI_TNDB_LOAD:
7978 case VKI_TNDB_DELETE:
7979 case VKI_TNDB_FLUSH:
7980 break;
7981 case VKI_TNDB_GET:
7982 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_tpent_t));
7983 break;
7984 default:
7985 vg_assert(0);
7986 break;
7988 break;
7990 case VKI_TSOL_TNMLP:
7991 switch (ARG2 /*cmd*/) {
7992 case VKI_TNDB_LOAD:
7993 case VKI_TNDB_DELETE:
7994 case VKI_TNDB_FLUSH:
7995 break;
7996 case VKI_TNDB_GET:
7997 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_mlpent_t));
7998 break;
7999 default:
8000 vg_assert(0);
8001 break;
8003 break;
8005 case VKI_TSOL_GETLABEL:
8006 case VKI_TSOL_FGETLABEL:
8007 POST_MEM_WRITE(ARG3, sizeof(vki_bslabel_t));
8008 break;
8010 #if defined(SOLARIS_TSOL_CLEARANCE)
8011 case VKI_TSOL_GETCLEARANCE:
8012 POST_MEM_WRITE(ARG2, sizeof(vki_bslabel_t));
8013 break;
8015 case VKI_TSOL_SETCLEARANCE:
8016 break;
8017 #endif /* SOLARIS_TSOL_CLEARANCE */
8019 default:
8020 vg_assert(0);
8021 break;
8025 PRE(sys_acl)
8027 /* int acl(char *pathp, int cmd, int nentries, void *aclbufp); */
8028 PRINT("sys_acl ( %#lx(%s), %ld, %ld, %#lx )", ARG1, (HChar *) ARG1, SARG2,
8029 SARG3, ARG4);
8031 PRE_REG_READ4(long, "acl", char *, pathp, int, cmd,
8032 int, nentries, void *, aclbufp);
8033 PRE_MEM_RASCIIZ("acl(pathp)", ARG1);
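/* The element type of aclbufp depends on cmd: SETACL/GETACL operate on
   nentries aclent_t records, ACE_SETACL/ACE_GETACL on nentries ace_t
   records, and the *GETACLCNT commands take no buffer at all. */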
8035 switch (ARG2 /*cmd*/) {
8036 case VKI_SETACL:
8037 if (ARG4)
8038 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8039 break;
8040 case VKI_GETACL:
8041 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8042 break;
8043 case VKI_GETACLCNT:
8044 break;
8045 case VKI_ACE_SETACL:
8046 if (ARG4)
8047 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8048 break;
8049 case VKI_ACE_GETACL:
8050 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8051 break;
8052 case VKI_ACE_GETACLCNT:
8053 break;
8054 default:
8055 VG_(unimplemented)("Syswrap of the acl call with cmd %ld.", SARG2);
8056 /*NOTREACHED*/
8057 break;
8061 POST(sys_acl)
8063 switch (ARG2 /*cmd*/) {
8064 case VKI_SETACL:
8065 break;
8066 case VKI_GETACL:
8067 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8068 break;
8069 case VKI_GETACLCNT:
8070 break;
8071 case VKI_ACE_SETACL:
8072 break;
8073 case VKI_ACE_GETACL:
8074 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8075 break;
8076 case VKI_ACE_GETACLCNT:
8077 break;
8078 default:
8079 vg_assert(0);
8080 break;
8084 PRE(sys_auditsys)
8086 /* Kernel: int auditsys(long code, long a1, long a2, long a3, long a4); */
8087 switch (ARG1 /*code*/) {
8088 case VKI_BSM_GETAUID:
8089 /* Libbsm: int getauid(au_id_t *auid); */
8090 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8091 PRE_REG_READ2(long, SC2("auditsys", "getauid"), long, code,
8092 vki_au_id_t *, auid);
8093 PRE_MEM_WRITE("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8094 break;
8095 case VKI_BSM_SETAUID:
8096 /* Libbsm: int setauid(au_id_t *auid); */
8097 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8098 PRE_REG_READ2(long, SC2("auditsys", "setauid"), long, code,
8099 vki_au_id_t *, auid);
8100 PRE_MEM_READ("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8101 break;
8102 case VKI_BSM_GETAUDIT:
8103 /* Libbsm: int getaudit(auditinfo_t *ai); */
8104 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8105 PRE_REG_READ2(long, SC2("auditsys", "getaudit"), long, code,
8106 vki_auditinfo_t *, ai);
8107 PRE_MEM_WRITE("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8108 break;
8109 case VKI_BSM_SETAUDIT:
8110 /* Libbsm: int setaudit(auditinfo_t *ai); */
8111 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8112 PRE_REG_READ2(long, SC2("auditsys", "setaudit"), long, code,
8113 vki_auditinfo_t *, ai);
8114 PRE_MEM_READ("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8115 break;
8116 case VKI_BSM_AUDIT:
8117 /* Libbsm: int audit(void *record, int length); */
8118 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8119 PRE_REG_READ3(long, SC2("auditsys", "audit"), long, code,
8120 void *, record, int, length);
8121 PRE_MEM_READ("auditsys(record)", ARG2, ARG3);
8122 break;
8123 case VKI_BSM_AUDITCTL:
8124 /* Libbsm: int auditon(int cmd, caddr_t data, int length); */
8125 PRINT("sys_auditsys ( %ld, %ld, %#lx, %ld )",
8126 SARG1, SARG2, ARG3, SARG4);
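/* auditon(2): the cmd in ARG2 determines both the transfer direction
   (get vs. set) and the size of the data buffer in ARG3, so every
   subcommand gets its own PRE_REG_READ/PRE_MEM_* treatment below. */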
8128 switch (ARG2 /*cmd*/) {
8129 case VKI_A_GETPOLICY:
8130 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpolicy"),
8131 long, code, int, cmd, vki_uint32_t *, policy);
8132 PRE_MEM_WRITE("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8133 break;
8134 case VKI_A_SETPOLICY:
8135 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpolicy"),
8136 long, code, int, cmd, vki_uint32_t *, policy);
8137 PRE_MEM_READ("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8138 break;
8139 case VKI_A_GETKMASK:
8140 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getkmask"),
8141 long, code, int, cmd, vki_au_mask_t *, kmask);
8142 PRE_MEM_WRITE("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8143 break;
8144 case VKI_A_SETKMASK:
8145 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setkmask"),
8146 long, code, int, cmd, vki_au_mask_t *, kmask);
8147 PRE_MEM_READ("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8148 break;
8149 case VKI_A_GETQCTRL:
8150 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getqctrl"),
8151 long, code, int, cmd,
8152 struct vki_au_qctrl *, qctrl);
8153 PRE_MEM_WRITE("auditsys(qctrl)", ARG3,
8154 sizeof(struct vki_au_qctrl));
8155 break;
8156 case VKI_A_SETQCTRL:
8157 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setqctrl"),
8158 long, code, int, cmd,
8159 struct vki_au_qctrl *, qctrl);
8160 PRE_MEM_READ("auditsys(qctrl)", ARG3,
8161 sizeof(struct vki_au_qctrl));
8162 break;
8163 case VKI_A_GETCWD:
8164 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcwd"),
8165 long, code, int, cmd, char *, data, int, length);
8166 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8167 break;
8168 case VKI_A_GETCAR:
8169 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcar"),
8170 long, code, int, cmd, char *, data, int, length);
8171 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8172 break;
8173 #if defined(SOLARIS_AUDITON_STAT)
8174 case VKI_A_GETSTAT:
8175 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getstat"),
8176 long, code, int, cmd, vki_au_stat_t *, stats);
8177 PRE_MEM_WRITE("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8178 break;
8179 case VKI_A_SETSTAT:
8180 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setstat"),
8181 long, code, int, cmd, vki_au_stat_t *, stats);
8182 PRE_MEM_READ("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8183 break;
8184 #endif /* SOLARIS_AUDITON_STAT */
8185 case VKI_A_SETUMASK:
8186 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setumask"),
8187 long, code, int, cmd, vki_auditinfo_t *, umask);
8188 PRE_MEM_READ("auditsys(umask)", ARG3, sizeof(vki_auditinfo_t));
8189 break;
8190 case VKI_A_SETSMASK:
8191 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setsmask"),
8192 long, code, int, cmd, vki_auditinfo_t *, smask);
8193 PRE_MEM_READ("auditsys(smask)", ARG3, sizeof(vki_auditinfo_t));
8194 break;
8195 case VKI_A_GETCOND:
8196 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getcond"),
8197 long, code, int, cmd, int *, cond);
8198 PRE_MEM_WRITE("auditsys(cond)", ARG3, sizeof(int));
8199 break;
8200 case VKI_A_SETCOND:
8201 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setcond"),
8202 long, code, int, cmd, int *, state);
8203 PRE_MEM_READ("auditsys(cond)", ARG3, sizeof(int));
8204 break;
8205 case VKI_A_GETCLASS:
8206 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getclass"),
8207 long, code, int, cmd,
8208 vki_au_evclass_map_t *, classmap);
8210 if (ML_(safe_to_deref((void *) ARG3,
8211 sizeof(vki_au_evclass_map_t)))) {
8212 vki_au_evclass_map_t *classmap =
8213 (vki_au_evclass_map_t *) ARG3;
8214 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8215 classmap->ec_number);
8216 PRE_MEM_WRITE("auditsys(classmap)", ARG3,
8217 sizeof(vki_au_evclass_map_t));
8219 break;
8220 case VKI_A_SETCLASS:
8221 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setclass"),
8222 long, code, int, cmd,
8223 vki_au_evclass_map_t *, classmap);
8225 if (ML_(safe_to_deref((void *) ARG3,
8226 sizeof(vki_au_evclass_map_t)))) {
8227 vki_au_evclass_map_t *classmap =
8228 (vki_au_evclass_map_t *) ARG3;
8229 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8230 classmap->ec_number);
8231 PRE_FIELD_READ("auditsys(classmap.ec_class)",
8232 classmap->ec_class);
8234 break;
8235 case VKI_A_GETPINFO:
8236 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpinfo"),
8237 long, code, int, cmd,
8238 struct vki_auditpinfo *, apinfo);
8240 if (ML_(safe_to_deref((void *) ARG3,
8241 sizeof(struct vki_auditpinfo)))) {
8242 struct vki_auditpinfo *apinfo =
8243 (struct vki_auditpinfo *) ARG3;
8244 PRE_FIELD_READ("auditsys(apinfo.ap_pid)", apinfo->ap_pid);
8245 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8246 sizeof(struct vki_auditpinfo));
8248 break;
8249 case VKI_A_SETPMASK:
8250 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpmask"),
8251 long, code, int, cmd,
8252 struct vki_auditpinfo *, apinfo);
8253 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8254 sizeof(struct vki_auditpinfo));
8255 break;
8256 case VKI_A_GETPINFO_ADDR:
8257 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getpinfo_addr"),
8258 long, code, int, cmd,
8259 struct vki_auditpinfo_addr *, apinfo, int, length);
8261 if (ML_(safe_to_deref((void *) ARG3,
8262 sizeof(struct vki_auditpinfo_addr)))) {
8263 struct vki_auditpinfo_addr *apinfo_addr =
8264 (struct vki_auditpinfo_addr *) ARG3;
8265 PRE_FIELD_READ("auditsys(apinfo_addr.ap_pid)",
8266 apinfo_addr->ap_pid);
8267 PRE_MEM_WRITE("auditsys(apinfo_addr)", ARG3, ARG4);
8269 break;
8270 case VKI_A_GETKAUDIT:
8271 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getkaudit"),
8272 long, code, int, cmd,
8273 vki_auditinfo_addr_t *, kaudit, int, length);
8274 PRE_MEM_WRITE("auditsys(kaudit)", ARG3, ARG4);
8275 break;
8276 case VKI_A_SETKAUDIT:
8277 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "setkaudit"),
8278 long, code, int, cmd,
8279 vki_auditinfo_addr_t *, kaudit, int, length);
8280 PRE_MEM_READ("auditsys(kaudit)", ARG3, ARG4);
8281 break;
8282 case VKI_A_GETAMASK:
8283 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getamask"),
8284 long, code, int, cmd, vki_au_mask_t *, amask);
8285 PRE_MEM_WRITE("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8286 break;
8287 case VKI_A_SETAMASK:
8288 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setamask"),
8289 long, code, int, cmd, vki_au_mask_t *, amask);
8290 PRE_MEM_READ("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8291 break;
8292 default:
8293 VG_(unimplemented)("Syswrap of the auditsys(auditctl) call "
8294 "with cmd %lu.", ARG2);
8295 /*NOTREACHED*/
8296 break;
8298 break;
8299 case VKI_BSM_GETAUDIT_ADDR:
8300 /* Libbsm: int getaudit_addr(auditinfo_addr_t *ai, int len); */
8301 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8302 PRE_REG_READ3(long, SC2("auditsys", "getaudit_addr"), long, code,
8303 vki_auditinfo_addr_t *, ai, int, len);
8304 PRE_MEM_WRITE("auditsys(ai)", ARG2, ARG3);
8305 break;
8306 case VKI_BSM_SETAUDIT_ADDR:
8307 /* Libbsm: int setaudit_addr(auditinfo_addr_t *ai, int len); */
8308 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8309 PRE_REG_READ3(long, SC2("auditsys", "setaudit_addr"), long, code,
8310 vki_auditinfo_addr_t *, ai, int, len);
8311 PRE_MEM_READ("auditsys(ai)", ARG2, ARG3);
8312 break;
8313 case VKI_BSM_AUDITDOOR:
8314 /* Libbsm: int auditdoor(int fd); */
8315 PRINT("sys_auditsys ( %ld, %ld )", SARG1, SARG2);
8316 PRE_REG_READ2(long, SC2("auditsys", "door"), long, code, int, fd);
8318 /* Be strict. */
8319 if (!ML_(fd_allowed)(ARG2, SC2("auditsys", "door")"(fd)",
8320 tid, False))
8321 SET_STATUS_Failure(VKI_EBADF);
8322 break;
8323 default:
8324 VG_(unimplemented)("Syswrap of the auditsys call with code %lu.", ARG1);
8325 /*NOTREACHED*/
8326 break;
8330 POST(sys_auditsys)
8332 switch (ARG1 /*code*/) {
8333 case VKI_BSM_GETAUID:
8334 POST_MEM_WRITE(ARG2, sizeof(vki_au_id_t));
8335 break;
8336 case VKI_BSM_SETAUID:
8337 break;
8338 case VKI_BSM_GETAUDIT:
8339 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_t));
8340 break;
8341 case VKI_BSM_SETAUDIT:
8342 case VKI_BSM_AUDIT:
8343 break;
8344 case VKI_BSM_AUDITCTL:
8345 switch (ARG2 /*cmd*/) {
8346 case VKI_A_GETPOLICY:
8347 POST_MEM_WRITE(ARG3, sizeof(vki_uint32_t));
8348 break;
8349 case VKI_A_SETPOLICY:
8350 break;
8351 case VKI_A_GETKMASK:
8352 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8353 break;
8354 case VKI_A_SETKMASK:
8355 break;
8356 case VKI_A_GETQCTRL:
8357 POST_MEM_WRITE(ARG3, sizeof(struct vki_au_qctrl));
8358 break;
8359 case VKI_A_SETQCTRL:
8360 break;
8361 case VKI_A_GETCWD:
8362 case VKI_A_GETCAR:
8363 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
8364 break;
8365 #if defined(SOLARIS_AUDITON_STAT)
8366 case VKI_A_GETSTAT:
8367 POST_MEM_WRITE(ARG3, sizeof(vki_au_stat_t));
8368 break;
8369 case VKI_A_SETSTAT:
8370 #endif /* SOLARIS_AUDITON_STAT */
8371 case VKI_A_SETUMASK:
8372 case VKI_A_SETSMASK:
8373 break;
8374 case VKI_A_GETCOND:
8375 POST_MEM_WRITE(ARG3, sizeof(int));
8376 break;
8377 case VKI_A_SETCOND:
8378 break;
8379 case VKI_A_GETCLASS:
8380 POST_MEM_WRITE(ARG3, sizeof(vki_au_evclass_map_t));
8381 break;
8382 case VKI_A_SETCLASS:
8383 break;
8384 case VKI_A_GETPINFO:
8385 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo));
8386 break;
8387 case VKI_A_SETPMASK:
8388 break;
8389 case VKI_A_GETPINFO_ADDR:
8390 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo_addr));
8391 break;
8392 case VKI_A_GETKAUDIT:
8393 POST_MEM_WRITE(ARG3, sizeof(vki_auditinfo_addr_t));
8394 break;
8395 case VKI_A_SETKAUDIT:
8396 break;
8397 case VKI_A_GETAMASK:
8398 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8399 break;
8400 case VKI_A_SETAMASK:
8401 break;
8403 break;
8404 case VKI_BSM_GETAUDIT_ADDR:
8405 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_addr_t));
8406 break;
8407 case VKI_BSM_SETAUDIT_ADDR:
8408 break;
8409 case VKI_BSM_AUDITDOOR:
8410 break;
8414 PRE(sys_p_online)
8416 /* int p_online(processorid_t processorid, int flag); */
8417 PRINT("sys_p_online ( %ld, %ld )", SARG1, SARG2);
8418 PRE_REG_READ2(long, "p_online", vki_processorid_t, processorid, int, flag);
8421 PRE(sys_sigqueue)
8423 /* int sigqueue(pid_t pid, int signo, void *value,
8424 int si_code, timespec_t *timeout);
8426 PRINT("sys_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
8427 SARG1, SARG2, ARG3, SARG4, ARG5);
8428 PRE_REG_READ5(long, "sigqueue", vki_pid_t, pid, int, signo,
8429 void *, value, int, si_code,
8430 vki_timespec_t *, timeout);
8432 if (ARG5)
8433 PRE_MEM_READ("sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
8435 if (!ML_(client_signal_OK)(ARG2)) {
8436 SET_STATUS_Failure(VKI_EINVAL);
8437 return;
8440 /* If we're sending SIGKILL, check to see if the target is one of
8441 our threads and handle it specially. */
8442 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
8443 SET_STATUS_Success(0);
8444 } else {
8445 SysRes res = VG_(do_syscall5)(SYSNO, ARG1, ARG2, ARG3, ARG4,
8446 ARG5);
8447 SET_STATUS_from_SysRes(res);
8450 if (VG_(clo_trace_signals))
8451 VG_(message)(Vg_DebugMsg,
8452 "sigqueue: signal %ld queued for pid %ld\n",
8453 SARG2, SARG1);
8455 /* Check to see if this gave us a pending signal. */
8456 *flags |= SfPollAfter;
8459 PRE(sys_clock_gettime)
8461 /* int clock_gettime(clockid_t clock_id, struct timespec *tp); */
8462 PRINT("sys_clock_gettime ( %ld, %#lx )", SARG1, ARG2);
8463 PRE_REG_READ2(long, "clock_gettime", vki_clockid_t, clock_id,
8464 struct timespec *, tp);
8465 PRE_MEM_WRITE("clock_gettime(tp)", ARG2, sizeof(struct vki_timespec));
8468 POST(sys_clock_gettime)
8470 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8473 PRE(sys_clock_settime)
8475 /* int clock_settime(clockid_t clock_id, const struct timespec *tp); */
8476 PRINT("sys_clock_settime ( %ld, %#lx )", SARG1, ARG2);
8477 PRE_REG_READ2(long, "clock_settime", vki_clockid_t, clock_id,
8478 const struct timespec *, tp);
8479 PRE_MEM_READ("clock_settime(tp)", ARG2, sizeof(struct vki_timespec));
8482 PRE(sys_clock_getres)
8484 /* int clock_getres(clockid_t clock_id, struct timespec *res); */
8485 PRINT("sys_clock_getres ( %ld, %#lx )", SARG1, ARG2);
8486 PRE_REG_READ2(long, "clock_getres", vki_clockid_t, clock_id,
8487 struct timespec *, res);
8489 if (ARG2)
8490 PRE_MEM_WRITE("clock_getres(res)", ARG2, sizeof(struct vki_timespec));
8493 POST(sys_clock_getres)
8495 if (ARG2)
8496 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8499 PRE(sys_timer_create)
8501 /* int timer_create(clockid_t clock_id,
8502 struct sigevent *evp, timer_t *timerid);
8504 PRINT("sys_timer_create ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
8505 PRE_REG_READ3(long, "timer_create", vki_clockid_t, clock_id,
8506 struct vki_sigevent *, evp, vki_timer_t *, timerid);
8508 if (ARG2) {
8509 struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
8510 PRE_FIELD_READ("timer_create(evp.sigev_notify)", evp->sigev_notify);
8511 PRE_FIELD_READ("timer_create(evp.sigev_signo)", evp->sigev_signo);
8512 PRE_FIELD_READ("timer_create(evp.sigev_value.sival_int)",
8513 evp->sigev_value.sival_int);
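/* For SIGEV_PORT (and SIGEV_THREAD as set up by libc) sigev_value.sival_ptr
   is expected to point at a port_notify_t describing the event port, so
   that structure is checked as well. */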
8515 /* Be safe. */
8516 if (ML_(safe_to_deref(evp, sizeof(struct vki_sigevent)))) {
8517 if ((evp->sigev_notify == VKI_SIGEV_PORT) ||
8518 (evp->sigev_notify == VKI_SIGEV_THREAD))
8519 PRE_MEM_READ("timer_create(evp.sigev_value.sival_ptr)",
8520 (Addr) evp->sigev_value.sival_ptr,
8521 sizeof(vki_port_notify_t));
8525 PRE_MEM_WRITE("timer_create(timerid)", ARG3, sizeof(vki_timer_t));
8528 POST(sys_timer_create)
8530 POST_MEM_WRITE(ARG3, sizeof(vki_timer_t));
8533 PRE(sys_timer_delete)
8535 /* int timer_delete(timer_t timerid); */
8536 PRINT("sys_timer_delete ( %ld )", SARG1);
8537 PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
8540 PRE(sys_timer_settime)
8542 /* int timer_settime(timer_t timerid, int flags,
8543 const struct itimerspec *value,
8544 struct itimerspec *ovalue);
8546 PRINT("sys_timer_settime ( %ld, %ld, %#lx, %#lx )",
8547 SARG1, SARG2, ARG3, ARG4);
8548 PRE_REG_READ4(long, "timer_settime", vki_timer_t, timerid,
8549 int, flags, const struct vki_itimerspec *, value,
8550 struct vki_itimerspec *, ovalue);
8551 PRE_MEM_READ("timer_settime(value)",
8552 ARG3, sizeof(struct vki_itimerspec));
8553 if (ARG4)
8554 PRE_MEM_WRITE("timer_settime(ovalue)",
8555 ARG4, sizeof(struct vki_itimerspec));
8558 POST(sys_timer_settime)
8560 if (ARG4)
8561 POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
8564 PRE(sys_timer_gettime)
8566 /* int timer_gettime(timer_t timerid, struct itimerspec *value); */
8567 PRINT("sys_timer_gettime ( %ld, %#lx )", SARG1, ARG2);
8568 PRE_REG_READ2(long, "timer_gettime", vki_timer_t, timerid,
8569 struct vki_itimerspec *, value);
8570 PRE_MEM_WRITE("timer_gettime(value)",
8571 ARG2, sizeof(struct vki_itimerspec));
8574 POST(sys_timer_gettime)
8576 POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
8579 PRE(sys_timer_getoverrun)
8581 /* int timer_getoverrun(timer_t timerid); */
8582 PRINT("sys_timer_getoverrun ( %ld )", SARG1);
8583 PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
8586 PRE(sys_facl)
8588 /* int facl(int fildes, int cmd, int nentries, void *aclbufp); */
8589 PRINT("sys_facl ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
8591 PRE_REG_READ4(long, "facl", int, fildes, int, cmd,
8592 int, nentries, void *, aclbufp);
8594 switch (ARG2 /*cmd*/) {
8595 case VKI_SETACL:
8596 if (ARG4)
8597 PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_aclent_t));
8598 break;
8599 case VKI_GETACL:
8600 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8601 break;
8602 case VKI_GETACLCNT:
8603 break;
8604 case VKI_ACE_SETACL:
8605 if (ARG4)
8606 PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_ace_t));
8607 break;
8608 case VKI_ACE_GETACL:
8609 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8610 break;
8611 case VKI_ACE_GETACLCNT:
8612 break;
8613 default:
8614 VG_(unimplemented)("Syswrap of the facl call with cmd %ld.", SARG2);
8615 /*NOTREACHED*/
8616 break;
8619 /* Be strict. */
8620 if (!ML_(fd_allowed)(ARG1, "facl", tid, False))
8621 SET_STATUS_Failure(VKI_EBADF);
8624 POST(sys_facl)
8626 switch (ARG2 /*cmd*/) {
8627 case VKI_SETACL:
8628 break;
8629 case VKI_GETACL:
8630 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8631 break;
8632 case VKI_GETACLCNT:
8633 break;
8634 case VKI_ACE_SETACL:
8635 break;
8636 case VKI_ACE_GETACL:
8637 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8638 break;
8639 case VKI_ACE_GETACLCNT:
8640 break;
8641 default:
8642 vg_assert(0);
8643 break;
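/* Helpers used by the door syscall wrappers: pre_check_and_close_fds()
   vets descriptors passed with DOOR_RELEASE (and records their close when
   --track-fds is active), while post_record_fds() records descriptors
   handed back by the kernel. */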
8647 static Int pre_check_and_close_fds(ThreadId tid, const HChar *name,
8648 vki_door_desc_t *desc_ptr,
8649 vki_uint_t desc_num)
8651 vki_uint_t i;
8653 /* Verify passed file descriptors. */
8654 for (i = 0; i < desc_num; i++) {
8655 vki_door_desc_t *desc = &desc_ptr[i];
8656 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8657 (desc->d_attributes & DOOR_RELEASE)) {
8658 Int fd = desc->d_data.d_desc.d_descriptor;
8660 /* Detect and negate attempts by the client to close Valgrind's fds.
8661 Also if doing -d style logging (which is to fd = 2 = stderr),
8662 don't allow that to be closed either. */
8663 if (!ML_(fd_allowed)(fd, name, tid, False) ||
8664 (fd == 2 && VG_(debugLog_getLevel)() > 0))
8665 return VKI_EBADF;
8669 /* All fds are allowed; record information about the closed ones.
8671 Note: Recording closed fds would normally happen in a post wrapper, but
8672 that is not possible here because door calls are "very blocking": if the
8673 information were recorded only after the syscall finished, it would be
8674 out of date for the whole time the call is blocked in the kernel.
8675 Therefore closed fds are recorded for this specific syscall in the PRE
8676 wrapper. Unfortunately this creates a problem when the syscall fails;
8677 for example, door_call() can fail with EBADF or EFAULT and then no fds
8678 are actually released, leaving the recorded information about open fds
8679 incorrect. This should be very rare (I hope) and such a condition is
8680 also reported in the post wrapper. */
8681 if (VG_(clo_track_fds)) {
8682 for (i = 0; i < desc_num; i++) {
8683 vki_door_desc_t *desc = &desc_ptr[i];
8684 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8685 (desc->d_attributes & DOOR_RELEASE)) {
8686 Int fd = desc->d_data.d_desc.d_descriptor;
8687 ML_(record_fd_close)(tid, fd);
8692 return 0;
8695 static void post_record_fds(ThreadId tid, const HChar *name,
8696 vki_door_desc_t *desc_ptr, vki_uint_t desc_num)
8698 vki_uint_t i;
8700 /* Record returned file descriptors. */
8701 for (i = 0; i < desc_num; i++) {
8702 vki_door_desc_t *desc = &desc_ptr[i];
8703 if (desc->d_attributes & DOOR_DESCRIPTOR) {
8704 Int fd = desc->d_data.d_desc.d_descriptor;
8705 if (!ML_(fd_allowed)(fd, name, tid, True)) {
8706 /* Unfortunately, we cannot recover at this point and have to fail
8707 hard. */
8708 VG_(message)(Vg_UserMsg, "The %s syscall returned an unallowed "
8709 "file descriptor %d.\n", name, fd);
8710 VG_(exit)(101);
8712 else if (VG_(clo_track_fds))
8713 ML_(record_fd_open_named)(tid, fd);
8718 /* Handles repository door protocol request over client door fd. */
8719 static void repository_door_pre_mem_door_call_hook(ThreadId tid, Int fd,
8720 void *data_ptr,
8721 SizeT data_size)
8723 vki_rep_protocol_request_t *p = (vki_rep_protocol_request_t *) data_ptr;
8724 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8725 "request->rpr_request)", p->rpr_request);
8727 if (ML_(safe_to_deref)(p, sizeof(vki_rep_protocol_request_t))) {
8728 switch (p->rpr_request) {
8729 case VKI_REP_PROTOCOL_CLOSE:
8730 break;
8731 case VKI_REP_PROTOCOL_ENTITY_SETUP:
8733 struct vki_rep_protocol_entity_setup *r =
8734 (struct vki_rep_protocol_entity_setup *) p;
8735 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8736 "entity_setup->rpr_entityid)", r->rpr_entityid);
8737 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8738 "entity_setup->rpr_entitytype)", r->rpr_entitytype);
8740 break;
8741 case VKI_REP_PROTOCOL_ENTITY_NAME:
8743 struct vki_rep_protocol_entity_name *r =
8744 (struct vki_rep_protocol_entity_name *) p;
8745 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8746 "entity_name->rpr_entityid)", r->rpr_entityid);
8747 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8748 "entity_name->rpr_answertype)", r->rpr_answertype);
8750 break;
8751 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 24) && (SOLARIS_REPCACHE_PROTOCOL_VERSION <= 30)
8752 case VKI_REP_PROTOCOL_ENTITY_FMRI:
8754 struct vki_rep_protocol_entity_fmri *r =
8755 (struct vki_rep_protocol_entity_fmri *) p;
8756 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8757 "entity_fmri->rpr_entityid)", r->rpr_entityid);
8759 break;
8760 #endif /* 24 <= SOLARIS_REPCACHE_PROTOCOL_VERSION =< 30 */
8761 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25)
8762 case VKI_REP_PROTOCOL_ENTITY_GET_ROOT:
8764 struct vki_rep_protocol_entity_root *r =
8765 (struct vki_rep_protocol_entity_root *) p;
8766 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8767 "entity_root->rpr_entityid)", r->rpr_entityid);
8768 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8769 "entity_root->rpr_outid)", r->rpr_outid);
8771 break;
8772 #endif /* SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25 */
8773 case VKI_REP_PROTOCOL_ENTITY_GET:
8775 struct vki_rep_protocol_entity_get *r =
8776 (struct vki_rep_protocol_entity_get *) p;
8777 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8778 "entity_get->rpr_entityid)", r->rpr_entityid);
8779 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8780 "entity_get->rpr_object)", r->rpr_object);
8782 break;
8783 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD:
8784 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 31)
8785 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD_COMPOSED:
8786 #endif
8788 struct vki_rep_protocol_entity_get_child *r =
8789 (struct vki_rep_protocol_entity_get_child *) p;
8790 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8791 "entity_get_child->rpr_entityid)", r->rpr_entityid);
8792 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8793 "entity_get_child->rpr_childid)", r->rpr_childid);
8794 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8795 "entity_get_child->rpr_name)", (Addr) r->rpr_name);
8797 break;
8798 case VKI_REP_PROTOCOL_ENTITY_GET_PARENT:
8800 struct vki_rep_protocol_entity_parent *r =
8801 (struct vki_rep_protocol_entity_parent *) p;
8802 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8803 "entity_get_parent->rpr_entityid)", r->rpr_entityid);
8804 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8805 "entity_get_parent->rpr_outid)", r->rpr_outid);
8807 break;
8808 case VKI_REP_PROTOCOL_ENTITY_RESET:
8810 struct vki_rep_protocol_entity_reset *r =
8811 (struct vki_rep_protocol_entity_reset *) p;
8812 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8813 "entity_reset->rpr_entityid)", r->rpr_entityid);
8815 break;
8816 case VKI_REP_PROTOCOL_ENTITY_TEARDOWN:
8818 struct vki_rep_protocol_entity_teardown *r =
8819 (struct vki_rep_protocol_entity_teardown *) p;
8820 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8821 "entity_teardown->rpr_entityid)", r->rpr_entityid);
8823 break;
8824 case VKI_REP_PROTOCOL_ITER_READ:
8826 struct vki_rep_protocol_iter_read *r =
8827 (struct vki_rep_protocol_iter_read *) p;
8828 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8829 "iter_read->rpr_iterid)", r->rpr_iterid);
8830 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8831 "iter_read->rpr_sequence)", r->rpr_sequence);
8832 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8833 "iter_read->rpr_entityid)", r->rpr_entityid);
8835 break;
8836 case VKI_REP_PROTOCOL_ITER_READ_VALUE:
8838 struct vki_rep_protocol_iter_read_value *r =
8839 (struct vki_rep_protocol_iter_read_value *) p;
8840 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8841 "iter_read_value->rpr_iterid)", r->rpr_iterid);
8842 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8843 "iter_read_value->rpr_sequence)", r->rpr_sequence);
8845 break;
8846 case VKI_REP_PROTOCOL_ITER_RESET:
8847 case VKI_REP_PROTOCOL_ITER_SETUP:
8848 case VKI_REP_PROTOCOL_ITER_TEARDOWN:
8850 struct vki_rep_protocol_iter_request *r =
8851 (struct vki_rep_protocol_iter_request *) p;
8852 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8853 "iter_request->rpr_iterid)", r->rpr_iterid);
8855 break;
8856 case VKI_REP_PROTOCOL_ITER_START:
8858 struct vki_rep_protocol_iter_start *r =
8859 (struct vki_rep_protocol_iter_start *) p;
8860 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8861 "iter_start->rpr_iterid)", r->rpr_iterid);
8862 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8863 "iter_start->rpr_entity)", r->rpr_entity);
8864 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8865 "iter_start->rpr_itertype)", r->rpr_itertype);
8866 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8867 "iter_start->rpr_flags)", r->rpr_flags);
8868 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8869 "iter_start->rpr_pattern)", (Addr) r->rpr_pattern);
8871 break;
8872 case VKI_REP_PROTOCOL_PROPERTY_GET_TYPE:
8873 case VKI_REP_PROTOCOL_PROPERTY_GET_VALUE:
8875 struct vki_rep_protocol_property_request *r =
8876 (struct vki_rep_protocol_property_request *) p;
8877 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8878 "property_request->rpr_entityid)", r->rpr_entityid);
8880 break;
8881 default:
8882 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8883 " where rpr_request=%#x.", p->rpr_request);
8884 /* NOTREACHED */
8885 break;
8890 /* Handles repository door protocol response over client door fd. */
8891 static void repository_door_post_mem_door_call_hook(ThreadId tid, Int fd,
8892 void *rbuf, SizeT rsize)
8894 /* :TODO: Ideally we would match the response type with the
8895 preceding request, because the response itself does not contain
8896 any type identification.
8897 For now, simply mark the whole response buffer as defined. */
8898 POST_MEM_WRITE((Addr) rbuf, rsize);
8901 /* Pre-syscall checks for params->data_ptr contents of a door_call(). */
8902 static void door_call_pre_mem_params_data(ThreadId tid, Int fd,
8903 void *data_ptr, SizeT data_size)
8905 const HChar *pathname;
8907 /* Get the pathname of the door file descriptor, recording the fd first
8908 if not already done. Needed to identify the door service by its pathname below. */
8909 if (!VG_(clo_track_fds) && !ML_(fd_recorded)(fd)) {
8910 ML_(record_fd_open_named)(tid, fd);
8912 pathname = ML_(find_fd_recorded_by_fd)(fd);
8914 /* Debug-only printing. */
8915 if (0) {
8916 VG_(printf)("PRE(door_call) with fd=%d and filename=%s\n",
8917 fd, pathname);
8920 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8921 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8923 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8924 "kcf_door_arg_t->da_version)", p->da_version);
8925 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8926 "kcf_door_arg_t->da_iskernel)", p->da_iskernel);
8927 PRE_MEM_RASCIIZ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8928 "kcf_door_arg_t->da_u.filename)",
8929 (Addr) p->vki_da_u.filename);
8930 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8931 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8933 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8934 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
8935 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8936 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8937 /* request from an application towards nscd */
8938 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8939 "nss_pheader->p_version)", p->p_version);
8940 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8941 "nss_pheader->dbd_off)", p->dbd_off);
8942 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8943 "nss_pheader->dbd_len)", p->dbd_len);
8944 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8945 "nss_pheader->key_off)", p->key_off);
8946 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8947 "nss_pheader->key_len)", p->key_len);
8948 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8949 "nss_pheader->data_off)", p->data_off);
8950 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8951 "nss_pheader->data_len)", p->data_len);
8952 /* Fields ext_off and ext_len are set only sporadically. */
8953 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8954 "nss_pheader->pbufsiz)", p->pbufsiz);
8955 PRE_MEM_WRITE("door_call(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
8956 (Addr) p, p->pbufsiz);
8958 if (p->dbd_len > 0) {
8959 vki_nss_dbd_t *dbd
8960 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
8962 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR
8963 "\", nss_dbd)", (Addr) dbd, sizeof(vki_nss_dbd_t));
8964 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
8965 if (dbd->o_name != 0)
8966 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8967 "\", nss_dbd->o_name)", (Addr) ((HChar *) p
8968 + p->dbd_off + dbd->o_name));
8969 if (dbd->o_config_name != 0)
8970 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8971 "\", nss_dbd->o_config_name)",
8972 (Addr) ((HChar *) p + p->dbd_off
8973 + dbd->o_config_name));
8974 if (dbd->o_default_config != 0)
8975 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8976 "\", nss_dbd->o_default_config)",
8977 (Addr) ((HChar *) p + p->dbd_off +
8978 dbd->o_default_config));
8982 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", nss->key)",
8983 (Addr) ((HChar *) p + p->key_off), p->key_len);
8984 } else {
8985 /* request from a child nscd towards parent nscd */
8986 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8989 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8990 vki_repository_door_request_t *p =
8991 (vki_repository_door_request_t *) data_ptr;
8993 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8994 "request->rdr_version)", p->rdr_version);
8995 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8996 "request->rdr_request)", p->rdr_request);
8997 if (ML_(safe_to_deref)(p, sizeof(vki_repository_door_request_t))) {
8998 if (p->rdr_version == VKI_REPOSITORY_DOOR_VERSION) {
8999 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
9000 "request->rdr_flags)", p->rdr_flags);
9001 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
9002 "request->rdr_debug)", p->rdr_debug);
9003 } else {
9004 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
9005 " where version=%u.", p->rdr_version);
9008 } else {
9009 const OpenDoor *open_door = door_find_by_fd(fd);
9010 if ((open_door != NULL) && (open_door->pre_mem_hook != NULL)) {
9011 open_door->pre_mem_hook(tid, fd, data_ptr, data_size);
9012 } else {
9013 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
9015 /* Be very lax about door syscall handling over unrecognized
9016  * door file descriptors. This does not require that the full
9017  * buffer is initialized when writing. Without this, programs
9018  * using libdoor(3LIB) functionality with completely proprietary
9019  * semantics may report a large number of false positives. */
9021 } else {
9022 static Int moans = 3;
9024 /* generic default */
9025 if (moans > 0 && !VG_(clo_xml)) {
9026 moans--;
9027 VG_(umsg)(
9028 "Warning: noted and generically handled door call\n"
9029 " on file descriptor %d (filename: %s).\n"
9030 " This could cause spurious value errors to appear.\n"
9031 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9032 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9033 fd, pathname);
9035 PRE_MEM_READ("door_call(params->data_ptr)",
9036 (Addr) data_ptr, data_size);
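/* Illustrative sketch (compiled out; not part of the wrapper): a minimal
   door_call(3C) client showing which door_arg_t fields the PRE handler above
   expects to be initialised (data_ptr/data_size, desc_ptr/desc_num,
   rbuf/rsize). The door path "/var/run/example_door" and the function name
   are made up for the example. */
#if 0
#include <door.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_door_client(void)
{
   char request[] = "ping";
   char reply[128];
   door_arg_t arg;

   int fd = open("/var/run/example_door", O_RDONLY);
   if (fd < 0)
      return -1;

   /* All six fields checked by the PRE wrapper are filled in here. */
   arg.data_ptr = request;
   arg.data_size = sizeof(request);
   arg.desc_ptr = NULL;          /* no descriptors are passed */
   arg.desc_num = 0;
   arg.rbuf = reply;             /* result buffer supplied by the caller */
   arg.rsize = sizeof(reply);

   if (door_call(fd, &arg) != 0) {
      close(fd);
      return -1;
   }

   printf("server answered %zu bytes\n", arg.data_size);
   close(fd);
   return 0;
}
#endif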
9042 /* Post-syscall checks for params->rbuf contents of a door_call(). */
9043 static void door_call_post_mem_params_rbuf(ThreadId tid, Int fd,
9044 void *rbuf, SizeT rsize,
9045 const vki_door_desc_t *desc_ptr,
9046 vki_uint_t desc_num)
9048 const HChar *pathname = ML_(find_fd_recorded_by_fd)(fd);
9050 /* Debug-only printing. */
9051 if (0) {
9052 VG_(printf)("POST(door_call) with fd=%d and filename=%s\n",
9053 fd, pathname);
9056 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9057 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) rbuf;
9059 POST_FIELD_WRITE(p->da_version);
9060 POST_FIELD_WRITE(p->vki_da_u.result.status);
9061 POST_MEM_WRITE((Addr) p->vki_da_u.result.signature,
9062 p->vki_da_u.result.siglen);
9063 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9064 vki_nss_pheader_t *p = (vki_nss_pheader_t *) rbuf;
9066 POST_FIELD_WRITE(p->nsc_callnumber);
9067 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9068 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9069 /* response from nscd to an application */
9070 POST_FIELD_WRITE(p->p_status);
9071 POST_FIELD_WRITE(p->p_errno);
9072 POST_FIELD_WRITE(p->p_herrno);
9073 POST_FIELD_WRITE(p->dbd_off);
9074 POST_FIELD_WRITE(p->dbd_len);
9075 POST_FIELD_WRITE(p->key_off);
9076 POST_FIELD_WRITE(p->key_len);
9077 POST_FIELD_WRITE(p->data_off);
9078 POST_FIELD_WRITE(p->data_len);
9079 POST_FIELD_WRITE(p->ext_off);
9080 POST_FIELD_WRITE(p->ext_len);
9081 POST_FIELD_WRITE(p->pbufsiz);
9083 if (p->pbufsiz <= rsize) {
9084 if (p->dbd_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9085 SizeT len = MIN(p->dbd_len, p->pbufsiz - p->dbd_off);
9086 POST_MEM_WRITE((Addr) ((HChar *) p + p->dbd_off), len);
9089 if (p->key_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9090 SizeT len = MIN(p->key_len, p->pbufsiz - p->key_off);
9091 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), len);
9094 if (p->data_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9095 SizeT len = MIN(p->data_len, p->pbufsiz - p->data_off);
9096 POST_MEM_WRITE((Addr) ((HChar *) p + p->data_off), len);
9099 if (p->ext_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9100 SizeT len = MIN(p->ext_len, p->pbufsiz - p->ext_off);
9101 POST_MEM_WRITE((Addr) ((HChar *) p + p->ext_off), len);
9104 } else {
9105 /* response from parent nscd to a child nscd */
9106 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9109 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9110 POST_FIELD_WRITE(((vki_repository_door_response_t *) rbuf)->rdr_status);
9111 /* A new client door fd is passed over the global repository door. */
9112 if ((desc_ptr != NULL) && (desc_num > 0)) {
9113 if (desc_ptr[0].d_attributes & DOOR_DESCRIPTOR) {
9114 door_record_client(tid, desc_ptr[0].d_data.d_desc.d_descriptor,
9115 repository_door_pre_mem_door_call_hook,
9116 repository_door_post_mem_door_call_hook);
9119 } else {
9120 const OpenDoor *open_door = door_find_by_fd(fd);
9121 if ((open_door != NULL) && (open_door->post_mem_hook != NULL)) {
9122 open_door->post_mem_hook(tid, fd, rbuf, rsize);
9123 } else {
9124 /* generic default */
9125 POST_MEM_WRITE((Addr) rbuf, rsize);
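/* Illustrative sketch (compiled out): how a door client picks up a file
   descriptor passed back by the server, which is the situation the
   repository-door branch above tracks via door_record_client(). The field
   names follow <sys/door.h>; the helper name is made up. */
#if 0
#include <door.h>
#include <sys/types.h>

/* Given a door_arg_t filled in by a successful door_call(), return the first
   file descriptor the server passed back, or -1 if there is none. */
static int example_first_returned_fd(const door_arg_t *arg)
{
   uint_t i;
   for (i = 0; i < arg->desc_num; i++) {
      if (arg->desc_ptr[i].d_attributes & DOOR_DESCRIPTOR)
         return arg->desc_ptr[i].d_data.d_desc.d_descriptor;
   }
   return -1;
}
#endif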
9130 /* Pre-syscall checks for data_ptr contents in a door_return(). */
9131 static void door_return_pre_mem_data(ThreadId tid, Addr server_procedure,
9132 void *data_ptr, SizeT data_size)
9134 if ((data_size == 0) || (server_procedure == 0)) {
9135 /* There is nothing to check. This usually happens during the thread's
9136 first call to door_return(). */
9137 return;
9140 /* Get the pathname of the door file descriptor based on the
9141 door server procedure (that is all we have).
9142 Needed to identify the door service by its pathname below. */
9143 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9144 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9145 Int fd = (open_door != NULL) ? open_door->fd : -1;
9147 /* Debug-only printing. */
9148 if (0) {
9149 VG_(printf)("PRE(door_return) with fd=%d and filename=%s "
9150 "(nr_doors_recorded=%u)\n",
9151 fd, pathname, nr_doors_recorded);
9154 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9155 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9157 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9158 "kcf_door_arg_t->da_version)", p->da_version);
9159 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9160 "kcf_door_arg_t->da_u.result.status)",
9161 p->vki_da_u.result.status);
9162 PRE_MEM_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9163 "kcf_door_arg_t->da_u.result.signature)",
9164 (Addr) p->vki_da_u.result.signature,
9165 p->vki_da_u.result.siglen);
9166 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9167 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9169 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9170 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
9171 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9172 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9173 /* response from nscd to an application */
9174 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9175 "nss_pheader->p_status)", p->p_status);
9176 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9177 "nss_pheader->p_errno)", p->p_errno);
9178 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9179 "nss_pheader->p_herrno)", p->p_herrno);
9180 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9181 "nss_pheader->dbd_off)", p->dbd_off);
9182 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9183 "nss_pheader->dbd_len)", p->dbd_len);
9184 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9185 "nss_pheader->data_off)", p->data_off);
9186 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9187 "nss_pheader->data_len)", p->data_len);
9188 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9189 "nss_pheader->ext_off)", p->ext_off);
9190 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9191 "nss_pheader->ext_len)", p->ext_len);
9192 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9193 "nss_pheader->pbufsiz)", p->pbufsiz);
9194 PRE_MEM_WRITE("door_return(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
9195 (Addr) p, p->pbufsiz);
9196 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9197 "\", nss->data)",
9198 (Addr) ((HChar *) p + p->data_off), p->data_len);
9199 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9200 "\", nss->ext)",
9201 (Addr) ((HChar *) p + p->ext_off), p->ext_len);
9202 } else {
9203 /* response from parent nscd to a child nscd */
9204 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9207 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9208 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9209 } else {
9210 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
9212 /* Be very lax about door syscall handling over unrecognized
9213  * door file descriptors. This does not require that the full
9214  * buffer is initialized when writing. Without this, programs
9215  * using libdoor(3LIB) functionality with completely proprietary
9216  * semantics may report a large number of false positives. */
9218 } else {
9219 static Int moans = 3;
9221 /* generic default */
9222 if (moans > 0 && !VG_(clo_xml)) {
9223 moans--;
9224 VG_(umsg)(
9225 "Warning: noted and generically handled door return\n"
9226 " on file descriptor %d (filename: %s).\n"
9227 " This could cause spurious value errors to appear.\n"
9228 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9229 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9230 fd, pathname);
9232 PRE_MEM_READ("door_return(data_ptr)",
9233 (Addr) data_ptr, data_size);
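/* Illustrative sketch (compiled out): a door server procedure answering a
   request via door_return(3C). The data handed to door_return() is what
   arrives in the PRE handler above as ARG1/ARG2, which is why its contents
   are checked as read. The names are made up for the example. */
#if 0
#include <door.h>

static void example_server_proc(void *cookie, char *argp, size_t arg_size,
                                door_desc_t *dp, uint_t n_desc)
{
   static const char answer[] = "pong";

   /* The result buffer passed here becomes the data_ptr/data_size pair of
      the DOOR_RETURN subcode handled by this wrapper. */
   (void) door_return((char *) answer, sizeof(answer), NULL, 0);
   /* door_return() does not return on success. */
}
#endif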
9238 /* Post-syscall checks for data_ptr contents in a door_return(). */
9239 static void door_return_post_mem_data(ThreadId tid, Addr server_procedure,
9240 void *data_ptr, SizeT data_size)
9242 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9243 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9245 /* Debug-only printing. */
9246 if (0) {
9247 Int fd = (open_door != NULL) ? open_door->fd : -1;
9248 VG_(printf)("POST(door_return) with fd=%d and filename=%s "
9249 "(nr_doors_recorded=%u)\n",
9250 fd, pathname, nr_doors_recorded);
9253 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9254 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9256 POST_FIELD_WRITE(p->da_version);
9257 POST_FIELD_WRITE(p->da_iskernel);
9258 POST_MEM_WRITE((Addr) p->vki_da_u.filename,
9259 VG_(strlen)(p->vki_da_u.filename) + 1);
9260 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9261 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9263 POST_FIELD_WRITE(p->nsc_callnumber);
9264 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9265 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9266 /* request from an application towards nscd */
9267 POST_FIELD_WRITE(p->p_version);
9268 POST_FIELD_WRITE(p->dbd_off);
9269 POST_FIELD_WRITE(p->dbd_len);
9270 POST_FIELD_WRITE(p->key_off);
9271 POST_FIELD_WRITE(p->key_len);
9272 POST_FIELD_WRITE(p->data_off);
9273 POST_FIELD_WRITE(p->data_len);
9274 POST_FIELD_WRITE(p->ext_off);
9275 POST_FIELD_WRITE(p->ext_len);
9276 POST_FIELD_WRITE(p->pbufsiz);
9278 if (p->dbd_len > 0) {
9279 vki_nss_dbd_t *dbd
9280 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
9282 POST_MEM_WRITE((Addr) dbd, sizeof(vki_nss_dbd_t));
9283 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
9284 SizeT headers_size = sizeof(vki_nss_pheader_t)
9285 + sizeof(vki_nss_dbd_t);
9287 if (dbd->o_name != 0) {
9288 HChar *name = (HChar *) p + p->dbd_off + dbd->o_name;
9289 SizeT name_len = VG_(strlen)(name) + 1;
9290 if (name_len <= data_size - headers_size)
9291 POST_MEM_WRITE((Addr) name, name_len);
9293 if (dbd->o_config_name != 0) {
9294 HChar *name = (HChar *) p + p->dbd_off + dbd->o_config_name;
9295 SizeT name_len = VG_(strlen)(name) + 1;
9296 if (name_len <= data_size - headers_size)
9297 POST_MEM_WRITE((Addr) name, name_len);
9299 if (dbd->o_default_config != 0) {
9300 HChar *name = (HChar *) p + p->dbd_off
9301 + dbd->o_default_config;
9302 SizeT name_len = VG_(strlen)(name) + 1;
9303 if (name_len <= data_size - headers_size)
9304 POST_MEM_WRITE((Addr) name, name_len);
9309 if (p->key_len <= data_size - p->key_off)
9310 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), p->key_len);
9311 } else {
9312 /* request from a child nscd towards parent nscd */
9313 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9316 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9317 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9318 } else {
9319 /* generic default */
9320 POST_MEM_WRITE((Addr) data_ptr, data_size);
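/* Note on the generic default above: marking the whole data_ptr buffer as
   written is intentionally coarse; with an unrecognized door service there is
   no protocol knowledge to mark individual fields. */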
9324 PRE(sys_door)
9326 /* int doorfs(long arg1, long arg2, long arg3, long arg4, long arg5,
9327 long subcode); */
9328 ThreadState *tst = VG_(get_ThreadState)(tid);
9329 *flags |= SfMayBlock | SfPostOnFail;
9331 PRINT("sys_door ( %#lx, %#lx, %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3,
9332 ARG4, ARG5, SARG6);
9334 /* The macro PRE_REG_READ6 cannot simply be used because not all ARGs are
9335 used by the door() syscall variants. Note that ARG6 (subcode) is always used. */
9336 #define PRE_REG_READ_SIXTH_ONLY \
9337 if (VG_(tdict).track_pre_reg_read) { \
9338 PRA6("door", long, subcode); \
9341 switch (ARG6 /*subcode*/) {
9342 case VKI_DOOR_CREATE:
9343 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9344 PRE_REG_READ_SIXTH_ONLY;
9345 /* Note: the first argument to DOOR_CREATE is a server procedure.
9346 This could lead to a problem if the kernel tries to force the
9347 execution of this procedure, similarly to how signal handlers are
9348 executed. Fortunately, the kernel never does that (for user-space
9349 server procedures). The procedure is always executed by the standard
9350 library. */
9351 break;
9352 case VKI_DOOR_REVOKE:
9353 PRE_REG_READ1(long, "door", long, arg1);
9354 PRE_REG_READ_SIXTH_ONLY;
9355 if (!ML_(fd_allowed)(ARG1, "door_revoke", tid, False))
9356 SET_STATUS_Failure(VKI_EBADF);
9357 break;
9358 case VKI_DOOR_INFO:
9359 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9360 PRE_REG_READ_SIXTH_ONLY;
9361 PRE_MEM_WRITE("door_info(info)", ARG2, sizeof(vki_door_info_t));
9362 break;
9363 case VKI_DOOR_CALL:
9365 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9366 PRE_REG_READ_SIXTH_ONLY;
9368 Int rval = 0;
9369 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9371 if (!ML_(fd_allowed)(ARG1, "door_call", tid, False))
9372 rval = VKI_EBADF;
9374 PRE_FIELD_READ("door_call(params->data_ptr)", params->data_ptr);
9375 PRE_FIELD_READ("door_call(params->data_size)", params->data_size);
9376 PRE_FIELD_READ("door_call(params->desc_ptr)", params->desc_ptr);
9377 PRE_FIELD_READ("door_call(params->desc_num)", params->desc_num);
9378 PRE_FIELD_READ("door_call(params->rbuf)", params->rbuf);
9379 PRE_FIELD_READ("door_call(params->rsize)", params->rsize);
9381 if (ML_(safe_to_deref)(params, sizeof(*params))) {
9382 if (params->data_ptr)
9383 door_call_pre_mem_params_data(tid, ARG1, params->data_ptr,
9384 params->data_size);
9386 if (params->desc_ptr) {
9387 SizeT desc_size = params->desc_num * sizeof(*params->desc_ptr);
9388 PRE_MEM_READ("door_call(params->desc_ptr)",
9389 (Addr)params->desc_ptr, desc_size);
9391 /* Do not record information about closed fds if we are going
9392 to fail the syscall, because in that case no fds will be closed. */
9393 if ((rval == 0) &&
9394 (ML_(safe_to_deref)(params->desc_ptr, desc_size))) {
9395 rval = pre_check_and_close_fds(tid, "door_call",
9396 params->desc_ptr,
9397 params->desc_num);
9401 if (params->rbuf)
9402 PRE_MEM_WRITE("door_call(params->rbuf)", (Addr)params->rbuf,
9403 params->rsize);
9406 if (rval)
9407 SET_STATUS_Failure(rval);
9409 break;
9410 case VKI_DOOR_BIND:
9411 PRE_REG_READ1(long, "door", long, arg1);
9412 PRE_REG_READ_SIXTH_ONLY;
9413 VG_(unimplemented)("DOOR_BIND");
9414 break;
9415 case VKI_DOOR_UNBIND:
9416 PRE_REG_READ0(long, "door");
9417 PRE_REG_READ_SIXTH_ONLY;
9418 VG_(unimplemented)("DOOR_UNBIND");
9419 break;
9420 case VKI_DOOR_UNREFSYS:
9421 PRE_REG_READ0(long, "door");
9422 PRE_REG_READ_SIXTH_ONLY;
9423 VG_(unimplemented)("DOOR_UNREFSYS");
9424 break;
9425 case VKI_DOOR_UCRED:
9426 PRE_REG_READ1(long, "door", long, arg1);
9427 PRE_REG_READ_SIXTH_ONLY;
9428 VG_(unimplemented)("DOOR_UCRED");
9429 break;
9430 case VKI_DOOR_RETURN:
9431 PRE_REG_READ6(long, "door", long, arg1, long, arg2, long, arg3,
9432 long, arg4, long, arg5, long, subcode);
9434 /* Register %esp/%rsp is read and modified by the syscall. */
9435 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(sp)",
9436 VG_O_STACK_PTR, sizeof(UWord));
9437 /* Register %ebp/%rbp is not really read by the syscall; it is only
9438 written by it. However, it is hard to determine when it is written,
9439 so we make sure it is always valid prior to making the syscall. */
9440 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(bp)",
9441 VG_O_FRAME_PTR, sizeof(UWord));
9443 door_return_pre_mem_data(tid, tst->os_state.door_return_procedure,
9444 (void *) ARG1, ARG2);
9446 /* Do not tell the tool where the syscall is going to write the
9447 resulting data. It is necessary to skip this check because the data
9448 area starting at ARG4-ARG5 (of length ARG5) is usually on a client
9449 thread stack below the stack pointer and therefore it can be marked
9450 by a tool (for example, Memcheck) as inaccessible. It is ok to skip
9451 this check in this case because if there is something wrong with the
9452 data area then the syscall will fail or the error will be handled by
9453 POST_MEM_WRITE() in the post wrapper. */
9454 /*PRE_MEM_WRITE("door_return(sp)", ARG4 - ARG5, ARG5);*/
9456 if (ARG3) {
9457 vki_door_return_desc_t *desc_env = (vki_door_return_desc_t*)ARG3;
9459 PRE_MEM_READ("door_return(desc_env)", ARG3,
9460 sizeof(vki_door_return_desc_t));
9462 if (ML_(safe_to_deref)(desc_env, sizeof(*desc_env)) &&
9463 desc_env->desc_ptr) {
9464 Int rval;
9466 PRE_MEM_READ("door_return(desc_env->desc_ptr)",
9467 (Addr)desc_env->desc_ptr,
9468 desc_env->desc_num * sizeof(*desc_env->desc_ptr));
9470 rval = pre_check_and_close_fds(tid, "door_return",
9471 desc_env->desc_ptr,
9472 desc_env->desc_num);
9473 if (rval)
9474 SET_STATUS_Failure(rval);
9477 tst->os_state.in_door_return = True;
9478 tst->os_state.door_return_procedure = 0;
9479 break;
9480 case VKI_DOOR_GETPARAM:
9481 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9482 PRE_REG_READ_SIXTH_ONLY;
9483 VG_(unimplemented)("DOOR_GETPARAM");
9484 break;
9485 case VKI_DOOR_SETPARAM:
9486 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9487 PRE_REG_READ_SIXTH_ONLY;
9488 if (!ML_(fd_allowed)(ARG1, "door_setparam", tid, False))
9489 SET_STATUS_Failure(VKI_EBADF);
9490 break;
9491 default:
9492 VG_(unimplemented)("Syswrap of the door call with subcode %ld.", SARG6);
9493 /*NOTREACHED*/
9494 break;
9497 #undef PRE_REG_READ_SIXTH_ONLY
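/* Illustrative sketch (compiled out): creating and publishing a door, the
   user-level counterpart of the DOOR_CREATE subcode wrapped above. As noted
   in the DOOR_CREATE case, the server procedure is invoked by the standard
   library, never forced by the kernel. The attach path and names are made up
   for the example. */
#if 0
#include <door.h>
#include <fcntl.h>
#include <stropts.h>    /* fattach() */
#include <unistd.h>

static void example_proc(void *cookie, char *argp, size_t arg_size,
                         door_desc_t *dp, uint_t n_desc)
{
   (void) door_return(NULL, 0, NULL, 0);
}

static int example_publish_door(const char *path)
{
   /* DOOR_CREATE: the first argument is the server procedure. */
   int did = door_create(example_proc, NULL, 0);
   if (did < 0)
      return -1;

   /* Make sure the attach point exists, then attach the door to it. */
   int tmp = open(path, O_CREAT | O_RDWR, 0644);
   if (tmp >= 0)
      close(tmp);
   if (fattach(did, path) != 0) {
      (void) door_revoke(did);
      return -1;
   }
   return did;
}
#endif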
9500 POST(sys_door)
9502 ThreadState *tst = VG_(get_ThreadState)(tid);
9504 vg_assert(SUCCESS || FAILURE);
9506 /* Alter the tst->os_state.in_door_return flag. */
9507 if (ARG6 == VKI_DOOR_RETURN) {
9508 vg_assert(tst->os_state.in_door_return == True);
9509 tst->os_state.in_door_return = False;
9511 /* Inform the tool that %esp/%rsp and %ebp/%rbp were (potentially)
9512 modified. */
9513 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_STACK_PTR,
9514 sizeof(UWord));
9515 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_FRAME_PTR,
9516 sizeof(UWord));
9518 else
9519 vg_assert(tst->os_state.in_door_return == False);
9521 if (FAILURE) {
9522 if (VG_(clo_track_fds)) {
9523 /* See the discussion in pre_check_and_close_fds() to understand this
9524 part. */
9525 Bool loss = False;
9526 switch (ARG6 /*subcode*/) {
9527 case VKI_DOOR_CALL:
9528 if (ERR == VKI_EFAULT || ERR == VKI_EBADF)
9529 loss = True;
9530 break;
9531 case VKI_DOOR_RETURN:
9532 if (ERR == VKI_EFAULT || ERR == VKI_EINVAL)
9533 loss = True;
9534 break;
9535 default:
9536 break;
9538 if (loss)
9539 VG_(message)(Vg_UserMsg, "The door call failed with an "
9540 "unexpected error and information "
9541 "about open file descriptors may "
9542 "now be imprecise.\n");
9545 return;
9548 vg_assert(SUCCESS);
9550 switch (ARG6 /*subcode*/) {
9551 case VKI_DOOR_CREATE:
9552 door_record_server(tid, ARG1, RES);
9553 break;
9554 case VKI_DOOR_REVOKE:
9555 door_record_revoke(tid, ARG1);
9556 if (VG_(clo_track_fds))
9557 ML_(record_fd_close)(tid, ARG1);
9558 break;
9559 case VKI_DOOR_INFO:
9560 POST_MEM_WRITE(ARG2, sizeof(vki_door_info_t));
9561 break;
9562 case VKI_DOOR_CALL:
9564 /* Note that all returned values are stored in the rbuf, i.e.
9565 data_ptr and desc_ptr point into this buffer. */
9566 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9568 if (params->rbuf) {
9569 Addr addr = (Addr)params->rbuf;
9570 if (!VG_(am_find_anon_segment)(addr)) {
9571 /* This segment is new and was mapped by the kernel. */
9572 UInt prot, flags;
9573 SizeT size;
9575 prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
9576 flags = VKI_MAP_ANONYMOUS;
9577 size = VG_PGROUNDUP(params->rsize);
9579 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_door), "
9580 "new segment: vaddr=%#lx, size=%#lx, "
9581 "prot=%#x, flags=%#x, fd=%lu, offset=%#llx\n",
9582 addr, size, prot, flags, (UWord)-1, (ULong)0);
9584 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, flags,
9585 -1, 0);
9587 /* Note: We don't notify the debuginfo reader about this
9588 mapping because there is no debug information stored in
9589 this segment. */
9592 door_call_post_mem_params_rbuf(tid, ARG1, (void *) addr,
9593 params->rsize, params->desc_ptr,
9594 params->desc_num);
9597 if (params->desc_ptr) {
9598 POST_MEM_WRITE((Addr)params->desc_ptr,
9599 params->desc_num * sizeof(vki_door_desc_t));
9600 post_record_fds(tid, "door_call", params->desc_ptr,
9601 params->desc_num);
9604 break;
9605 case VKI_DOOR_BIND:
9606 break;
9607 case VKI_DOOR_UNBIND:
9608 break;
9609 case VKI_DOOR_UNREFSYS:
9610 break;
9611 case VKI_DOOR_UCRED:
9612 break;
9613 case VKI_DOOR_RETURN:
9615 struct vki_door_results *results
9616 = (struct vki_door_results*)VG_(get_SP)(tid);
9618 tst->os_state.door_return_procedure = (Addr)results->pc;
9620 POST_MEM_WRITE((Addr)results, sizeof(*results));
9621 if (results->data_ptr)
9622 door_return_post_mem_data(tid,
9623 tst->os_state.door_return_procedure,
9624 results->data_ptr,
9625 results->data_size);
9626 if (results->desc_ptr) {
9627 POST_MEM_WRITE((Addr)results->desc_ptr,
9628 results->desc_num * sizeof(vki_door_desc_t));
9629 post_record_fds(tid, "door_return", results->desc_ptr,
9630 results->desc_num);
9633 POST_MEM_WRITE((Addr)results->door_info,
9634 sizeof(*results->door_info));
9636 break;
9637 case VKI_DOOR_GETPARAM:
9638 break;
9639 case VKI_DOOR_SETPARAM:
9640 break;
9641 default:
9642 vg_assert(0);
9643 break;
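/* Illustrative sketch (compiled out): why POST(sys_door) above may see a
   brand new anonymous mapping for DOOR_CALL. Per door_call(3C), when the
   supplied rbuf is too small the kernel returns the results in a newly
   mapped buffer which the caller is expected to munmap(). A minimal
   client-side pattern under that assumption: */
#if 0
#include <door.h>
#include <sys/mman.h>

static void example_call_and_release(int door_fd, door_arg_t *arg,
                                     char *mybuf, size_t mybufsz)
{
   arg->rbuf = mybuf;
   arg->rsize = mybufsz;

   if (door_call(door_fd, arg) == 0) {
      /* ... use arg->data_ptr / arg->data_size here ... */

      /* If the kernel substituted its own (larger) buffer, release it. */
      if (arg->rbuf != mybuf)
         (void) munmap(arg->rbuf, arg->rsize);
   }
}
#endif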
9647 PRE(sys_schedctl)
9649 /* caddr_t schedctl(void); */
9650 /* This syscall returns an address that points to struct sc_shared.
9651 This per-thread structure is used as an interface between libc and
9652 the kernel. */
9653 PRINT("sys_schedctl ( )");
9654 PRE_REG_READ0(long, "schedctl");
9657 POST(sys_schedctl)
9659 Addr a = RES;
9660 ThreadState *tst = VG_(get_ThreadState)(tid);
9662 /* Stay sane. */
9663 vg_assert((tst->os_state.schedctl_data == 0) ||
9664 (tst->os_state.schedctl_data == a));
9665 tst->os_state.schedctl_data = a;
9667 /* Returned address points to a block in a mapped page. */
9668 if (!VG_(am_find_anon_segment)(a)) {
9669 Addr page = VG_PGROUNDDN(a);
9670 UInt prot = VKI_PROT_READ | VKI_PROT_WRITE;
9671 # if defined(SOLARIS_SCHEDCTL_PAGE_EXEC)
9672 prot |= VKI_PROT_EXEC;
9673 # endif /* SOLARIS_SCHEDCTL_PAGE_EXEC */
9674 UInt flags = VKI_MAP_ANONYMOUS;
9675 /* The kernel always allocates one page for the sc_shared struct. */
9676 SizeT size = VKI_PAGE_SIZE;
9678 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_schedctl), new segment: "
9679 "vaddr=%#lx, size=%#lx, prot=%#x, flags=%#x, fd=-1, "
9680 "offset=0\n", page, size, prot, flags);
9682 /* The kernel always places a redzone before and after the allocated page.
9683 Verify this now; the tool can later request to allocate
9684 a Valgrind segment and aspacemgr will place it adjacent. */
9685 const NSegment *seg = VG_(am_find_nsegment)(page - 1);
9686 vg_assert(seg == NULL || seg->kind == SkResvn);
9687 seg = VG_(am_find_nsegment)(page + VKI_PAGE_SIZE);
9688 vg_assert(seg == NULL || seg->kind == SkResvn);
9690 /* The address space manager works with whole pages. */
9691 VG_(am_notify_client_mmap)(page, size, prot, flags, -1, 0);
9693 /* Note: There is no need to notify debuginfo about the new mapping
9694 because it is only an anonymous mapping. */
9695 /* Note: schedctl data are cleaned in two places:
9696 - for the tool when the thread exits
9697 - for the core in child's post-fork handler clean_schedctl_data(). */
9700 /* The tool needs per-thread granularity, not whole pages. */
9701 VG_TRACK(new_mem_mmap, a, sizeof(struct vki_sc_shared), True, True, True, 0);
9702 POST_MEM_WRITE(a, sizeof(struct vki_sc_shared));
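/* Illustrative sketch (compiled out): the libc-level view of the schedctl
   interface wrapped above. schedctl_init(3C) issues the schedctl syscall and
   returns a pointer into the per-thread shared page whose mapping the POST
   handler registers. The function name is made up for the example. */
#if 0
#include <schedctl.h>

static void example_preemption_control(void)
{
   /* Returns a pointer into the kernel/libc shared page (NULL on failure). */
   schedctl_t *sc = schedctl_init();
   if (sc == NULL)
      return;

   schedctl_start(sc);    /* ask the kernel to defer preemption ... */
   /* ... short critical section ... */
   schedctl_stop(sc);     /* ... and allow it again */
}
#endif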
9705 PRE(sys_pset)
9707 /* Kernel: int pset(int subcode, long arg1, long arg2, long arg3,
9708 long arg4); */
9709 switch (ARG1 /* subcode */) {
9710 case VKI_PSET_CREATE:
9711 /* Libc: int pset_create(psetid_t *newpset); */
9712 PRINT("sys_pset ( %ld, %#lx )", SARG1, ARG2);
9713 PRE_REG_READ2(long, SC2("pset", "create"), int, subcode,
9714 vki_psetid_t *, newpset);
9715 PRE_MEM_WRITE("pset(newpset)", ARG2, sizeof(vki_psetid_t));
9716 break;
9717 case VKI_PSET_DESTROY:
9718 /* Libc: int pset_destroy(psetid_t pset); */
9719 PRINT("sys_pset ( %ld, %ld )", SARG1, SARG2);
9720 PRE_REG_READ2(long, SC2("pset", "destroy"), int, subcode,
9721 vki_psetid_t, pset);
9722 break;
9723 case VKI_PSET_ASSIGN:
9724 /* Libc: int pset_assign(psetid_t pset, processorid_t cpu,
9725 psetid_t *opset); */
9726 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9727 PRE_REG_READ4(long, SC2("pset", "assign"), int, subcode,
9728 vki_psetid_t, pset, vki_processorid_t, cpu,
9729 vki_psetid_t *, opset);
9730 if (ARG4 != 0)
9731 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9732 break;
9733 case VKI_PSET_INFO:
9734 /* Libc: int pset_info(psetid_t pset, int *type, uint_t *numcpus,
9735 processorid_t *cpulist); */
9736 PRINT("sys_pset ( %ld, %ld, %#lx, %#lx, %#lx )", SARG1, SARG2, ARG3,
9737 ARG4, ARG5);
9738 PRE_REG_READ5(long, SC2("pset", "info"), int, subcode, vki_psetid_t, pset,
9739 int *, type, vki_uint_t *, numcpus,
9740 vki_processorid_t *, cpulist);
9741 if (ARG3 != 0)
9742 PRE_MEM_WRITE("pset(type)", ARG3, sizeof(int));
9743 if (ARG4 != 0)
9744 PRE_MEM_WRITE("pset(numcpus)", ARG4, sizeof(vki_uint_t));
9745 if ((ARG4 != 0) && (ARG5 != 0)) {
9746 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9747 if (ML_(safe_to_deref(numcpus, sizeof(vki_uint_t)))) {
9748 PRE_MEM_WRITE("pset(cpulist)", ARG5,
9749 *numcpus * sizeof(vki_processorid_t));
9750 /* If the cpulist buffer is not large enough, it will hold only as many
9751 entries as fit in the buffer. However, numcpus will contain the
9752 real number of cpus, which will be greater than the value originally
9753 passed in. Stash the original value in the unused ARG6. */
9754 ARG6 = *numcpus;
9757 break;
9758 case VKI_PSET_BIND:
9759 /* Libc: int pset_bind(psetid_t pset, idtype_t idtype, id_t id,
9760 psetid_t *opset); */
9761 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9762 SARG4, ARG5);
9763 PRE_REG_READ5(long, SC2("pset", "bind"), int, subcode, vki_psetid_t, pset,
9764 vki_idtype_t, idtype, vki_id_t, id, vki_psetid_t *, opset);
9765 if (ARG5 != 0)
9766 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9767 break;
9768 case VKI_PSET_BIND_LWP:
9769 /* Libc: int pset_bind_lwp(psetid_t pset, id_t id, pid_t pid,
9770 psetid_t *opset); */
9771 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9772 SARG4, ARG5);
9773 PRE_REG_READ5(long, SC2("pset", "bind_lwp"), int, subcode,
9774 vki_psetid_t, pset, vki_id_t, id, vki_pid_t, pid,
9775 vki_psetid_t *, opset);
9776 if (ARG5 != 0)
9777 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9778 break;
9779 case VKI_PSET_GETLOADAVG:
9780 /* Libc: int pset_getloadavg(psetid_t pset, double loadavg[],
9781 int nelem); */
9782 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9783 PRE_REG_READ4(long, SC2("pset", "getloadavg"), int, subcode,
9784 vki_psetid_t, pset, int *, buf, int, nelem);
9785 if (ARG3 != 0)
9786 PRE_MEM_WRITE("pset(buf)", ARG3, SARG4 * sizeof(int));
9787 break;
9788 case VKI_PSET_LIST:
9789 /* Libc: int pset_list(psetid_t *psetlist, uint_t *numpsets); */
9790 PRINT("sys_pset ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9791 PRE_REG_READ3(long, SC2("pset", "list"), int, subcode,
9792 vki_psetid_t *, psetlist, vki_uint_t *, numpsets);
9793 if (ARG3 != 0)
9794 PRE_MEM_WRITE("pset(numpsets)", ARG3, sizeof(vki_uint_t));
9795 if ((ARG2 != 0) && (ARG3 != 0)) {
9796 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9797 if (ML_(safe_to_deref(numpsets, sizeof(vki_uint_t)))) {
9798 PRE_MEM_WRITE("pset(psetlist)", ARG2,
9799 *numpsets * sizeof(vki_psetid_t));
9800 /* If the psetlist buffer is not large enough, it will hold only as many
9801 entries as fit in the buffer. However, numpsets will contain the
9802 real number of processor sets, which will be greater than the value
9803 originally passed in. Stash the original value in the unused ARG6. */
9804 ARG6 = *numpsets;
9807 break;
9808 # if defined(SOLARIS_PSET_GET_NAME)
9809 case VKI_PSET_GET_NAME:
9810 /* Libc: int pset_get_name(psetid_t psetid, char *buf, uint_t len); */
9811 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9812 PRE_REG_READ4(long, SC2("pset", "get_name"), int, subcode,
9813 vki_psetid_t, pset, char *, buf, vki_uint_t, len);
9814 PRE_MEM_WRITE("pset(buf)", ARG3, ARG4);
9815 break;
9816 # endif /* SOLARIS_PSET_GET_NAME */
9817 case VKI_PSET_SETATTR:
9818 /* Libc: int pset_setattr(psetid_t pset, uint_t attr); */
9819 PRINT("sys_pset ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
9820 PRE_REG_READ3(long, SC2("pset", "setattr"), int, subcode,
9821 vki_psetid_t, pset, vki_uint_t, attr);
9822 break;
9823 case VKI_PSET_GETATTR:
9824 /* Libc: int pset_getattr(psetid_t pset, uint_t *attr); */
9825 PRINT("sys_pset ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
9826 PRE_REG_READ3(long, SC2("pset", "getattr"), int, subcode,
9827 vki_psetid_t, pset, vki_uint_t *, attr);
9828 PRE_MEM_WRITE("pset(attr)", ARG3, sizeof(vki_uint_t));
9829 break;
9830 case VKI_PSET_ASSIGN_FORCED:
9831 /* Libc: int pset_assign_forced(psetid_t pset, processorid_t cpu,
9832 psetid_t *opset); */
9833 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9834 PRE_REG_READ4(long, SC2("pset", "assign_forced"), int, subcode,
9835 vki_psetid_t, pset, vki_processorid_t, cpu,
9836 vki_psetid_t *, opset);
9837 if (ARG4 != 0)
9838 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9839 break;
9840 default:
9841 VG_(unimplemented)("Syswrap of pset syscall with subcode %ld.", SARG1);
9842 /*NOTREACHED*/
9843 break;
9847 POST(sys_pset)
9849 switch (ARG1 /*subcode*/) {
9850 case VKI_PSET_CREATE:
9851 POST_MEM_WRITE(ARG2, sizeof(vki_psetid_t));
9852 break;
9853 case VKI_PSET_DESTROY:
9854 break;
9855 case VKI_PSET_ASSIGN:
9856 if (ARG4 != 0)
9857 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9858 break;
9859 case VKI_PSET_INFO:
9860 if (ARG3 != 0)
9861 POST_MEM_WRITE(ARG3, sizeof(int));
9862 if (ARG4 != 0)
9863 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
9864 if ((ARG4 != 0) && (ARG5 != 0)) {
9865 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9866 POST_MEM_WRITE(ARG5, MIN(*numcpus, ARG6) * sizeof(vki_processorid_t));
9868 break;
9869 case VKI_PSET_BIND:
9870 if (ARG5 != 0)
9871 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9872 break;
9873 case VKI_PSET_BIND_LWP:
9874 if (ARG5 != 0)
9875 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9876 break;
9877 case VKI_PSET_GETLOADAVG:
9878 if (ARG3 != 0)
9879 POST_MEM_WRITE(ARG3, MIN(SARG4, VKI_LOADAVG_NSTATS) * sizeof(int));
9880 break;
9881 case VKI_PSET_LIST:
9882 if (ARG3 != 0)
9883 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9884 if ((ARG2 != 0) && (ARG3 != 0)) {
9885 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9886 POST_MEM_WRITE(ARG2, MIN(*numpsets, ARG6) * sizeof(vki_psetid_t));
9888 break;
9889 # if defined(SOLARIS_PSET_GET_NAME)
9890 case VKI_PSET_GET_NAME:
9891 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
9892 break;
9893 # endif /* SOLARIS_PSET_GET_NAME */
9894 case VKI_PSET_SETATTR:
9895 break;
9896 case VKI_PSET_GETATTR:
9897 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9898 break;
9899 case VKI_PSET_ASSIGN_FORCED:
9900 if (ARG4 != 0)
9901 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9902 break;
9903 default:
9904 vg_assert(0);
9905 break;
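/* Illustrative sketch (compiled out): the usual two-step pset_info(3C)
   pattern that the PSET_INFO pre/post handlers above cater for - numcpus is
   an in/out value and the cpulist buffer may be truncated, hence the
   original count stashed in ARG6. Assumes <sys/pset.h>; the helper name is
   made up. */
#if 0
#include <sys/pset.h>
#include <stdlib.h>

static processorid_t *example_cpus_of_my_pset(uint_t *ncpus_out)
{
   int type;
   uint_t ncpus = 0;

   /* First call with a NULL cpulist just to learn the CPU count. */
   if (pset_info(PS_MYID, &type, &ncpus, NULL) != 0)
      return NULL;

   processorid_t *cpus = malloc(ncpus * sizeof(*cpus));
   if (cpus == NULL)
      return NULL;

   /* Second call fills the buffer; ncpus is updated again by the kernel. */
   if (pset_info(PS_MYID, &type, &ncpus, cpus) != 0) {
      free(cpus);
      return NULL;
   }
   *ncpus_out = ncpus;
   return cpus;
}
#endif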
9909 PRE(sys_resolvepath)
9911 /* int resolvepath(const char *path, char *buf, size_t bufsiz); */
9912 PRINT("sys_resolvepath ( %#lx(%s), %#lx, %lu )", ARG1, (HChar *) ARG1, ARG2,
9913 ARG3);
9914 PRE_REG_READ3(long, "resolvepath", const char *, path, char *, buf,
9915 vki_size_t, bufsiz);
9917 PRE_MEM_RASCIIZ("resolvepath(path)", ARG1);
9918 PRE_MEM_WRITE("resolvepath(buf)", ARG2, ARG3);
9921 POST(sys_resolvepath)
9923 POST_MEM_WRITE(ARG2, RES);
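/* Illustrative sketch (compiled out): resolvepath(2) usage matching the POST
   handler above - on success the syscall returns the number of bytes placed
   in buf and does not append a terminating NUL, so the caller adds one. */
#if 0
#include <unistd.h>
#include <limits.h>
#include <stdio.h>

static void example_resolvepath(const char *path)
{
   char buf[PATH_MAX];

   /* Returns the number of bytes placed in buf; no NUL is appended. */
   int n = resolvepath(path, buf, sizeof(buf) - 1);
   if (n < 0) {
      perror("resolvepath");
      return;
   }
   buf[n] = '\0';
   printf("%s -> %s\n", path, buf);
}
#endif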
9926 PRE(sys_lwp_mutex_timedlock)
9928 /* int lwp_mutex_timedlock(lwp_mutex_t *lp, timespec_t *tsp,
9929 uintptr_t owner); */
9930 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9931 *flags |= SfMayBlock;
9932 PRINT("lwp_mutex_timedlock ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
9933 PRE_REG_READ3(long, "lwp_mutex_timedlock", lwp_mutex_t *, lp,
9934 timespec_t *, tsp, uintptr_t, owner);
9936 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_flag)", lp->vki_mutex_flag);
9937 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_type)", lp->vki_mutex_type);
9938 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_owner)",
9939 lp->vki_mutex_owner);
9940 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_ownerpid)",
9941 lp->vki_mutex_ownerpid);
9942 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_lockw)", lp->vki_mutex_lockw);
9943 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_lockw)",
9944 lp->vki_mutex_lockw);*/
9945 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_waiters)",
9946 lp->vki_mutex_waiters);
9947 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_waiters)",
9948 lp->vki_mutex_waiters);*/
9949 if (ARG2) {
9950 PRE_MEM_READ("lwp_mutex_timedlock(tsp)", ARG2, sizeof(vki_timespec_t));
9951 /*PRE_MEM_WRITE("lwp_mutex_timedlock(tsp)", ARG2,
9952 sizeof(vki_timespec_t));*/
9956 POST(sys_lwp_mutex_timedlock)
9958 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9959 POST_FIELD_WRITE(lp->vki_mutex_owner);
9960 POST_FIELD_WRITE(lp->vki_mutex_ownerpid);
9961 POST_FIELD_WRITE(lp->vki_mutex_lockw);
9962 POST_FIELD_WRITE(lp->vki_mutex_waiters);
9963 if (ARG2)
9964 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
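/* Illustrative sketch (compiled out): the POSIX-level operation that
   typically ends up in this syscall - a contended pthread_mutex_timedlock(3C)
   is assumed here to park in lwp_mutex_timedlock via libc (the exact libc
   path is not part of this wrapper). */
#if 0
#include <pthread.h>
#include <time.h>

static int example_timedlock(pthread_mutex_t *mtx)
{
   struct timespec abs;

   clock_gettime(CLOCK_REALTIME, &abs);
   abs.tv_sec += 2;                       /* give up after roughly 2 seconds */

   if (pthread_mutex_timedlock(mtx, &abs) != 0)
      return -1;                          /* timed out or failed: not locked */
   /* ... critical section ... */
   pthread_mutex_unlock(mtx);
   return 0;
}
#endif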
9967 PRE(sys_lwp_rwlock_sys)
9969 /* int lwp_rwlock_sys(int subcode, lwp_rwlock_t *rwlp, timespec_t *tsp); */
9970 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
9971 switch (ARG1 /*subcode*/) {
9972 case 0:
9973 case 1:
9974 case 2:
9975 case 3:
9976 *flags |= SfMayBlock;
9977 switch (ARG1 /*subcode*/) {
9978 case 0:
9979 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9980 PRE_REG_READ3(long, SC2("lwp_rwlock", "rdlock"), int, subcode,
9981 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9982 break;
9983 case 1:
9984 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9985 PRE_REG_READ3(long, SC2("lwp_rwlock", "wrlock"), int, subcode,
9986 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9987 break;
9988 case 2:
9989 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9990 PRE_REG_READ2(long, SC2("lwp_rwlock", "tryrdlock"), int, subcode,
9991 lwp_rwlock_t *, rwlp);
9992 break;
9993 case 3:
9994 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9995 PRE_REG_READ2(long, SC2("lwp_rwlock", "trywrlock"), int, subcode,
9996 lwp_rwlock_t *, rwlp);
9997 break;
9998 default:
9999 vg_assert(0);
10000 break;
10003 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_type)", rwlp->vki_rwlock_type);
10004 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
10005 rwlp->vki_rwlock_readers);
10006 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
10007 rwlp->vki_rwlock_readers);*/
10009 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
10010 rwlp->mutex.vki_mutex_type);
10011 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_owner)",
10012 rwlp->mutex.vki_mutex_owner);
10013 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_ownerpid)",
10014 rwlp->mutex.vki_mutex_ownerpid);
10015 /* The mutex_lockw member is not really read by the kernel for this
10016 syscall, but it seems better to mark it as read because the associated
10017 mutex has to be locked when locking an rwlock. */
10018 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_lockw)",
10019 rwlp->mutex.vki_mutex_lockw);
10020 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_lockw)",
10021 rwlp->mutex.vki_mutex_lockw);*/
10022 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10023 rwlp->mutex.vki_mutex_waiters);
10024 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10025 rwlp->mutex.vki_mutex_waiters);*/
10027 if ((ARG1 == 0 || ARG1 == 1) && ARG3)
10028 PRE_MEM_READ("lwp_rwlock(tsp)", ARG3, sizeof(vki_timespec_t));
10029 break;
10030 case 4:
10031 PRINT("sys_lwp_rwlock( %ld, %#lx )", SARG1, ARG2);
10032 PRE_REG_READ2(long, SC2("lwp_rwlock", "unlock"), int, subcode,
10033 lwp_rwlock_t *, rwlp);
10034 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
10035 rwlp->mutex.vki_mutex_type);
10036 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
10037 rwlp->vki_rwlock_readers);
10038 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
10039 rwlp->vki_rwlock_readers);*/
10040 break;
10041 default:
10042 VG_(unimplemented)("Syswrap of the lwp_rwlock_sys call with subcode %ld.",
10043 SARG1);
10044 /*NOTREACHED*/
10045 break;
10049 POST(sys_lwp_rwlock_sys)
10051 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
10052 switch (ARG1 /*subcode*/) {
10053 case 0:
10054 case 1:
10055 case 2:
10056 case 3:
10057 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10058 POST_FIELD_WRITE(rwlp->vki_rwlock_owner);
10059 POST_FIELD_WRITE(rwlp->vki_rwlock_ownerpid);
10060 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_lockw);
10061 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_waiters);
10062 break;
10063 case 4:
10064 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10065 break;
10066 default:
10067 vg_assert(0);
10068 break;
10072 PRE(sys_lwp_sema_timedwait)
10074 /* int lwp_sema_timedwait(lwp_sema_t *sema, timespec_t *timeout,
10075 int check_park); */
10076 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10077 *flags |= SfMayBlock;
10078 PRINT("sys_lwp_sema_timedwait ( %#lx, %#lx, %ld )", ARG1, ARG2, SARG3);
10079 PRE_REG_READ3(long, "lwp_sema_timedwait", lwp_sema_t *, sema,
10080 timespec_t *, timeout, int, check_park);
10082 PRE_FIELD_READ("lwp_sema_timedwait(sema->type)", sema->vki_sema_type);
10083 PRE_FIELD_READ("lwp_sema_timedwait(sema->count)", sema->vki_sema_count);
10084 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->count)",
10085 sema->vki_sema_count);*/
10086 PRE_FIELD_READ("lwp_sema_timedwait(sema->waiters)", sema->vki_sema_waiters);
10087 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->waiters)",
10088 sema->vki_sema_waiters);*/
10089 if (ARG2) {
10090 PRE_MEM_READ("lwp_sema_timedwait(timeout)", ARG2,
10091 sizeof(vki_timespec_t));
10092 /*PRE_MEM_WRITE("lwp_sema_timedwait(timeout)", ARG2,
10093 sizeof(vki_timespec_t));*/
10097 POST(sys_lwp_sema_timedwait)
10099 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10100 POST_FIELD_WRITE(sema->vki_sema_count);
10101 POST_FIELD_WRITE(sema->vki_sema_waiters);
10102 if (ARG2)
10103 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
10106 PRE(sys_zone)
10108 /* Kernel: long zone(int cmd, void *arg1, void *arg2, void *arg3,
10109 void *arg4);
10111 switch (ARG1 /*cmd*/) {
10112 case VKI_ZONE_CREATE:
10113 /* Libc: zoneid_t zone_create(const char *name, const char *root,
10114 const struct priv_set *privs,
10115 const char *rctls, size_t rctlsz,
10116 const char *zfs, size_t zfssz,
10117 int *extended_error, int match,
10118 int doi, const bslabel_t *label,
10119 int flags);
10120 Kernel: zoneid_t zone_create(zone_def *zd);
10122 PRINT("sys_zone ( %ld, %#lx )", SARG1, ARG2);
10123 PRE_REG_READ2(long, SC2("zone", "create"), int, cmd,
10124 vki_zone_def *, zd);
10126 vki_zone_def *zd = (vki_zone_def *) ARG2;
10127 PRE_FIELD_READ("zone(zd.zone_name)", zd->zone_name);
10128 PRE_FIELD_READ("zone(zd.zone_root)", zd->zone_root);
10129 PRE_FIELD_READ("zone(zd.zone_privs)", zd->zone_privs);
10130 PRE_FIELD_READ("zone(zd.zone_privssz)", zd->zone_privssz);
10131 PRE_FIELD_READ("zone(zd.rctlbuf)", zd->rctlbuf);
10132 PRE_FIELD_READ("zone(zd.rctlbufsz)", zd->rctlbufsz);
10133 PRE_FIELD_READ("zone(zd.zfsbuf)", zd->zfsbuf);
10134 PRE_FIELD_READ("zone(zd.zfsbufsz)", zd->zfsbufsz);
10135 PRE_FIELD_READ("zone(zd.extended_error)", zd->extended_error);
10136 PRE_FIELD_READ("zone(zd.match)", zd->match);
10137 PRE_FIELD_READ("zone(zd.doi)", zd->doi);
10138 PRE_FIELD_READ("zone(zd.label)", zd->label);
10139 PRE_FIELD_READ("zone(zd.flags)", zd->flags);
10141 if (ML_(safe_to_deref((void *)ARG2, sizeof(vki_zone_def)))) {
10142 if (zd->zone_name)
10143 PRE_MEM_RASCIIZ("zone(zd.zone_name)", (Addr) zd->zone_name);
10144 if (zd->zone_root)
10145 PRE_MEM_RASCIIZ("zone(zd.zone_root)", (Addr) zd->zone_root);
10146 PRE_MEM_READ("zone(zd.zone_privs)", (Addr) zd->zone_privs,
10147 zd->zone_privssz);
10148 PRE_MEM_READ("zone(zd.rctlbuf)", (Addr) zd->rctlbuf,
10149 zd->rctlbufsz);
10150 PRE_MEM_READ("zone(zd.zfsbuf)",
10151 (Addr) zd->zfsbuf, zd->zfsbufsz);
10152 if (zd->label)
10153 PRE_MEM_READ("zone(zd.label)", (Addr) zd->label,
10154 sizeof(vki_bslabel_t));
10156 break;
10157 case VKI_ZONE_DESTROY:
10158 /* Libc: int zone_destroy(zoneid_t zoneid); */
10159 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10160 PRE_REG_READ2(long, SC2("zone", "destroy"), int, cmd,
10161 vki_zoneid_t, zoneid);
10162 break;
10163 case VKI_ZONE_GETATTR:
10164 /* Libc: ssize_t zone_getattr(zoneid_t zoneid, int attr,
10165 void *valp, size_t size);
10167 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
10168 SARG1, SARG2, SARG3, ARG4, SARG5);
10169 PRE_REG_READ5(long, SC2("zone", "getattr"), int, cmd,
10170 vki_zoneid_t, zoneid, int, attr, void *, valp,
10171 vki_size_t, size);
10172 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10173 break;
10174 case VKI_ZONE_ENTER:
10175 /* Libc: int zone_enter(zoneid_t zoneid); */
10176 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10177 PRE_REG_READ2(long, SC2("zone", "enter"), int, cmd,
10178 vki_zoneid_t, zoneid);
10179 break;
10180 case VKI_ZONE_LIST:
10181 /* Libc: int zone_list(zoneid_t *zonelist, uint_t *numzones); */
10182 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10183 PRE_REG_READ3(long, SC2("zone", "list"), int, cmd,
10184 vki_zoneid_t *, zonelist, vki_uint_t *, numzones);
10186 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10188 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10189 if (ARG2)
10190 PRE_MEM_WRITE("zone(zonelist)", ARG2,
10191 *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10193 break;
10194 case VKI_ZONE_SHUTDOWN:
10195 /* Libc: int zone_shutdown(zoneid_t zoneid); */
10196 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10197 PRE_REG_READ2(long, SC2("zone", "shutdown"), int, cmd,
10198 vki_zoneid_t, zoneid);
10199 break;
10200 case VKI_ZONE_LOOKUP:
10201 /* Libc: zoneid_t zone_lookup(const char *name); */
10202 PRINT("sys_zone ( %ld, %#lx(%s) )", SARG1, ARG2, (HChar *) ARG2);
10203 PRE_REG_READ2(long, SC2("zone", "lookup"), int, cmd,
10204 const char *, name);
10205 if (ARG2)
10206 PRE_MEM_RASCIIZ("zone(name)", ARG2);
10207 break;
10208 case VKI_ZONE_BOOT:
10209 /* Libc: int zone_boot(zoneid_t zoneid); */
10210 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10211 PRE_REG_READ2(long, SC2("zone", "boot"), int, cmd,
10212 vki_zoneid_t, zoneid);
10213 break;
10214 case VKI_ZONE_SETATTR:
10215 /* Libc: int zone_setattr(zoneid_t zoneid, int attr, void *valp,
10216 size_t size);
10218 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %lu )",
10219 SARG1, SARG2, SARG3, ARG4, ARG5);
10220 PRE_REG_READ5(long, SC2("zone", "setattr"), int, cmd,
10221 vki_zoneid_t, zoneid, int, attr, void *, valp,
10222 vki_size_t, size);
10223 PRE_MEM_READ("zone(valp)", ARG4, ARG5);
10224 break;
10225 case VKI_ZONE_ADD_DATALINK:
10226 /* Libc: int zone_add_datalink(zoneid_t zoneid,
10227 datalink_id_t linkid);
10229 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10230 PRE_REG_READ3(long, SC2("zone", "add_datalink"), int, cmd,
10231 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10232 break;
10233 case VKI_ZONE_DEL_DATALINK:
10234 /* Libc: int zone_remove_datalink(zoneid_t zoneid,
10235 datalink_id_t linkid);
10237 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10238 PRE_REG_READ3(long, SC2("zone", "del_datalink"), int, cmd,
10239 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10240 break;
10241 case VKI_ZONE_CHECK_DATALINK:
10242 /* Libc: int zone_check_datalink(zoneid_t *zoneidp,
10243 datalink_id_t linkid);
10245 PRINT("sys_zone ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10246 PRE_REG_READ3(long, SC2("zone", "check_datalink"), int, cmd,
10247 vki_zoneid_t *, zoneidp, vki_datalink_id_t, linkid);
10248 PRE_MEM_WRITE("zone(zoneidp)", ARG2, sizeof(vki_zoneid_t));
10249 break;
10250 case VKI_ZONE_LIST_DATALINK:
10251 /* Libc: int zone_list_datalink(zoneid_t zoneid, int *dlnump,
10252 datalink_id_t *linkids);
10254 PRINT("sys_zone ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
10255 PRE_REG_READ4(long, SC2("zone", "list_datalink"), int, cmd,
10256 vki_zoneid_t, zoneid, int *, dlnump,
10257 vki_datalink_id_t *, linkids);
10259 PRE_MEM_WRITE("zone(dlnump)", ARG3, sizeof(int));
10260 if (ML_(safe_to_deref((void *) ARG3, sizeof(int)))) {
10261 if (ARG4)
10262 PRE_MEM_WRITE("zone(linkids)", ARG4,
10263 *(int *) ARG3 * sizeof(vki_datalink_id_t));
10265 break;
10266 #if defined(SOLARIS_ZONE_DEFUNCT)
10267 case VKI_ZONE_LIST_DEFUNCT:
10268 /* Libc: int zone_list_defunct(uint64_t *uniqidlist,
10269 uint_t *numzones);
10271 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10272 PRE_REG_READ3(long, SC2("zone", "list_defunct"), int, cmd,
10273 vki_uint64_t *, uniqidlist, vki_uint_t *, numzones);
10275 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10277 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10278 if (ARG2)
10279 PRE_MEM_WRITE("zone(uniqidlist)", ARG2,
10280 *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10282 break;
10283 case VKI_ZONE_GETATTR_DEFUNCT:
10284 /* Libc: ssize_t zone_getattr_defunct(uint64_t uniqid, int attr,
10285 void *valp, size_t size);
10286 Kernel: ssize_t zone_getattr_defunct(uint64_t *uniqid, int attr,
10287 void *valp, size_t size);
10289 PRINT("sys_zone ( %ld, %#lx, %ld, %#lx, %lu )",
10290 SARG1, ARG2, SARG3, ARG4, ARG5);
10291 PRE_REG_READ5(long, SC2("zone", "getattr_defunct"), int, cmd,
10292 vki_uint64_t *, uniqid, int, attr,
10293 void *, valp, vki_size_t, size);
10295 PRE_MEM_READ("zone(uniqid)", ARG2, sizeof(vki_uint64_t));
10296 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10297 break;
10298 #endif /* SOLARIS_ZONE_DEFUNCT */
10299 default:
10300 VG_(unimplemented)("Syswrap of the zone call with cmd %ld.", SARG1);
10301 /*NOTREACHED*/
10302 break;
10307 POST(sys_zone)
10309 switch (ARG1 /*cmd*/) {
10310 case VKI_ZONE_CREATE:
10311 case VKI_ZONE_DESTROY:
10312 break;
10313 case VKI_ZONE_GETATTR:
10314 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10315 break;
10316 case VKI_ZONE_ENTER:
10317 break;
10318 case VKI_ZONE_LIST:
10319 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10320 break;
10321 case VKI_ZONE_SHUTDOWN:
10322 case VKI_ZONE_LOOKUP:
10323 case VKI_ZONE_BOOT:
10324 case VKI_ZONE_SETATTR:
10325 case VKI_ZONE_ADD_DATALINK:
10326 case VKI_ZONE_DEL_DATALINK:
10327 break;
10328 case VKI_ZONE_CHECK_DATALINK:
10329 POST_MEM_WRITE(ARG2, sizeof(vki_zoneid_t));
10330 break;
10331 case VKI_ZONE_LIST_DATALINK:
10332 POST_MEM_WRITE(ARG4, *(int *) ARG3 * sizeof(vki_datalink_id_t));
10333 break;
10334 #if defined(SOLARIS_ZONE_DEFUNCT)
10335 case VKI_ZONE_LIST_DEFUNCT:
10336 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10337 break;
10338 case VKI_ZONE_GETATTR_DEFUNCT:
10339 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10340 break;
10341 #endif /* SOLARIS_ZONE_DEFUNCT */
10342 default:
10343 vg_assert(0);
10344 break;
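/* Illustrative sketch (compiled out): the zone_list(3C) pattern that the
   ZONE_LIST pre/post handlers above expect - numzones is read first to size
   the zonelist buffer and is updated by the kernel. Assumes <zone.h>; the
   helper name is made up. */
#if 0
#include <zone.h>
#include <sys/types.h>
#include <stdlib.h>

static zoneid_t *example_list_zones(uint_t *nzones_out)
{
   uint_t nzones = 0;

   /* First call: only the count is wanted. */
   if (zone_list(NULL, &nzones) != 0)
      return NULL;

   zoneid_t *ids = malloc(nzones * sizeof(*ids));
   if (ids == NULL)
      return NULL;

   /* Second call fills the buffer; nzones is updated again. */
   if (zone_list(ids, &nzones) != 0) {
      free(ids);
      return NULL;
   }
   *nzones_out = nzones;
   return ids;
}
#endif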
10348 PRE(sys_getcwd)
10350 /* int getcwd(char *buf, size_t size); */
10351 /* Note: Generic getcwd() syswrap can't be used because it expects
10352 a different return value. */
10353 PRINT("sys_getcwd ( %#lx, %lu )", ARG1, ARG2);
10354 PRE_REG_READ2(long, "getcwd", char *, buf, vki_size_t, size);
10355 PRE_MEM_WRITE("getcwd(buf)", ARG1, ARG2);
10358 POST(sys_getcwd)
10360 POST_MEM_WRITE(ARG1, VG_(strlen)((HChar*)ARG1) + 1);
10363 PRE(sys_so_socket)
10365 /* int so_socket(int family, int type, int protocol, char *devpath,
10366 int version); */
10367 PRINT("sys_so_socket ( %ld, %ld, %ld, %#lx(%s), %ld)", SARG1, SARG2, SARG3,
10368 ARG4, (HChar *) ARG4, SARG5);
10369 PRE_REG_READ5(long, "socket", int, family, int, type, int, protocol,
10370 char *, devpath, int, version);
10371 if (ARG4)
10372 PRE_MEM_RASCIIZ("socket(devpath)", ARG4);
10375 POST(sys_so_socket)
10377 SysRes r;
10378 r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
10379 SET_STATUS_from_SysRes(r);
10382 PRE(sys_so_socketpair)
10384 /* int so_socketpair(int sv[2]); */
10385 /* This syscall is used to connect two already created sockets. */
10386 PRINT("sys_so_socketpair ( %#lx )", ARG1);
10387 PRE_REG_READ1(long, "socketpair", int *, sv);
10388 PRE_MEM_READ("socketpair(sv)", ARG1, 2 * sizeof(int));
10389 /*PRE_MEM_WRITE("socketpair(sv)", ARG1, 2 * sizeof(int));*/
10390 if (ML_(safe_to_deref)((void*)ARG1, 2 * sizeof(int))) {
10391 int *fds = (int*)ARG1;
10392 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, False))
10393 SET_STATUS_Failure(VKI_EBADF);
10394 else if (!ML_(fd_allowed)(fds[1], "socketpair", tid, False))
10395 SET_STATUS_Failure(VKI_EBADF);
10399 POST(sys_so_socketpair)
10401 /* The kernel can return new file descriptors; in that case we have to
10402 validate them. */
10403 int *fds = (int*)ARG1;
10404 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
10405 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, True))
10406 SET_STATUS_Failure(VKI_EMFILE);
10407 if (!ML_(fd_allowed)(fds[1], "socketpair", tid, True))
10408 SET_STATUS_Failure(VKI_EMFILE);
10409 if (FAILURE) {
10410 /* One or both of the file descriptors weren't allowed; close the newly
10411 created file descriptors but don't close the already recorded
10412 ones. */
10413 if (!ML_(fd_recorded)(fds[0]))
10414 VG_(close)(fds[0]);
10415 if (!ML_(fd_recorded)(fds[1]))
10416 VG_(close)(fds[1]);
10418 else if (VG_(clo_track_fds)) {
10419 /* Everything went better than expected; record the newly created file
10420 descriptors. Note: If the kernel actually returns the original file
10421 descriptors, then ML_(record_fd_open_nameless) notices that these
10422 file descriptors have already been recorded. */
10423 ML_(record_fd_open_nameless)(tid, fds[0]);
10424 ML_(record_fd_open_nameless)(tid, fds[1]);
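/* A minimal sketch of how libc is assumed to drive these two syscalls when
   implementing socketpair(3SOCKET); the syscall() spelling, the SYS_*
   constants and SOV_DEFAULT are illustrative assumptions, not taken from
   this file. */
#if 0
   int sv[2];
   /* Create two unconnected sockets first... */
   sv[0] = syscall(SYS_so_socket, AF_UNIX, SOCK_STREAM, 0, NULL, SOV_DEFAULT);
   sv[1] = syscall(SYS_so_socket, AF_UNIX, SOCK_STREAM, 0, NULL, SOV_DEFAULT);
   /* ...then ask the kernel to connect them.  It may hand back different
      descriptors in sv[], which is why the POST handler above re-validates
      and re-records both entries. */
   syscall(SYS_so_socketpair, sv);
#endif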
10428 PRE(sys_bind)
10430 /* int bind(int s, struct sockaddr *name, socklen_t namelen,
10431 int version); */
10432 PRINT("sys_bind ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10433 PRE_REG_READ4(long, "bind", int, s, struct sockaddr *, name,
10434 vki_socklen_t, namelen, int, version);
10435 ML_(generic_PRE_sys_bind)(tid, ARG1, ARG2, ARG3);
10438 PRE(sys_listen)
10440 /* int listen(int s, int backlog, int version); */
10441 PRINT("sys_listen ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10442 PRE_REG_READ3(long, "listen", int, s, int, backlog, int, version);
10445 PRE(sys_accept)
10447 #if defined(SOLARIS_NEW_ACCEPT_SYSCALL)
10448 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10449 int version, int flags); */
10450 *flags |= SfMayBlock;
10451 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
10452 SARG5);
10453 PRE_REG_READ5(long, "accept", int, s, struct sockaddr *, addr,
10454 socklen_t *, addrlen, int, version, int, flags);
10455 #else
10456 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10457 int version); */
10458 *flags |= SfMayBlock;
10459 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10460 PRE_REG_READ4(long, "accept", int, s, struct sockaddr *, addr,
10461 socklen_t *, addrlen, int, version);
10462 #endif /* SOLARIS_NEW_ACCEPT_SYSCALL */
10463 ML_(generic_PRE_sys_accept)(tid, ARG1, ARG2, ARG3);
10466 POST(sys_accept)
10468 SysRes r;
10469 r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
10470 ARG1, ARG2, ARG3);
10471 SET_STATUS_from_SysRes(r);
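/* SOLARIS_NEW_ACCEPT_SYSCALL presumably selects kernels whose accept()
   grew an accept4()-style flags argument (SOCK_CLOEXEC and friends); the
   extra argument only needs to be declared as read here, and the generic
   socket-level checking stays the same. */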
10474 PRE(sys_connect)
10476 /* int connect(int s, struct sockaddr *name, socklen_t namelen,
10477 int version); */
10478 *flags |= SfMayBlock;
10479 PRINT("sys_connect ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10480 PRE_REG_READ4(long, "connect", int, s, struct sockaddr *, name,
10481 vki_socklen_t, namelen, int, version);
10482 ML_(generic_PRE_sys_connect)(tid, ARG1, ARG2, ARG3);
10485 PRE(sys_shutdown)
10487 /* Kernel: int shutdown(int sock, int how, int version);
10488 Libc: int shutdown(int sock, int how); */
10490 *flags |= SfMayBlock;
10491 PRINT("sys_shutdown ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10492 PRE_REG_READ3(int, "shutdown", int, sock, int, how, int, version);
10494 /* Be strict. */
10495 if (!ML_(fd_allowed)(ARG1, "shutdown", tid, False))
10496 SET_STATUS_Failure(VKI_EBADF);
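/* The trailing 'version' argument of the kernel-level socket calls
   (so_socket, bind, listen, accept, connect, shutdown, ...) is assumed to
   be the SOV_* socket interface version that libsocket passes through
   (e.g. SOV_DEFAULT); the wrappers only declare it as read and never
   interpret it. */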
10499 PRE(sys_recv)
10501 /* ssize_t recv(int s, void *buf, size_t len, int flags); */
10502 *flags |= SfMayBlock;
10503 PRINT("sys_recv ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10504 PRE_REG_READ4(long, "recv", int, s, void *, buf, vki_size_t, len,
10505 int, flags);
10506 ML_(generic_PRE_sys_recv)(tid, ARG1, ARG2, ARG3);
10509 POST(sys_recv)
10511 ML_(generic_POST_sys_recv)(tid, RES, ARG1, ARG2, ARG3);
10514 PRE(sys_recvfrom)
10516 /* ssize_t recvfrom(int s, void *buf, size_t len, int flags,
10517 struct sockaddr *from, socklen_t *fromlen); */
10518 *flags |= SfMayBlock;
10519 PRINT("sys_recvfrom ( %ld, %#lx, %lu, %ld, %#lx, %#lx )", SARG1, ARG2, ARG3,
10520 SARG4, ARG5, ARG6);
10521 PRE_REG_READ6(long, "recvfrom", int, s, void *, buf, vki_size_t, len,
10522 int, flags, struct sockaddr *, from, socklen_t *, fromlen);
10523 ML_(generic_PRE_sys_recvfrom)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10526 POST(sys_recvfrom)
10528 ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
10529 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10532 PRE(sys_recvmsg)
10534 /* ssize_t recvmsg(int s, struct msghdr *msg, int flags); */
10535 *flags |= SfMayBlock;
10536 PRINT("sys_recvmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10537 PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
10538 ML_(generic_PRE_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10541 POST(sys_recvmsg)
10543 ML_(generic_POST_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2, RES);
10546 PRE(sys_send)
10548 /* ssize_t send(int s, const void *msg, size_t len, int flags); */
10549 *flags |= SfMayBlock;
10550 PRINT("sys_send ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10551 PRE_REG_READ4(long, "send", int, s, const void *, msg, vki_size_t, len,
10552 int, flags);
10553 ML_(generic_PRE_sys_send)(tid, ARG1, ARG2, ARG3);
10556 PRE(sys_sendmsg)
10558 /* ssize_t sendmsg(int s, const struct msghdr *msg, int flags); */
10559 *flags |= SfMayBlock;
10560 PRINT("sys_sendmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10561 PRE_REG_READ3(long, "sendmsg", int, s, const struct msghdr *, msg,
10562 int, flags);
10563 ML_(generic_PRE_sys_sendmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10566 PRE(sys_sendto)
10568 /* ssize_t sendto(int s, const void *msg, size_t len, int flags,
10569 const struct sockaddr *to, int tolen); */
10570 *flags |= SfMayBlock;
10571 PRINT("sys_sendto ( %ld, %#lx, %lu, %ld, %#lx, %ld )", SARG1, ARG2, ARG3,
10572 SARG4, ARG5, SARG6);
10573 PRE_REG_READ6(long, "sendto", int, s, const void *, msg, vki_size_t, len,
10574 int, flags, const struct sockaddr *, to, int, tolen);
10575 ML_(generic_PRE_sys_sendto)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10578 PRE(sys_getpeername)
10580 /* Kernel: int getpeername(int s, struct sockaddr *name,
10581 socklen_t *namelen, int version);
10582 Libc: int getpeername(int s, struct sockaddr *name,
10583 socklen_t *namelen); */
10585 *flags |= SfMayBlock;
10586 PRINT("sys_getpeername ( %ld, %#lx, %#lx, %ld )",
10587 SARG1, ARG2, ARG3, SARG4);
10588 PRE_REG_READ4(long, "getpeername", int, s, struct vki_sockaddr *, name,
10589 vki_socklen_t *, namelen, int, version);
10590 ML_(buf_and_len_pre_check)(tid, ARG2, ARG3, "getpeername(name)",
10591 "getpeername(namelen)");
10593 /* Be strict. */
10594 if (!ML_(fd_allowed)(ARG1, "getpeername", tid, False))
10595 SET_STATUS_Failure(VKI_EBADF);
10598 POST(sys_getpeername)
10600 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES),
10601 ARG2, ARG3, "getpeername(namelen)");
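/* ML_(buf_and_len_pre_check)/ML_(buf_and_len_post_check) are assumed to
   implement the usual value-result length pattern of the socket wrappers:
   the PRE check requires *namelen to be readable and the buffer to be
   addressable up to that length, and on success the POST check marks the
   returned name and the updated length as defined. */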
10604 PRE(sys_getsockname)
10606 /* int getsockname(int s, struct sockaddr *name, socklen_t *namelen,
10607 int version); */
10608 PRINT("sys_getsockname ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10609 PRE_REG_READ4(long, "getsockname", int, s, struct sockaddr *, name,
10610 socklen_t *, namelen, int, version);
10611 ML_(generic_PRE_sys_getsockname)(tid, ARG1, ARG2, ARG3);
10614 POST(sys_getsockname)
10616 ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
10617 ARG1, ARG2, ARG3);
10620 PRE(sys_getsockopt)
10622 /* int getsockopt(int s, int level, int optname, void *optval,
10623 socklen_t *optlen, int version); */
10624 PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx, %ld )", SARG1, SARG2,
10625 SARG3, ARG4, ARG5, SARG6);
10626 PRE_REG_READ6(long, "getsockopt", int, s, int, level, int, optname,
10627 void *, optval, socklen_t *, optlen, int, version);
10628 if (ARG4)
10629 ML_(buf_and_len_pre_check)(tid, ARG4, ARG5, "getsockopt(optval)",
10630 "getsockopt(optlen)");
10633 POST(sys_getsockopt)
10635 if (ARG4)
10636 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES), ARG4,
10637 ARG5, "getsockopt(optlen_out)");
10640 PRE(sys_setsockopt)
10642 /* int setsockopt(int s, int level, int optname, const void *optval,
10643 socklen_t optlen, int version); */
10644 PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %lu, %ld )", SARG1, SARG2,
10645 SARG3, ARG4, ARG5, SARG6);
10646 PRE_REG_READ6(long, "setsockopt", int, s, int, level, int, optname,
10647 const void *, optval, vki_socklen_t, optlen, int, version);
10648 ML_(generic_PRE_sys_setsockopt)(tid, ARG1, ARG2, ARG3, ARG4, ARG5);
10651 PRE(sys_lwp_mutex_register)
10653 /* int lwp_mutex_register(lwp_mutex_t *mp, caddr_t uaddr); */
10654 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t*)ARG1;
10655 PRINT("sys_lwp_mutex_register ( %#lx, %#lx )", ARG1, ARG2);
10656 PRE_REG_READ2(long, "lwp_mutex_register", lwp_mutex_t *, mp,
10657 void *, uaddr);
10658 PRE_FIELD_READ("lwp_mutex_register(mp->mutex_type)", mp->vki_mutex_type);
10661 PRE(sys_lwp_mutex_unlock)
10663 /* int lwp_mutex_unlock(lwp_mutex_t *lp); */
10664 /* see https://github.com/illumos/illumos-gate/blob/master/usr/src/uts/common/syscall/lwp_sobj.c#L3137-L3138
10665 * (illumos, obviously) */
10666 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t*)ARG1;
10667 PRINT("sys_lwp_mutex_unlock ( %#lx )", ARG1);
10668 PRE_REG_READ1(int, "lwp_mutex_unlock", lwp_mutex_t *, lp);
10669 PRE_MEM_READ("lwp_mutex_unlock(lp)", (Addr)lp, sizeof(vki_lwp_mutex_t));
10670 PRE_MEM_WRITE("lwp_mutex_unlock(lp)", (Addr)lp, sizeof(vki_lwp_mutex_t));
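/* Declaring the whole lwp_mutex_t as both read and written is deliberately
   conservative: judging by the illumos code referenced above, the kernel
   inspects the mutex type and waiter state and updates the lock word in
   place, so several fields can be read and modified by this call. */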
10673 POST(sys_lwp_mutex_unlock)
10675 POST_MEM_WRITE(ARG1, sizeof(vki_lwp_mutex_t));
10678 PRE(sys_uucopy)
10680 /* int uucopy(const void *s1, void *s2, size_t n); */
10681 PRINT("sys_uucopy ( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
10682 PRE_REG_READ3(long, "uucopy", const void *, s1, void *, s2, vki_size_t, n);
10684 /* Stay away from V segments. */
10685 if (!ML_(valid_client_addr)(ARG1, ARG3, tid, "uucopy(s1)")) {
10686 SET_STATUS_Failure(VKI_EFAULT);
10688 if (!ML_(valid_client_addr)(ARG2, ARG3, tid, "uucopy(s2)")) {
10689 SET_STATUS_Failure(VKI_EFAULT);
10692 if (FAILURE)
10693 return;
10695 /* XXX This is actually incorrect; we should be able to copy undefined
10696 values through to their new destination. */
10697 PRE_MEM_READ("uucopy(s1)", ARG1, ARG3);
10698 PRE_MEM_WRITE("uucopy(s2)", ARG2, ARG3);
10701 POST(sys_uucopy)
10703 POST_MEM_WRITE(ARG2, ARG3);
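/* For context: uucopy(3C) behaves like memcpy() except that a fault on
   either address makes the call fail with EFAULT instead of delivering
   SIGSEGV, e.g. (illustrative only):

      char buf[64];
      if (uucopy(untrusted_src, buf, sizeof buf) != 0)
         ...errno is EFAULT...

   which is why the PRE handler rejects ranges overlapping Valgrind's own
   segments with VKI_EFAULT before letting the copy go ahead. */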
10706 PRE(sys_umount2)
10708 /* int umount2(const char *file, int mflag); */
10709 *flags |= SfMayBlock;
10710 PRINT("sys_umount2 ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
10711 PRE_REG_READ2(long, "umount2", const char *, file, int, mflag);
10712 PRE_MEM_RASCIIZ("umount2(file)", ARG1);
10715 PRE(fast_gethrtime)
10717 PRINT("fast_gethrtime ( )");
10718 PRE_REG_READ0(long, "gethrtime");
10721 PRE(fast_gethrvtime)
10723 PRINT("fast_gethrvtime ( )");
10724 PRE_REG_READ0(long, "gethrvtime");
10727 PRE(fast_gethrestime)
10729 /* Used by gettimeofday(3C). */
10730 PRINT("fast_gethrestime ( )");
10731 PRE_REG_READ0(long, "gethrestime");
10734 PRE(fast_getlgrp)
10736 /* Fasttrap number shared between gethomelgroup() and getcpuid(). */
10737 PRINT("fast_getlgrp ( )");
10738 PRE_REG_READ0(long, "getlgrp");
10741 #if defined(SOLARIS_GETHRT_FASTTRAP)
10742 PRE(fast_gethrt)
10744 /* Used by gethrtime(3C) when tsc & tscp HWCAPs are present. */
10745 PRINT("fast_gethrt ( )");
10746 PRE_REG_READ0(long, "gethrt");
10749 POST(fast_gethrt)
10751 if (RES == 0)
10752 return;
10754 VG_(change_mapping_ownership)(RES, False);
10756 #endif /* SOLARIS_GETHRT_FASTTRAP */
10758 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
10759 PRE(fast_getzoneoffset)
10761 /* Returns kernel's time zone offset data. */
10762 PRINT("fast_getzoneoffset ( )");
10763 PRE_REG_READ0(long, "get_zone_offset");
10766 POST(fast_getzoneoffset)
10768 if (RES == 0)
10769 return;
10771 VG_(change_mapping_ownership)(RES, False);
10773 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
10775 #undef PRE
10776 #undef POST
10778 /* ---------------------------------------------------------------------
10779 The Solaris syscall table
10780 ------------------------------------------------------------------ */
10782 /* Add a Solaris-specific, arch-independent wrapper to a syscall table. */
10783 #define SOLX_(sysno, name) \
10784 WRAPPER_ENTRY_X_(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10785 #define SOLXY(sysno, name) \
10786 WRAPPER_ENTRY_XY(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10788 #if defined(VGP_x86_solaris)
10789 /* Add an x86-solaris specific wrapper to a syscall table. */
10790 #define PLAX_(sysno, name) \
10791 WRAPPER_ENTRY_X_(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10792 #define PLAXY(sysno, name) \
10793 WRAPPER_ENTRY_XY(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10795 #elif defined(VGP_amd64_solaris)
10796 /* Add an amd64-solaris specific wrapper to a syscall table. */
10797 #define PLAX_(sysno, name) \
10798 WRAPPER_ENTRY_X_(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10799 #define PLAXY(sysno, name) \
10800 WRAPPER_ENTRY_XY(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10802 #else
10803 # error "Unknown platform"
10804 #endif
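/* A minimal sketch of what these wrappers are assumed to expand to (the
   WRAPPER_ENTRY_* helpers come from priv_types_n_macros.h and are expected
   to follow the same shape as on the other ports):

      SOLXY(__NR_read, sys_read)
         ==> [VG_SOLARIS_SYSNO_INDEX(__NR_read)] =
                { vgSysWrap_solaris_sys_read_before,
                  vgSysWrap_solaris_sys_read_after }

   X_ entries leave the POST slot NULL, and syscall numbers with no wrapper
   at all leave the whole entry zeroed, which is what the !sys->before test
   in ML_(get_solaris_syscall_entry) below detects. */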
10807 /* GEN : handlers are in syswrap-generic.c
10808    SOL : handlers are in this file
10809    X_  : PRE handler only
10810    XY  : PRE and POST handlers */
10813 static SyscallTableEntry syscall_table[] = {
10814 SOLX_(__NR_exit, sys_exit), /* 1 */
10815 #if defined(SOLARIS_SPAWN_SYSCALL)
10816 SOLX_(__NR_spawn, sys_spawn), /* 2 */
10817 #endif /* SOLARIS_SPAWN_SYSCALL */
10818 GENXY(__NR_read, sys_read), /* 3 */
10819 GENX_(__NR_write, sys_write), /* 4 */
10820 #if defined(SOLARIS_OLD_SYSCALLS)
10821 SOLXY(__NR_open, sys_open), /* 5 */
10822 #endif /* SOLARIS_OLD_SYSCALLS */
10823 SOLX_(__NR_close, sys_close), /* 6 */
10824 SOLX_(__NR_linkat, sys_linkat), /* 7 */
10825 #if defined(SOLARIS_OLD_SYSCALLS)
10826 GENX_(__NR_link, sys_link), /* 9 */
10827 GENX_(__NR_unlink, sys_unlink), /* 10 */
10828 #endif /* SOLARIS_OLD_SYSCALLS */
10829 SOLX_(__NR_symlinkat, sys_symlinkat), /* 11 */
10830 GENX_(__NR_chdir, sys_chdir), /* 12 */
10831 SOLX_(__NR_time, sys_time), /* 13 */
10832 #if defined(SOLARIS_OLD_SYSCALLS)
10833 GENX_(__NR_chmod, sys_chmod), /* 15 */
10834 GENX_(__NR_chown, sys_chown), /* 16 */
10835 #endif /* SOLARIS_OLD_SYSCALLS */
10836 SOLX_(__NR_brk, sys_brk), /* 17 */
10837 #if defined(SOLARIS_OLD_SYSCALLS)
10838 SOLXY(__NR_stat, sys_stat), /* 18 */
10839 #endif /* SOLARIS_OLD_SYSCALLS */
10840 SOLX_(__NR_lseek, sys_lseek), /* 19 */
10841 GENX_(__NR_getpid, sys_getpid), /* 20 */
10842 SOLXY(__NR_mount, sys_mount), /* 21 */
10843 SOLXY(__NR_readlinkat, sys_readlinkat), /* 22 */
10844 GENX_(__NR_setuid, sys_setuid), /* 23 */
10845 GENX_(__NR_getuid, sys_getuid), /* 24 */
10846 SOLX_(__NR_stime, sys_stime), /* 25 */
10847 GENX_(__NR_alarm, sys_alarm), /* 27 */
10848 #if defined(SOLARIS_OLD_SYSCALLS)
10849 SOLXY(__NR_fstat, sys_fstat), /* 28 */
10850 #endif /* SOLARIS_OLD_SYSCALLS */
10851 GENX_(__NR_pause, sys_pause), /* 29 */
10852 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
10853 SOLXY(__NR_frealpathat, sys_frealpathat), /* 30 */
10854 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
10855 SOLX_(__NR_stty, sys_stty), /* 31 */
10856 SOLXY(__NR_gtty, sys_gtty), /* 32 */
10857 #if defined(SOLARIS_OLD_SYSCALLS)
10858 GENX_(__NR_access, sys_access), /* 33 */
10859 #endif /* SOLARIS_OLD_SYSCALLS */
10860 GENX_(__NR_kill, sys_kill), /* 37 */
10861 SOLX_(__NR_pgrpsys, sys_pgrpsys), /* 39 */
10862 SOLXY(__NR_pipe, sys_pipe), /* 42 */
10863 GENXY(__NR_times, sys_times), /* 43 */
10864 SOLX_(__NR_faccessat, sys_faccessat), /* 45 */
10865 GENX_(__NR_setgid, sys_setgid), /* 46 */
10866 GENX_(__NR_getgid, sys_getgid), /* 47 */
10867 SOLXY(__NR_mknodat, sys_mknodat), /* 48 */
10868 SOLXY(__NR_sysi86, sys_sysi86), /* 50 */
10869 SOLXY(__NR_shmsys, sys_shmsys), /* 52 */
10870 SOLXY(__NR_semsys, sys_semsys), /* 53 */
10871 SOLXY(__NR_ioctl, sys_ioctl), /* 54 */
10872 SOLX_(__NR_fchownat, sys_fchownat), /* 56 */
10873 SOLX_(__NR_fdsync, sys_fdsync), /* 58 */
10874 SOLX_(__NR_execve, sys_execve), /* 59 */
10875 GENX_(__NR_umask, sys_umask), /* 60 */
10876 GENX_(__NR_chroot, sys_chroot), /* 61 */
10877 SOLXY(__NR_fcntl, sys_fcntl), /* 62 */
10878 SOLX_(__NR_renameat, sys_renameat), /* 64 */
10879 SOLX_(__NR_unlinkat, sys_unlinkat), /* 65 */
10880 SOLXY(__NR_fstatat, sys_fstatat), /* 66 */
10881 #if defined(VGP_x86_solaris)
10882 PLAXY(__NR_fstatat64, sys_fstatat64), /* 67 */
10883 #endif /* VGP_x86_solaris */
10884 SOLXY(__NR_openat, sys_openat), /* 68 */
10885 #if defined(VGP_x86_solaris)
10886 PLAXY(__NR_openat64, sys_openat64), /* 69 */
10887 #endif /* VGP_x86_solaris */
10888 SOLXY(__NR_tasksys, sys_tasksys), /* 70 */
10889 SOLXY(__NR_getpagesizes, sys_getpagesizes), /* 73 */
10890 SOLXY(__NR_lwp_park, sys_lwp_park), /* 77 */
10891 SOLXY(__NR_sendfilev, sys_sendfilev), /* 78 */
10892 #if defined(SOLARIS_LWP_NAME_SYSCALL)
10893 SOLXY(__NR_lwp_name, sys_lwp_name), /* 79 */
10894 #endif /* SOLARIS_LWP_NAME_SYSCALL */
10895 #if defined(SOLARIS_OLD_SYSCALLS)
10896 GENX_(__NR_rmdir, sys_rmdir), /* 79 */
10897 GENX_(__NR_mkdir, sys_mkdir), /* 80 */
10898 #endif /* SOLARIS_OLD_SYSCALLS */
10899 GENXY(__NR_getdents, sys_getdents), /* 81 */
10900 SOLXY(__NR_privsys, sys_privsys), /* 82 */
10901 SOLXY(__NR_ucredsys, sys_ucredsys), /* 83 */
10902 SOLXY(__NR_sysfs, sys_sysfs), /* 84 */
10903 SOLXY(__NR_getmsg, sys_getmsg), /* 85 */
10904 SOLX_(__NR_putmsg, sys_putmsg), /* 86 */
10905 #if defined(SOLARIS_OLD_SYSCALLS)
10906 SOLXY(__NR_lstat, sys_lstat), /* 88 */
10907 GENX_(__NR_symlink, sys_symlink), /* 89 */
10908 GENX_(__NR_readlink, sys_readlink), /* 90 */
10909 #endif /* SOLARIS_OLD_SYSCALLS */
10910 GENX_(__NR_setgroups, sys_setgroups), /* 91 */
10911 GENXY(__NR_getgroups, sys_getgroups), /* 92 */
10912 #if defined(SOLARIS_OLD_SYSCALLS)
10913 GENX_(__NR_fchmod, sys_fchmod), /* 93 */
10914 GENX_(__NR_fchown, sys_fchown), /* 94 */
10915 #endif /* SOLARIS_OLD_SYSCALLS */
10916 SOLXY(__NR_sigprocmask, sys_sigprocmask), /* 95 */
10917 SOLX_(__NR_sigsuspend, sys_sigsuspend), /* 96 */
10918 GENXY(__NR_sigaltstack, sys_sigaltstack), /* 97 */
10919 SOLXY(__NR_sigaction, sys_sigaction), /* 98 */
10920 SOLXY(__NR_sigpending, sys_sigpending), /* 99 */
10921 SOLX_(__NR_context, sys_getsetcontext), /* 100 */
10922 SOLX_(__NR_fchmodat, sys_fchmodat), /* 101 */
10923 SOLX_(__NR_mkdirat, sys_mkdirat), /* 102 */
10924 SOLXY(__NR_statvfs, sys_statvfs), /* 103 */
10925 SOLXY(__NR_fstatvfs, sys_fstatvfs), /* 104 */
10926 SOLXY(__NR_nfssys, sys_nfssys), /* 106 */
10927 SOLXY(__NR_waitid, sys_waitid), /* 107 */
10928 SOLX_(__NR_sigsendsys, sys_sigsendsys), /* 108 */
10929 #if defined(SOLARIS_UTIMESYS_SYSCALL)
10930 SOLX_(__NR_utimesys, sys_utimesys), /* 110 */
10931 #endif /* SOLARIS_UTIMESYS_SYSCALL */
10932 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
10933 SOLX_(__NR_utimensat, sys_utimensat), /* 110 */
10934 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
10935 SOLXY(__NR_sigresend, sys_sigresend), /* 111 */
10936 SOLXY(__NR_priocntlsys, sys_priocntlsys), /* 112 */
10937 SOLX_(__NR_pathconf, sys_pathconf), /* 113 */
10938 SOLX_(__NR_mmap, sys_mmap), /* 115 */
10939 GENXY(__NR_mprotect, sys_mprotect), /* 116 */
10940 GENXY(__NR_munmap, sys_munmap), /* 117 */
10941 GENX_(__NR_fchdir, sys_fchdir), /* 120 */
10942 GENXY(__NR_readv, sys_readv), /* 121 */
10943 GENX_(__NR_writev, sys_writev), /* 122 */
10944 #if defined(SOLARIS_UUIDSYS_SYSCALL)
10945 SOLXY(__NR_uuidsys, sys_uuidsys), /* 124 */
10946 #endif /* SOLARIS_UUIDSYS_SYSCALL */
10947 #if defined(HAVE_MREMAP)
10948 GENX_(__NR_mremap, sys_mremap), /* 126 */
10949 #endif /* HAVE_MREMAP */
10950 SOLX_(__NR_mmapobj, sys_mmapobj), /* 127 */
10951 GENX_(__NR_setrlimit, sys_setrlimit), /* 128 */
10952 GENXY(__NR_getrlimit, sys_getrlimit), /* 129 */
10953 #if defined(SOLARIS_OLD_SYSCALLS)
10954 GENX_(__NR_lchown, sys_lchown), /* 130 */
10955 #endif /* SOLARIS_OLD_SYSCALLS */
10956 SOLX_(__NR_memcntl, sys_memcntl), /* 131 */
10957 SOLXY(__NR_getpmsg, sys_getpmsg), /* 132 */
10958 SOLX_(__NR_putpmsg, sys_putpmsg), /* 133 */
10959 #if defined(SOLARIS_OLD_SYSCALLS)
10960 SOLX_(__NR_rename, sys_rename), /* 134 */
10961 #endif /* SOLARIS_OLD_SYSCALLS */
10962 SOLXY(__NR_uname, sys_uname), /* 135 */
10963 SOLX_(__NR_setegid, sys_setegid), /* 136 */
10964 SOLX_(__NR_sysconfig, sys_sysconfig), /* 137 */
10965 SOLXY(__NR_systeminfo, sys_systeminfo), /* 139 */
10966 SOLX_(__NR_seteuid, sys_seteuid), /* 141 */
10967 SOLX_(__NR_forksys, sys_forksys), /* 142 */
10968 #if defined(SOLARIS_GETRANDOM_SYSCALL)
10969 SOLXY(__NR_getrandom, sys_getrandom), /* 143 */
10970 #endif /* SOLARIS_GETRANDOM_SYSCALL */
10971 SOLXY(__NR_sigtimedwait, sys_sigtimedwait), /* 144 */
10972 SOLX_(__NR_yield, sys_yield), /* 146 */
10973 SOLXY(__NR_lwp_sema_post, sys_lwp_sema_post), /* 148 */
10974 SOLXY(__NR_lwp_sema_trywait, sys_lwp_sema_trywait), /* 149 */
10975 SOLX_(__NR_lwp_detach, sys_lwp_detach), /* 150 */
10976 SOLXY(__NR_modctl, sys_modctl), /* 152 */
10977 SOLX_(__NR_fchroot, sys_fchroot), /* 153 */
10978 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
10979 SOLX_(__NR_system_stats, sys_system_stats), /* 154 */
10980 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
10981 SOLXY(__NR_gettimeofday, sys_gettimeofday), /* 156 */
10982 GENXY(__NR_getitimer, sys_getitimer), /* 157 */
10983 GENXY(__NR_setitimer, sys_setitimer), /* 158 */
10984 SOLX_(__NR_lwp_create, sys_lwp_create), /* 159 */
10985 SOLX_(__NR_lwp_exit, sys_lwp_exit), /* 160 */
10986 SOLX_(__NR_lwp_suspend, sys_lwp_suspend), /* 161 */
10987 SOLX_(__NR_lwp_continue, sys_lwp_continue), /* 162 */
10988 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
10989 SOLXY(__NR_lwp_sigqueue, sys_lwp_sigqueue), /* 163 */
10990 #else
10991 SOLXY(__NR_lwp_kill, sys_lwp_kill), /* 163 */
10992 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
10993 SOLX_(__NR_lwp_self, sys_lwp_self), /* 164 */
10994 SOLX_(__NR_lwp_sigmask, sys_lwp_sigmask), /* 165 */
10995 SOLX_(__NR_lwp_private, sys_lwp_private), /* 166 */
10996 SOLXY(__NR_lwp_wait, sys_lwp_wait), /* 167 */
10997 SOLXY(__NR_lwp_mutex_wakeup, sys_lwp_mutex_wakeup), /* 168 */
10998 SOLXY(__NR_lwp_cond_wait, sys_lwp_cond_wait), /* 170 */
10999 SOLXY(__NR_lwp_cond_signal, sys_lwp_cond_signal), /* 171 */
11000 SOLX_(__NR_lwp_cond_broadcast, sys_lwp_cond_broadcast), /* 172 */
11001 SOLXY(__NR_pread, sys_pread), /* 173 */
11002 SOLX_(__NR_pwrite, sys_pwrite), /* 174 */
11003 #if defined(VGP_x86_solaris)
11004 PLAX_(__NR_llseek, sys_llseek32), /* 175 */
11005 #endif /* VGP_x86_solaris */
11006 SOLXY(__NR_lgrpsys, sys_lgrpsys), /* 180 */
11007 SOLXY(__NR_rusagesys, sys_rusagesys), /* 181 */
11008 SOLXY(__NR_port, sys_port), /* 182 */
11009 SOLXY(__NR_pollsys, sys_pollsys), /* 183 */
11010 SOLXY(__NR_labelsys, sys_labelsys), /* 184 */
11011 SOLXY(__NR_acl, sys_acl), /* 185 */
11012 SOLXY(__NR_auditsys, sys_auditsys), /* 186 */
11013 SOLX_(__NR_p_online, sys_p_online), /* 189 */
11014 SOLX_(__NR_sigqueue, sys_sigqueue), /* 190 */
11015 SOLXY(__NR_clock_gettime, sys_clock_gettime), /* 191 */
11016 SOLX_(__NR_clock_settime, sys_clock_settime), /* 192 */
11017 SOLXY(__NR_clock_getres, sys_clock_getres), /* 193 */
11018 SOLXY(__NR_timer_create, sys_timer_create), /* 194 */
11019 SOLX_(__NR_timer_delete, sys_timer_delete), /* 195 */
11020 SOLXY(__NR_timer_settime, sys_timer_settime), /* 196 */
11021 SOLXY(__NR_timer_gettime, sys_timer_gettime), /* 197 */
11022 SOLX_(__NR_timer_getoverrun, sys_timer_getoverrun), /* 198 */
11023 GENXY(__NR_nanosleep, sys_nanosleep), /* 199 */
11024 SOLXY(__NR_facl, sys_facl), /* 200 */
11025 SOLXY(__NR_door, sys_door), /* 201 */
11026 GENX_(__NR_setreuid, sys_setreuid), /* 202 */
11027 GENX_(__NR_setregid, sys_setregid), /* 203 */
11028 SOLXY(__NR_schedctl, sys_schedctl), /* 206 */
11029 SOLXY(__NR_pset, sys_pset), /* 207 */
11030 SOLXY(__NR_resolvepath, sys_resolvepath), /* 209 */
11031 SOLXY(__NR_lwp_mutex_timedlock, sys_lwp_mutex_timedlock), /* 210 */
11032 SOLXY(__NR_lwp_sema_timedwait, sys_lwp_sema_timedwait), /* 211 */
11033 SOLXY(__NR_lwp_rwlock_sys, sys_lwp_rwlock_sys), /* 212 */
11034 #if defined(VGP_x86_solaris)
11035 GENXY(__NR_getdents64, sys_getdents64), /* 213 */
11036 PLAX_(__NR_mmap64, sys_mmap64), /* 214 */
11037 #if defined(SOLARIS_OLD_SYSCALLS)
11038 PLAXY(__NR_stat64, sys_stat64), /* 215 */
11039 PLAXY(__NR_lstat64, sys_lstat64), /* 216 */
11040 PLAXY(__NR_fstat64, sys_fstat64), /* 217 */
11041 #endif /* SOLARIS_OLD_SYSCALLS */
11042 PLAXY(__NR_statvfs64, sys_statvfs64), /* 218 */
11043 PLAXY(__NR_fstatvfs64, sys_fstatvfs64), /* 219 */
11044 #endif /* VGP_x86_solaris */
11045 #if defined(VGP_x86_solaris)
11046 PLAX_(__NR_setrlimit64, sys_setrlimit64), /* 220 */
11047 PLAXY(__NR_getrlimit64, sys_getrlimit64), /* 221 */
11048 PLAXY(__NR_pread64, sys_pread64), /* 222 */
11049 PLAX_(__NR_pwrite64, sys_pwrite64), /* 223 */
11050 #if defined(SOLARIS_OLD_SYSCALLS)
11051 PLAXY(__NR_open64, sys_open64), /* 225 */
11052 #endif /* SOLARIS_OLD_SYSCALLS */
11053 #endif /* VGP_x86_solaris */
11054 SOLXY(__NR_zone, sys_zone), /* 227 */
11055 SOLXY(__NR_getcwd, sys_getcwd), /* 229 */
11056 SOLXY(__NR_so_socket, sys_so_socket), /* 230 */
11057 SOLXY(__NR_so_socketpair, sys_so_socketpair), /* 231 */
11058 SOLX_(__NR_bind, sys_bind), /* 232 */
11059 SOLX_(__NR_listen, sys_listen), /* 233 */
11060 SOLXY(__NR_accept, sys_accept), /* 234 */
11061 SOLX_(__NR_connect, sys_connect), /* 235 */
11062 SOLX_(__NR_shutdown, sys_shutdown), /* 236 */
11063 SOLXY(__NR_recv, sys_recv), /* 237 */
11064 SOLXY(__NR_recvfrom, sys_recvfrom), /* 238 */
11065 SOLXY(__NR_recvmsg, sys_recvmsg), /* 239 */
11066 SOLX_(__NR_send, sys_send), /* 240 */
11067 SOLX_(__NR_sendmsg, sys_sendmsg), /* 241 */
11068 SOLX_(__NR_sendto, sys_sendto), /* 242 */
11069 SOLXY(__NR_getpeername, sys_getpeername), /* 243 */
11070 SOLXY(__NR_getsockname, sys_getsockname), /* 244 */
11071 SOLXY(__NR_getsockopt, sys_getsockopt), /* 245 */
11072 SOLX_(__NR_setsockopt, sys_setsockopt), /* 246 */
11073 SOLXY(__NR_lwp_mutex_unlock, sys_lwp_mutex_unlock), /* 250 */
11074 SOLX_(__NR_lwp_mutex_register, sys_lwp_mutex_register), /* 252 */
11075 SOLXY(__NR_uucopy, sys_uucopy), /* 254 */
11076 SOLX_(__NR_umount2, sys_umount2) /* 255 */
11079 static SyscallTableEntry fasttrap_table[] = {
11080 SOLX_(__NR_gethrtime, fast_gethrtime), /* 3 */
11081 SOLX_(__NR_gethrvtime, fast_gethrvtime), /* 4 */
11082 SOLX_(__NR_gethrestime, fast_gethrestime), /* 5 */
11083 SOLX_(__NR_getlgrp, fast_getlgrp) /* 6 */
11084 #if defined(SOLARIS_GETHRT_FASTTRAP)
11086 SOLXY(__NR_gethrt, fast_gethrt) /* 7 */
11087 #endif /* SOLARIS_GETHRT_FASTTRAP */
11088 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
11090 SOLXY(__NR_getzoneoffset, fast_getzoneoffset) /* 8 */
11091 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
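/* The fasttrap numbers start at 3 (gethrtime), so under the designated
   initializer assumption sketched above, slots 0-2 of fasttrap_table stay
   zeroed and ML_(get_solaris_syscall_entry) reports them as having no
   entry. */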
11095 SyscallTableEntry *ML_(get_solaris_syscall_entry)(UInt sysno)
11097 const UInt syscall_table_size
11098 = sizeof(syscall_table) / sizeof(syscall_table[0]);
11099 const UInt fasttrap_table_size
11100 = sizeof(fasttrap_table) / sizeof(fasttrap_table[0]);
11102 SyscallTableEntry *table;
11103 Int size;
11105 switch (VG_SOLARIS_SYSNO_CLASS(sysno)) {
11106 case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
11107 table = syscall_table;
11108 size = syscall_table_size;
11109 break;
11110 case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
11111 table = fasttrap_table;
11112 size = fasttrap_table_size;
11113 break;
11114 default:
11115 vg_assert(0);
11116 break;
11118 sysno = VG_SOLARIS_SYSNO_INDEX(sysno);
11119 if (sysno < size) {
11120 SyscallTableEntry *sys = &table[sysno];
11121 if (!sys->before)
11122 return NULL; /* no entry */
11123 return sys;
11126 /* Can't find a wrapper. */
11127 return NULL;
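/* A rough sketch, offered as an assumption about the caller rather than a
   statement of its exact code, of how the dispatcher in syswrap-main.c
   consumes this lookup:

      SyscallTableEntry *ent = ML_(get_solaris_syscall_entry)(sysno);
      if (ent == NULL) {
         // unknown syscall: warn and hand it to the kernel unmodified
      } else {
         ent->before(...);        // PRE: check arguments, maybe set status
         // perform (or skip) the actual syscall
         if (ent->after)
            ent->after(...);      // POST: mark the outputs as defined
      }
*/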
11130 #endif // defined(VGO_solaris)
11132 /*--------------------------------------------------------------------*/
11133 /*--- end ---*/
11134 /*--------------------------------------------------------------------*/