2 /*--------------------------------------------------------------------*/
3 /*--- Platform-specific syscalls stuff. syswrap-x86-linux.c ---*/
4 /*--------------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2000-2017 Nicholas Nethercote
11 njn@valgrind.org
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
*/
29 #if defined(VGP_x86_linux)
31 /* TODO/FIXME jrs 20050207: assignments to the syscall return result
32 in interrupted_syscall() need to be reviewed. They don't seem
33 to assign the shadow state.
*/
36 #include "pub_core_basics.h"
37 #include "pub_core_vki.h"
38 #include "pub_core_vkiscnums.h"
39 #include "pub_core_threadstate.h"
40 #include "pub_core_aspacemgr.h"
41 #include "pub_core_debuglog.h"
42 #include "pub_core_libcbase.h"
43 #include "pub_core_libcassert.h"
44 #include "pub_core_libcprint.h"
45 #include "pub_core_libcproc.h"
46 #include "pub_core_libcsignal.h"
47 #include "pub_core_mallocfree.h"
48 #include "pub_core_options.h"
49 #include "pub_core_scheduler.h"
50 #include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
51 #include "pub_core_signals.h"
52 #include "pub_core_syscall.h"
53 #include "pub_core_syswrap.h"
54 #include "pub_core_tooliface.h"
56 #include "priv_types_n_macros.h"
57 #include "priv_syswrap-generic.h" /* for decls of generic wrappers */
58 #include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */
59 #include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
60 #include "priv_syswrap-main.h"
63 /* ---------------------------------------------------------------------
64 clone() handling
65 ------------------------------------------------------------------ */
67 /* Call f(arg1), but first switch stacks, using 'stack' as the new
68 stack, and use 'retaddr' as f's return-to address. Also, clear all
69 the integer registers before entering f.*/
70 __attribute__((noreturn))
71 void ML_(call_on_new_stack_0_1) ( Addr stack,
72 Addr retaddr,
73 void (*f)(Word),
74 Word arg1 );
75 // 4(%esp) == stack
76 // 8(%esp) == retaddr
77 // 12(%esp) == f
78 // 16(%esp) == arg1
79 asm(
80 ".text\n"
81 ".globl vgModuleLocal_call_on_new_stack_0_1\n"
82 "vgModuleLocal_call_on_new_stack_0_1:\n"
83 " movl %esp, %esi\n" // remember old stack pointer
84 " movl 4(%esi), %esp\n" // set stack, assume %esp is now 16-byte aligned
85 " subl $12, %esp\n" // skip 12 bytes
86 " pushl 16(%esi)\n" // arg1 to stack, %esp is 16-byte aligned
87 " pushl 8(%esi)\n" // retaddr to stack
88 " pushl 12(%esi)\n" // f to stack
89 " movl $0, %eax\n" // zero all GP regs
90 " movl $0, %ebx\n"
91 " movl $0, %ecx\n"
92 " movl $0, %edx\n"
93 " movl $0, %esi\n"
94 " movl $0, %edi\n"
95 " movl $0, %ebp\n"
96 " ret\n" // jump to f
97 " ud2\n" // should never get here
98 ".previous\n"
/*
103 Perform a clone system call. clone is strange because it has
104 fork()-like return-twice semantics, so it needs special
105 handling here.
107 Upon entry, we have:
109 int (fn)(void*) in 0+FSZ(%esp)
110 void* child_stack in 4+FSZ(%esp)
111 int flags in 8+FSZ(%esp)
112 void* arg in 12+FSZ(%esp)
113 pid_t* child_tid in 16+FSZ(%esp)
114 pid_t* parent_tid in 20+FSZ(%esp)
115 void* tls_ptr in 24+FSZ(%esp)
117 System call requires:
119 int $__NR_clone in %eax
120 int flags in %ebx
121 void* child_stack in %ecx
122 pid_t* parent_tid in %edx
123 pid_t* child_tid in %edi
124 void* tls_ptr in %esi
126 Returns an Int encoded in the linux-x86 way, not a SysRes.
*/
128 #define FSZ "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
129 #define __NR_CLONE VG_STRINGIFY(__NR_clone)
130 #define __NR_EXIT VG_STRINGIFY(__NR_exit)
132 // See priv_syswrap-linux.h for arg profile.
133 asm(
134 ".text\n"
135 ".globl do_syscall_clone_x86_linux\n"
136 "do_syscall_clone_x86_linux:\n"
137 " push %ebx\n"
138 " push %edi\n"
139 " push %esi\n"
141 /* set up child stack with function and arg */
142 " movl 4+"FSZ"(%esp), %ecx\n" /* syscall arg2: child stack */
143 " movl 12+"FSZ"(%esp), %ebx\n" /* fn arg */
144 " movl 0+"FSZ"(%esp), %eax\n" /* fn */
145 " andl $-16, %ecx\n" /* align to 16-byte */
146 " lea -20(%ecx), %ecx\n" /* allocate 16*n+4 bytes on stack */
147 " movl %ebx, 4(%ecx)\n" /* fn arg */
148 " movl %eax, 0(%ecx)\n" /* fn */
150 /* get other args to clone */
151 " movl 8+"FSZ"(%esp), %ebx\n" /* syscall arg1: flags */
152 " movl 20+"FSZ"(%esp), %edx\n" /* syscall arg3: parent tid * */
153 " movl 16+"FSZ"(%esp), %edi\n" /* syscall arg5: child tid * */
154 " movl 24+"FSZ"(%esp), %esi\n" /* syscall arg4: tls_ptr * */
155 " movl $"__NR_CLONE", %eax\n"
156 " int $0x80\n" /* clone() */
157 " testl %eax, %eax\n" /* child if retval == 0 */
158 " jnz 1f\n"
160 /* CHILD - call thread function */
161 " popl %eax\n" /* child %esp is 16-byte aligned */
162 " call *%eax\n" /* call fn */
164 /* exit with result */
165 " movl %eax, %ebx\n" /* arg1: return value from fn */
166 " movl $"__NR_EXIT", %eax\n"
167 " int $0x80\n"
169 /* Hm, exit returned */
170 " ud2\n"
172 "1:\n" /* PARENT or ERROR */
173 " pop %esi\n"
174 " pop %edi\n"
175 " pop %ebx\n"
176 " ret\n"
177 ".previous\n"
180 #undef FSZ
181 #undef __NR_CLONE
182 #undef __NR_EXIT
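/* Illustrative sketch (added; not from the original file): the child-stack
   preparation done above in assembly, written out as C.  The asm aligns the
   child stack to 16 bytes, backs off 20 bytes so that %esp is 16-byte
   aligned again after the child's "popl %eax", and plants fn at 0(%ecx) and
   arg at 4(%ecx), so that arg becomes fn's first argument once the child
   does "call *%eax".  The function name is hypothetical. */
#if 0
static UWord example_prepare_child_stack ( UWord child_stack_hi,
                                           Int (*fn)(void*), void* arg )
{
   UWord sp = child_stack_hi & ~(UWord)15;  /* andl $-16, %ecx      */
   sp -= 20;                                /* lea  -20(%ecx), %ecx */
   ((UWord*)sp)[0] = (UWord)fn;             /* popped by the child  */
   ((UWord*)sp)[1] = (UWord)arg;            /* seen as fn's arg     */
   return sp;
}
#endif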
185 /* ---------------------------------------------------------------------
186 LDT/GDT simulation
187 ------------------------------------------------------------------ */
189 /* Details of the LDT simulation
190 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
192 When a program runs natively, the linux kernel allows each *thread*
193 in it to have its own LDT. Almost all programs never do this --
194 it's wildly unportable, after all -- and so the kernel never
195 allocates the structure, which is just as well as an LDT occupies
196 64k of memory (8192 entries of size 8 bytes).
198 A thread may choose to modify its LDT entries, by doing the
199 __NR_modify_ldt syscall. In such a situation the kernel will then
200 allocate an LDT structure for it. Each LDT entry is basically a
201 (base, limit) pair. A virtual address in a specific segment is
202 translated to a linear address by adding the segment's base value.
203 In addition, the virtual address must not exceed the limit value.
205 To use an LDT entry, a thread loads one of the segment registers
206 (%cs, %ss, %ds, %es, %fs, %gs) with the index of the LDT entry (0
207 .. 8191) it wants to use. In fact, the required value is (index <<
208 3) + 7, but that's not important right now. Any normal instruction
209 which includes an addressing mode can then be made relative to that
210 LDT entry by prefixing the insn with a so-called segment-override
211 prefix, a byte which indicates which of the 6 segment registers
212 holds the LDT index.
214 Now, a key constraint is that valgrind's address checks operate in
215 terms of linear addresses. So we have to explicitly translate
216 virtual addrs into linear addrs, and that means doing a complete
217 LDT simulation.
219 Calls to modify_ldt are intercepted. For each thread, we maintain
220 an LDT (with the same normally-never-allocated optimisation that
221 the kernel does). This is updated as expected via calls to
222 modify_ldt.
224 When a thread does an amode calculation involving a segment
225 override prefix, the relevant LDT entry for the thread is
226 consulted. It all works.
228 There is a conceptual problem, which appears when switching back to
229 native execution, either temporarily to pass syscalls to the
230 kernel, or permanently, when debugging V. Problem at such points
231 is that it's pretty pointless to copy the simulated machine's
232 segment registers to the real machine, because we'd also need to
233 copy the simulated LDT into the real one, and that's prohibitively
234 expensive.
236 Fortunately it looks like no syscalls rely on the segment regs or
237 LDT being correct, so we can get away with it. Apart from that the
238 simulation is pretty straightforward. All 6 segment registers are
239 tracked, although only %ds, %es, %fs and %gs are allowed as
240 prefixes. Perhaps it could be restricted even more than that -- I
241 am not sure what is and isn't allowed in user-mode.
*/
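/* Illustrative sketch (added; not from the original file): the
   virtual-to-linear translation described above, for an access such as
   "%gs:(%ebx)" whose selector names an LDT entry.  Field names follow
   VexGuestX86SegDescr as used later in this file; error handling and
   privilege checks are omitted. */
#if 0
static Addr example_virtual_to_linear ( VexGuestX86SegDescr* ldt,
                                        UShort selector, Addr vaddr )
{
   UInt idx   = selector >> 3;              /* selector is (index << 3) + 7 */
   UInt base  = (ldt[idx].LdtEnt.Bits.BaseHi  << 24)
              | (ldt[idx].LdtEnt.Bits.BaseMid << 16)
              |  ldt[idx].LdtEnt.Bits.BaseLow;
   UInt limit = (ldt[idx].LdtEnt.Bits.LimitHi << 16)
              |  ldt[idx].LdtEnt.Bits.LimitLow;
   if (ldt[idx].LdtEnt.Bits.Granularity)
      limit = (limit << 12) | 0xFFF;        /* limit counted in 4K pages */
   vg_assert(vaddr <= limit);               /* must not exceed the limit */
   return (Addr)(base + vaddr);             /* linear = base + virtual   */
}
#endif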
244 /* Translate a struct modify_ldt_ldt_s to a VexGuestX86SegDescr, using
245 the Linux kernel's logic (cut-n-paste of code in
246 linux/kernel/ldt.c). */
248 static
249 void translate_to_hw_format ( /* IN */ vki_modify_ldt_t* inn,
250 /* OUT */ VexGuestX86SegDescr* out,
251 Int oldmode )
253 UInt entry_1, entry_2;
254 vg_assert(8 == sizeof(VexGuestX86SegDescr));
256 if (0)
257 VG_(printf)("translate_to_hw_format: base %#lx, limit %u\n",
258 inn->base_addr, inn->limit );
260 /* Allow LDTs to be cleared by the user. */
261 if (inn->base_addr == 0 && inn->limit == 0) {
262 if (oldmode ||
263 (inn->contents == 0 &&
264 inn->read_exec_only == 1 &&
265 inn->seg_32bit == 0 &&
266 inn->limit_in_pages == 0 &&
267 inn->seg_not_present == 1 &&
268 inn->useable == 0 )) {
269 entry_1 = 0;
270 entry_2 = 0;
271 goto install;
275 entry_1 = ((inn->base_addr & 0x0000ffff) << 16) |
276 (inn->limit & 0x0ffff);
277 entry_2 = (inn->base_addr & 0xff000000) |
278 ((inn->base_addr & 0x00ff0000) >> 16) |
279 (inn->limit & 0xf0000) |
280 ((inn->read_exec_only ^ 1) << 9) |
281 (inn->contents << 10) |
282 ((inn->seg_not_present ^ 1) << 15) |
283 (inn->seg_32bit << 22) |
284 (inn->limit_in_pages << 23) |
285 0x7000;
286 if (!oldmode)
287 entry_2 |= (inn->useable << 20);
289 /* Install the new entry ... */
290 install:
291 out->LdtEnt.Words.word1 = entry_1;
292 out->LdtEnt.Words.word2 = entry_2;
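/* Worked example (added for illustration): with base_addr = 0x12345678,
   limit = 0xfffff, seg_32bit = 1, limit_in_pages = 1, and contents,
   read_exec_only, seg_not_present, useable and oldmode all 0, the code
   above yields
      entry_1 = 0x5678ffff   -- base[15:0] in the high half, limit[15:0] below
      entry_2 = 0x12cff234   -- base[31:24], G/D/AVL, limit[19:16],
                                access bits, base[23:16]
   which is the standard x86 segment-descriptor layout. */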
295 /* Create initial GDT. */
296 static VexGuestX86SegDescr* alloc_system_x86_GDT ( void )
298 Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr);
299 VexGuestX86SegDescr* gdt = VG_(calloc)("di.syswrap-x86.azxG.1", nbytes, 1);
300 vki_modify_ldt_t info;
301 UShort seg;
303 VG_(memset)(&info, 0, sizeof(info));
304 info.entry_number = 0;
305 info.base_addr = 0;
306 info.limit = 0xfffff;
307 info.seg_32bit = 1;
308 info.contents = 0;
309 info.read_exec_only = 0;
310 info.limit_in_pages = 1;
311 info.seg_not_present = 0;
312 info.useable = 0;
313 info.reserved = 0;
315 asm volatile("movw %%ds, %0" : : "m" (seg));
316 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
317 asm volatile("movw %%ss, %0" : : "m" (seg));
318 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
320 info.contents = 2;
322 asm volatile("movw %%cs, %0" : : "m" (seg));
323 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
325 return gdt;
328 /* Create a zeroed-out LDT. */
329 static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void )
331 Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
332 return VG_(calloc)("di.syswrap-x86.azxL.1", nbytes, 1);
335 /* Free up an LDT or GDT allocated by the above fns. */
336 static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt )
338 vg_assert(dt);
339 VG_(free)(dt);
342 /* Copy contents between two existing LDTs. */
343 static void copy_LDT_from_to ( VexGuestX86SegDescr* src,
344 VexGuestX86SegDescr* dst )
346 Int i;
347 vg_assert(src);
348 vg_assert(dst);
349 for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++)
350 dst[i] = src[i];
353 /* Copy contents between two existing GDTs. */
354 static void copy_GDT_from_to ( VexGuestX86SegDescr* src,
355 VexGuestX86SegDescr* dst )
357 Int i;
358 vg_assert(src);
359 vg_assert(dst);
360 for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++)
361 dst[i] = src[i];
364 /* Free this thread's DTs, if it has any. */
365 static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex )
367 vg_assert(sizeof(HWord) == sizeof(void*));
369 if (0)
370 VG_(printf)("deallocate_LGDTs_for_thread: "
371 "ldt = 0x%llx, gdt = 0x%llx\n",
372 vex->guest_LDT, vex->guest_GDT );
374 if (vex->guest_LDT != (HWord)NULL) {
375 free_LDT_or_GDT( (VexGuestX86SegDescr*)(HWord)vex->guest_LDT );
376 vex->guest_LDT = (HWord)NULL;
379 if (vex->guest_GDT != (HWord)NULL) {
380 free_LDT_or_GDT( (VexGuestX86SegDescr*)(HWord)vex->guest_GDT );
381 vex->guest_GDT = (HWord)NULL;
/*
387 * linux/kernel/ldt.c
389 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
390 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
/*
394 * read_ldt() is not really atomic - this is not a problem since
395 * synchronization of reads and writes done to the LDT has to be
396 * assured by user-space anyway. Writes are atomic, to protect
397 * the security checks done on new descriptors.
*/
399 static
400 SysRes read_ldt ( ThreadId tid, UChar* ptr, UInt bytecount )
402 SysRes res;
403 UInt i, size;
404 UChar* ldt;
406 if (0)
407 VG_(printf)("read_ldt: tid = %u, ptr = %p, bytecount = %u\n",
408 tid, ptr, bytecount );
410 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
411 vg_assert(8 == sizeof(VexGuestX86SegDescr));
413 ldt = (UChar*)(HWord)(VG_(threads)[tid].arch.vex.guest_LDT);
414 res = VG_(mk_SysRes_Success)( 0 );
415 if (ldt == NULL)
416 /* LDT not allocated, meaning all entries are null */
417 goto out;
419 size = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
420 if (size > bytecount)
421 size = bytecount;
423 res = VG_(mk_SysRes_Success)( size );
424 for (i = 0; i < size; i++)
425 ptr[i] = ldt[i];
427 out:
428 return res;
432 static
433 SysRes write_ldt ( ThreadId tid, void* ptr, UInt bytecount, Int oldmode )
435 SysRes res;
436 VexGuestX86SegDescr* ldt;
437 vki_modify_ldt_t* ldt_info;
439 if (0)
440 VG_(printf)("write_ldt: tid = %u, ptr = %p, "
441 "bytecount = %u, oldmode = %d\n",
442 tid, ptr, bytecount, oldmode );
444 vg_assert(8 == sizeof(VexGuestX86SegDescr));
445 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
447 ldt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_LDT;
448 ldt_info = (vki_modify_ldt_t*)ptr;
450 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
451 if (bytecount != sizeof(vki_modify_ldt_t))
452 goto out;
454 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
455 if (ldt_info->entry_number >= VEX_GUEST_X86_LDT_NENT)
456 goto out;
457 if (ldt_info->contents == 3) {
458 if (oldmode)
459 goto out;
460 if (ldt_info->seg_not_present == 0)
461 goto out;
464 /* If this thread doesn't have an LDT, we'd better allocate it
465 now. */
466 if (ldt == NULL) {
467 ldt = alloc_zeroed_x86_LDT();
468 VG_(threads)[tid].arch.vex.guest_LDT = (HWord)ldt;
471 /* Install the new entry ... */
472 translate_to_hw_format ( ldt_info, &ldt[ldt_info->entry_number], oldmode );
473 res = VG_(mk_SysRes_Success)( 0 );
475 out:
476 return res;
480 static SysRes sys_modify_ldt ( ThreadId tid,
481 Int func, void* ptr, UInt bytecount )
483 /* Set return value to something "safe". I think this will never
484 actually be returned, though. */
485 SysRes ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
487 if (func != 0 && func != 1 && func != 2 && func != 0x11) {
488 ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
489 } else if (ptr != NULL && ! ML_(safe_to_deref)(ptr, bytecount)) {
490 ret = VG_(mk_SysRes_Error)( VKI_EFAULT );
491 } else {
492 switch (func) {
493 case 0:
494 ret = read_ldt(tid, ptr, bytecount);
495 break;
496 case 1:
497 ret = write_ldt(tid, ptr, bytecount, 1);
498 break;
499 case 2:
500 ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
501 VG_(unimplemented)("sys_modify_ldt: func == 2");
502 /* god knows what this is about */
503 /* ret = read_default_ldt(ptr, bytecount); */
504 /*UNREACHED*/
505 break;
506 case 0x11:
507 ret = write_ldt(tid, ptr, bytecount, 0);
508 break;
511 return ret;
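/* Illustrative user-space client (added; not part of Valgrind): installing
   one LDT entry with func 0x11, i.e. the "write, new-style descriptor" case
   that the dispatcher above routes to write_ldt() with oldmode == 0.
   Assumes <asm/ldt.h>'s struct user_desc and syscall(2); the function name
   is hypothetical. */
#if 0
#include <asm/ldt.h>          /* struct user_desc */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_install_ldt_entry ( void* base, unsigned limit )
{
   struct user_desc ud;
   memset(&ud, 0, sizeof(ud));
   ud.entry_number   = 0;                   /* LDT slot to write */
   ud.base_addr      = (unsigned long)base;
   ud.limit          = limit;
   ud.seg_32bit      = 1;
   ud.limit_in_pages = 0;
   /* Under Valgrind this lands in the LDT simulation above, not the kernel. */
   return syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud));
}
#endif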
515 SysRes ML_(x86_sys_set_thread_area) ( ThreadId tid, vki_modify_ldt_t* info )
517 Int idx;
518 VexGuestX86SegDescr* gdt;
520 vg_assert(8 == sizeof(VexGuestX86SegDescr));
521 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
523 if (info == NULL || ! ML_(safe_to_deref)(info, sizeof(vki_modify_ldt_t))) {
524 VG_(umsg)("Warning: bad u_info address %p in set_thread_area\n", info);
525 return VG_(mk_SysRes_Error)( VKI_EFAULT );
528 gdt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_GDT;
530 /* If the thread doesn't have a GDT, allocate it now. */
531 if (!gdt) {
532 gdt = alloc_system_x86_GDT();
533 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
536 idx = info->entry_number;
538 if (idx == -1) {
539 /* Find and use the first free entry. Don't allocate entry
540 zero, because the hardware will never do that, and apparently
541 doing so confuses some code (perhaps stuff running on
542 Wine). */
543 for (idx = 1; idx < VEX_GUEST_X86_GDT_NENT; idx++) {
544 if (gdt[idx].LdtEnt.Words.word1 == 0
545 && gdt[idx].LdtEnt.Words.word2 == 0)
546 break;
549 if (idx == VEX_GUEST_X86_GDT_NENT)
550 return VG_(mk_SysRes_Error)( VKI_ESRCH );
551 } else if (idx < 0 || idx == 0 || idx >= VEX_GUEST_X86_GDT_NENT) {
552 /* Similarly, reject attempts to use GDT[0]. */
553 return VG_(mk_SysRes_Error)( VKI_EINVAL );
556 translate_to_hw_format(info, &gdt[idx], 0);
558 VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid,
559 "set_thread_area(info->entry)",
560 (Addr) & info->entry_number, sizeof(unsigned int) );
561 info->entry_number = idx;
562 VG_TRACK( post_mem_write, Vg_CoreSysCall, tid,
563 (Addr) & info->entry_number, sizeof(unsigned int) );
565 return VG_(mk_SysRes_Success)( 0 );
569 static SysRes sys_get_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
571 Int idx;
572 VexGuestX86SegDescr* gdt;
574 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
575 vg_assert(8 == sizeof(VexGuestX86SegDescr));
577 if (info == NULL || ! ML_(safe_to_deref)(info, sizeof(vki_modify_ldt_t))) {
578 VG_(umsg)("Warning: bad u_info address %p in get_thread_area\n", info);
579 return VG_(mk_SysRes_Error)( VKI_EFAULT );
582 idx = info->entry_number;
584 if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT)
585 return VG_(mk_SysRes_Error)( VKI_EINVAL );
587 gdt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_GDT;
589 /* If the thread doesn't have a GDT, allocate it now. */
590 if (!gdt) {
591 gdt = alloc_system_x86_GDT();
592 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
595 info->base_addr = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) |
596 ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) |
597 gdt[idx].LdtEnt.Bits.BaseLow;
598 info->limit = ( gdt[idx].LdtEnt.Bits.LimitHi << 16 ) |
599 gdt[idx].LdtEnt.Bits.LimitLow;
600 info->seg_32bit = gdt[idx].LdtEnt.Bits.Default_Big;
601 info->contents = ( gdt[idx].LdtEnt.Bits.Type >> 2 ) & 0x3;
602 info->read_exec_only = ( gdt[idx].LdtEnt.Bits.Type & 0x1 ) ^ 0x1;
603 info->limit_in_pages = gdt[idx].LdtEnt.Bits.Granularity;
604 info->seg_not_present = gdt[idx].LdtEnt.Bits.Pres ^ 0x1;
605 info->useable = gdt[idx].LdtEnt.Bits.Sys;
606 info->reserved = 0;
608 return VG_(mk_SysRes_Success)( 0 );
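/* Illustrative user-space view (added; not part of Valgrind) of the
   entry_number == -1 convention emulated above: the caller asks for a free
   GDT slot and reads the index that was picked back out of u_info.
   Assumes <asm/ldt.h>'s struct user_desc and syscall(2); the function name
   is hypothetical. */
#if 0
#include <asm/ldt.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_claim_tls_slot ( void* tls_block, unsigned limit )
{
   struct user_desc ud;
   memset(&ud, 0, sizeof(ud));
   ud.entry_number = -1;                    /* "pick a free entry for me" */
   ud.base_addr    = (unsigned long)tls_block;
   ud.limit        = limit;
   ud.seg_32bit    = 1;
   ud.useable      = 1;
   if (syscall(SYS_set_thread_area, &ud) != 0)
      return -1;
   return ud.entry_number;                  /* index chosen for us */
}
#endif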
611 /* ---------------------------------------------------------------------
612 More thread stuff
613 ------------------------------------------------------------------ */
615 void VG_(cleanup_thread) ( ThreadArchState* arch )
617 /* Release arch-specific resources held by this thread. */
618 /* On x86, we have to dump the LDT and GDT. */
619 deallocate_LGDTs_for_thread( &arch->vex );
623 void ML_(x86_setup_LDT_GDT) ( /*OUT*/ ThreadArchState *child,
624 /*IN*/ ThreadArchState *parent )
626 /* We inherit our parent's LDT. */
627 if (parent->vex.guest_LDT == (HWord)NULL) {
628 /* We hope this is the common case. */
629 child->vex.guest_LDT = (HWord)NULL;
630 } else {
631 /* No luck .. we have to take a copy of the parent's. */
632 child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT();
633 copy_LDT_from_to( (VexGuestX86SegDescr*)(HWord)parent->vex.guest_LDT,
634 (VexGuestX86SegDescr*)(HWord)child->vex.guest_LDT );
637 /* Either we start with an empty GDT (the usual case) or inherit a
638 copy of our parent's one (Quadrics Elan3 driver-style clone
639 only). */
640 child->vex.guest_GDT = (HWord)NULL;
642 if (parent->vex.guest_GDT != (HWord)NULL) {
643 child->vex.guest_GDT = (HWord)alloc_system_x86_GDT();
644 copy_GDT_from_to( (VexGuestX86SegDescr*)(HWord)parent->vex.guest_GDT,
645 (VexGuestX86SegDescr*)(HWord)child->vex.guest_GDT );
650 /* ---------------------------------------------------------------------
651 PRE/POST wrappers for x86/Linux-specific syscalls
652 ------------------------------------------------------------------ */
654 #define PRE(name) DEFN_PRE_TEMPLATE(x86_linux, name)
655 #define POST(name) DEFN_POST_TEMPLATE(x86_linux, name)
657 /* Add prototypes for the wrappers declared here, so that gcc doesn't
658 harass us for not having prototypes. Really this is a kludge --
659 the right thing to do is to make these wrappers 'static' since they
660 aren't visible outside this file, but that requires even more macro
661 magic. */
662 DECL_TEMPLATE(x86_linux, sys_stat64);
663 DECL_TEMPLATE(x86_linux, sys_fstatat64);
664 DECL_TEMPLATE(x86_linux, sys_fstat64);
665 DECL_TEMPLATE(x86_linux, sys_lstat64);
666 DECL_TEMPLATE(x86_linux, old_mmap);
667 DECL_TEMPLATE(x86_linux, sys_mmap2);
668 DECL_TEMPLATE(x86_linux, sys_sigreturn);
669 DECL_TEMPLATE(x86_linux, sys_rt_sigreturn);
670 DECL_TEMPLATE(x86_linux, sys_modify_ldt);
671 DECL_TEMPLATE(x86_linux, sys_set_thread_area);
672 DECL_TEMPLATE(x86_linux, sys_get_thread_area);
673 DECL_TEMPLATE(x86_linux, sys_ptrace);
674 DECL_TEMPLATE(x86_linux, sys_sigsuspend);
675 DECL_TEMPLATE(x86_linux, old_select);
676 DECL_TEMPLATE(x86_linux, sys_vm86old);
677 DECL_TEMPLATE(x86_linux, sys_vm86);
678 DECL_TEMPLATE(x86_linux, sys_syscall223);
680 PRE(old_select)
682 /* struct sel_arg_struct {
683 unsigned long n;
684 fd_set *inp, *outp, *exp;
685 struct timeval *tvp;
}; */
688 PRE_REG_READ1(long, "old_select", struct sel_arg_struct *, args);
689 PRE_MEM_READ( "old_select(args)", ARG1, 5*sizeof(UWord) );
690 *flags |= SfMayBlock;
692 UInt* arg_struct = (UInt*)ARG1;
693 UInt a1, a2, a3, a4, a5;
695 a1 = arg_struct[0];
696 a2 = arg_struct[1];
697 a3 = arg_struct[2];
698 a4 = arg_struct[3];
699 a5 = arg_struct[4];
701 PRINT("old_select ( %d, %#x, %#x, %#x, %#x )", (Int)a1,a2,a3,a4,a5);
702 if (a2 != (Addr)NULL)
703 PRE_MEM_READ( "old_select(readfds)", a2, a1/8 /* __FD_SETSIZE/8 */ );
704 if (a3 != (Addr)NULL)
705 PRE_MEM_READ( "old_select(writefds)", a3, a1/8 /* __FD_SETSIZE/8 */ );
706 if (a4 != (Addr)NULL)
707 PRE_MEM_READ( "old_select(exceptfds)", a4, a1/8 /* __FD_SETSIZE/8 */ );
708 if (a5 != (Addr)NULL)
709 PRE_MEM_READ( "old_select(timeout)", a5, sizeof(struct vki_timeval) );
713 PRE(sys_sigreturn)
715 /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
716 an explanation of what follows. */
718 ThreadState* tst;
719 PRINT("sys_sigreturn ( )");
721 vg_assert(VG_(is_valid_tid)(tid));
722 vg_assert(tid >= 1 && tid < VG_N_THREADS);
723 vg_assert(VG_(is_running_thread)(tid));
725 /* Adjust esp to point to start of frame; skip back up over
726 sigreturn sequence's "popl %eax" and handler ret addr */
727 tst = VG_(get_ThreadState)(tid);
728 tst->arch.vex.guest_ESP -= sizeof(Addr)+sizeof(Word);
729 /* XXX why does ESP change differ from rt_sigreturn case below? */
731 /* This is only so that the EIP is (might be) useful to report if
732 something goes wrong in the sigreturn */
733 ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
735 /* Restore register state from frame and remove it */
736 VG_(sigframe_destroy)(tid, False);
738 /* Tell the driver not to update the guest state with the "result",
739 and set a bogus result to keep it happy. */
740 *flags |= SfNoWriteResult;
741 SET_STATUS_Success(0);
743 /* Check to see if any signals arose as a result of this. */
744 *flags |= SfPollAfter;
747 PRE(sys_rt_sigreturn)
749 /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
750 an explanation of what follows. */
752 ThreadState* tst;
753 PRINT("sys_rt_sigreturn ( )");
755 vg_assert(VG_(is_valid_tid)(tid));
756 vg_assert(tid >= 1 && tid < VG_N_THREADS);
757 vg_assert(VG_(is_running_thread)(tid));
759 /* Adjust esp to point to start of frame; skip back up over handler
760 ret addr */
761 tst = VG_(get_ThreadState)(tid);
762 tst->arch.vex.guest_ESP -= sizeof(Addr);
763 /* XXX why does ESP change differ from sigreturn case above? */
765 /* This is only so that the EIP is (might be) useful to report if
766 something goes wrong in the sigreturn */
767 ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
769 /* Restore register state from frame and remove it */
770 VG_(sigframe_destroy)(tid, True);
772 /* Tell the driver not to update the guest state with the "result",
773 and set a bogus result to keep it happy. */
774 *flags |= SfNoWriteResult;
775 SET_STATUS_Success(0);
777 /* Check to see if any signals arose as a result of this. */
778 *flags |= SfPollAfter;
781 PRE(sys_modify_ldt)
783 PRINT("sys_modify_ldt ( %ld, %#lx, %lu )", SARG1, ARG2, ARG3);
784 PRE_REG_READ3(int, "modify_ldt", int, func, void *, ptr,
785 unsigned long, bytecount);
787 if (ARG1 == 0) {
788 /* read the LDT into ptr */
789 PRE_MEM_WRITE( "modify_ldt(ptr)", ARG2, ARG3 );
791 if (ARG1 == 1 || ARG1 == 0x11) {
792 /* write the LDT with the entry pointed at by ptr */
793 PRE_MEM_READ( "modify_ldt(ptr)", ARG2, sizeof(vki_modify_ldt_t) );
795 /* "do" the syscall ourselves; the kernel never sees it */
796 SET_STATUS_from_SysRes( sys_modify_ldt( tid, ARG1, (void*)ARG2, ARG3 ) );
798 if (ARG1 == 0 && SUCCESS && RES > 0) {
799 POST_MEM_WRITE( ARG2, RES );
803 PRE(sys_set_thread_area)
805 PRINT("sys_set_thread_area ( %#lx )", ARG1);
806 PRE_REG_READ1(int, "set_thread_area", struct user_desc *, u_info)
807 PRE_MEM_READ( "set_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );
809 /* "do" the syscall ourselves; the kernel never sees it */
810 SET_STATUS_from_SysRes( ML_(x86_sys_set_thread_area)( tid, (void *)ARG1 ) );
813 PRE(sys_get_thread_area)
815 PRINT("sys_get_thread_area ( %#lx )", ARG1);
816 PRE_REG_READ1(int, "get_thread_area", struct user_desc *, u_info)
817 PRE_MEM_WRITE( "get_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );
819 /* "do" the syscall ourselves; the kernel never sees it */
820 SET_STATUS_from_SysRes( sys_get_thread_area( tid, (void *)ARG1 ) );
822 if (SUCCESS) {
823 POST_MEM_WRITE( ARG1, sizeof(vki_modify_ldt_t) );
827 // Parts of this are x86-specific, but the *PEEK* cases are generic.
829 // ARG3 is only used for pointers into the traced process's address
830 // space and for offsets into the traced process's struct
831 // user_regs_struct. It is never a pointer into this process's memory
832 // space, and we should therefore not check anything it points to.
833 PRE(sys_ptrace)
835 PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
836 PRE_REG_READ4(int, "ptrace",
837 long, request, long, pid, unsigned long, addr,
838 unsigned long, data);
839 switch (ARG1) {
840 case VKI_PTRACE_PEEKTEXT:
841 case VKI_PTRACE_PEEKDATA:
842 case VKI_PTRACE_PEEKUSR:
843 PRE_MEM_WRITE( "ptrace(peek)", ARG4,
844 sizeof (long));
845 break;
846 case VKI_PTRACE_GETREGS:
847 PRE_MEM_WRITE( "ptrace(getregs)", ARG4,
848 sizeof (struct vki_user_regs_struct));
849 break;
850 case VKI_PTRACE_GETFPREGS:
851 PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4,
852 sizeof (struct vki_user_i387_struct));
853 break;
854 case VKI_PTRACE_GETFPXREGS:
855 PRE_MEM_WRITE( "ptrace(getfpxregs)", ARG4,
856 sizeof(struct vki_user_fxsr_struct) );
857 break;
858 case VKI_PTRACE_GET_THREAD_AREA:
859 PRE_MEM_WRITE( "ptrace(get_thread_area)", ARG4,
860 sizeof(struct vki_user_desc) );
861 break;
862 case VKI_PTRACE_SETREGS:
863 PRE_MEM_READ( "ptrace(setregs)", ARG4,
864 sizeof (struct vki_user_regs_struct));
865 break;
866 case VKI_PTRACE_SETFPREGS:
867 PRE_MEM_READ( "ptrace(setfpregs)", ARG4,
868 sizeof (struct vki_user_i387_struct));
869 break;
870 case VKI_PTRACE_SETFPXREGS:
871 PRE_MEM_READ( "ptrace(setfpxregs)", ARG4,
872 sizeof(struct vki_user_fxsr_struct) );
873 break;
874 case VKI_PTRACE_SET_THREAD_AREA:
875 PRE_MEM_READ( "ptrace(set_thread_area)", ARG4,
876 sizeof(struct vki_user_desc) );
877 break;
878 case VKI_PTRACE_GETEVENTMSG:
879 PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
880 break;
881 case VKI_PTRACE_GETSIGINFO:
882 PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
883 break;
884 case VKI_PTRACE_SETSIGINFO:
885 PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
886 break;
887 case VKI_PTRACE_GETREGSET:
888 ML_(linux_PRE_getregset)(tid, ARG3, ARG4);
889 break;
890 case VKI_PTRACE_SETREGSET:
891 ML_(linux_PRE_setregset)(tid, ARG3, ARG4);
892 break;
893 default:
894 break;
898 POST(sys_ptrace)
900 switch (ARG1) {
901 case VKI_PTRACE_TRACEME:
902 ML_(linux_POST_traceme)(tid);
903 break;
904 case VKI_PTRACE_PEEKTEXT:
905 case VKI_PTRACE_PEEKDATA:
906 case VKI_PTRACE_PEEKUSR:
907 POST_MEM_WRITE( ARG4, sizeof (long));
908 break;
909 case VKI_PTRACE_GETREGS:
910 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct));
911 break;
912 case VKI_PTRACE_GETFPREGS:
913 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct));
914 break;
915 case VKI_PTRACE_GETFPXREGS:
916 POST_MEM_WRITE( ARG4, sizeof(struct vki_user_fxsr_struct) );
917 break;
918 case VKI_PTRACE_GET_THREAD_AREA:
919 POST_MEM_WRITE( ARG4, sizeof(struct vki_user_desc) );
920 break;
921 case VKI_PTRACE_GETEVENTMSG:
922 POST_MEM_WRITE( ARG4, sizeof(unsigned long));
923 break;
924 case VKI_PTRACE_GETSIGINFO:
925 /* XXX: This is a simplification. Different parts of the
926 * siginfo_t are valid depending on the type of signal.
*/
928 POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
929 break;
930 case VKI_PTRACE_GETREGSET:
931 ML_(linux_POST_getregset)(tid, ARG3, ARG4);
932 break;
933 default:
934 break;
938 PRE(old_mmap)
940 /* struct mmap_arg_struct {
941 unsigned long addr;
942 unsigned long len;
943 unsigned long prot;
944 unsigned long flags;
945 unsigned long fd;
946 unsigned long offset;
947 }; */
948 UWord a1, a2, a3, a4, a5, a6;
949 SysRes r;
951 UWord* args = (UWord*)ARG1;
952 PRE_REG_READ1(long, "old_mmap", struct mmap_arg_struct *, args);
953 PRE_MEM_READ( "old_mmap(args)", (Addr)args, 6*sizeof(UWord) );
955 a1 = args[1-1];
956 a2 = args[2-1];
957 a3 = args[3-1];
958 a4 = args[4-1];
959 a5 = args[5-1];
960 a6 = args[6-1];
962 PRINT("old_mmap ( %#lx, %lu, %ld, %ld, %ld, %ld )",
963 a1, a2, (Word)a3, (Word)a4, (Word)a5, (Word)a6 );
965 r = ML_(generic_PRE_sys_mmap)( tid, a1, a2, a3, a4, a5, (Off64T)a6 );
966 SET_STATUS_from_SysRes(r);
969 PRE(sys_mmap2)
971 SysRes r;
973 // Exactly like old_mmap() except:
974 // - all 6 args are passed in regs, rather than in a memory-block.
975 // - the file offset is specified in pagesize units rather than bytes,
976 // so that it can be used for files bigger than 2^32 bytes.
977 // pagesize or 4K-size units in offset? For ppc32/64-linux, this is
978 // 4K-sized. Assert that the page size is 4K here for safety.
979 vg_assert(VKI_PAGE_SIZE == 4096);
980 PRINT("sys_mmap2 ( %#lx, %lu, %lu, %lu, %lu, %lu )",
981 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
982 PRE_REG_READ6(long, "mmap2",
983 unsigned long, start, unsigned long, length,
984 unsigned long, prot, unsigned long, flags,
985 unsigned long, fd, unsigned long, offset);
987 r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
988 4096 * (Off64T)ARG6 );
989 SET_STATUS_from_SysRes(r);
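/* Worked example (added for illustration): a byte offset of 6GB
   (0x180000000) does not fit in a 32-bit register, but as a page count it
   is ARG6 = 0x180000000 / 4096 = 0x180000, from which the call above
   reconstructs the 64-bit byte offset as 4096 * (Off64T)ARG6. */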
992 // XXX: lstat64/fstat64/stat64 are generic, but not necessarily
993 // applicable to every architecture -- I think only to 32-bit archs.
994 // We're going to need something like linux/core_os32.h for such
995 // things, eventually, I think. --njn
996 PRE(sys_lstat64)
998 PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
999 PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
1000 PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
1001 PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
1004 POST(sys_lstat64)
1006 vg_assert(SUCCESS);
1007 if (RES == 0) {
1008 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1012 PRE(sys_stat64)
1014 FUSE_COMPATIBLE_MAY_BLOCK();
1015 PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
1016 PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
1017 PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
1018 PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
1021 POST(sys_stat64)
1023 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1026 PRE(sys_fstatat64)
1028 FUSE_COMPATIBLE_MAY_BLOCK();
1029 // ARG4 = int flags; Flags are or'ed together, therefore writing them
1030 // as a hex constant is more meaningful.
1031 PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %#lx )",
1032 SARG1, ARG2, (HChar*)ARG2, ARG3, ARG4);
1033 PRE_REG_READ4(long, "fstatat64",
1034 int, dfd, char *, file_name, struct stat64 *, buf, int, flags);
1035 PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
1036 PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
1039 POST(sys_fstatat64)
1041 POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
1044 PRE(sys_fstat64)
1046 PRINT("sys_fstat64 ( %lu, %#lx )", ARG1, ARG2);
1047 PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
1048 PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
1051 POST(sys_fstat64)
1053 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1056 /* NB: arm-linux has a clone of this one, and ppc32-linux has an almost
1057 identical version. */
1058 PRE(sys_sigsuspend)
1060 /* The C library interface to sigsuspend just takes a pointer to
1061 a signal mask, but this system call has three arguments: the first
1062 two don't appear to be used by the kernel and are always passed as
1063 zero by glibc, and the third is the first word of the signal mask,
1064 so only 32 signals are supported.
1066 In fact glibc normally uses rt_sigsuspend if it is available, as
1067 that takes a pointer to the full signal mask and so supports more signals.
*/
1069 *flags |= SfMayBlock;
1070 PRINT("sys_sigsuspend ( %ld, %ld, %lu )", SARG1, SARG2, ARG3 );
1071 PRE_REG_READ3(int, "sigsuspend",
1072 int, history0, int, history1,
1073 vki_old_sigset_t, mask);
1076 PRE(sys_vm86old)
1078 PRINT("sys_vm86old ( %#lx )", ARG1);
1079 PRE_REG_READ1(int, "vm86old", struct vm86_struct *, info);
1080 PRE_MEM_WRITE( "vm86old(info)", ARG1, sizeof(struct vki_vm86_struct));
1083 POST(sys_vm86old)
1085 POST_MEM_WRITE( ARG1, sizeof(struct vki_vm86_struct));
1088 PRE(sys_vm86)
1090 PRINT("sys_vm86 ( %lu, %#lx )", ARG1, ARG2);
1091 PRE_REG_READ2(int, "vm86", unsigned long, fn, struct vm86plus_struct *, v86);
1092 if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
1093 PRE_MEM_WRITE( "vm86(v86)", ARG2, sizeof(struct vki_vm86plus_struct));
1096 POST(sys_vm86)
1098 if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
1099 POST_MEM_WRITE( ARG2, sizeof(struct vki_vm86plus_struct));
1103 /* ---------------------------------------------------------------
1104 PRE/POST wrappers for x86/Linux-variant specific syscalls
1105 ------------------------------------------------------------ */
1107 PRE(sys_syscall223)
1109 Int err;
1111 /* 223 is used by sys_bproc. If we're not on a declared bproc
1112 variant, fail in the usual way. */
1114 if (!KernelVariantiS(KernelVariant_bproc, VG_(clo_kernel_variant))) {
1115 PRINT("non-existent syscall! (syscall 223)");
1116 PRE_REG_READ0(long, "ni_syscall(223)");
1117 SET_STATUS_Failure( VKI_ENOSYS );
1118 return;
1121 err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
1122 ARG4, ARG5, ARG6 );
1123 if (err) {
1124 SET_STATUS_Failure( err );
1125 return;
1127 /* Let it go through. */
1128 *flags |= SfMayBlock; /* who knows? play safe. */
1131 POST(sys_syscall223)
1133 ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
1134 ARG4, ARG5, ARG6 );
1137 #undef PRE
1138 #undef POST
1141 /* ---------------------------------------------------------------------
1142 The x86/Linux syscall table
1143 ------------------------------------------------------------------ */
1145 /* Add an x86-linux specific wrapper to a syscall table. */
1146 #define PLAX_(sysno, name) WRAPPER_ENTRY_X_(x86_linux, sysno, name)
1147 #define PLAXY(sysno, name) WRAPPER_ENTRY_XY(x86_linux, sysno, name)
1150 // This table maps from __NR_xxx syscall numbers (from
1151 // linux/include/asm-i386/unistd.h) to the appropriate PRE/POST sys_foo()
1152 // wrappers on x86 (as per sys_call_table in linux/arch/i386/kernel/entry.S).
1154 // For those syscalls not handled by Valgrind, the annotation indicates the
1155 // arch/OS combination, e.g. */* (generic), */Linux (Linux only), ?/?
1156 // (unknown).
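// Reading the entries below (summarising priv_types_n_macros.h): the macro
// prefix records where the wrapper is defined (GEN = generic, LIN =
// Linux-wide, PLA = this x86/Linux file) and the suffix records which hooks
// are registered (X_ = PRE only, XY = PRE and POST).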
1158 static SyscallTableEntry syscall_table[] = {
1159 //zz // (restart_syscall) // 0
1160 GENX_(__NR_exit, sys_exit), // 1
1161 GENX_(__NR_fork, sys_fork), // 2
1162 GENXY(__NR_read, sys_read), // 3
1163 GENX_(__NR_write, sys_write), // 4
1165 GENXY(__NR_open, sys_open), // 5
1166 GENX_(__NR_close, sys_close), // 6
1167 GENXY(__NR_waitpid, sys_waitpid), // 7
1168 GENXY(__NR_creat, sys_creat), // 8
1169 GENX_(__NR_link, sys_link), // 9
1171 GENX_(__NR_unlink, sys_unlink), // 10
1172 GENX_(__NR_execve, sys_execve), // 11
1173 GENX_(__NR_chdir, sys_chdir), // 12
1174 GENXY(__NR_time, sys_time), // 13
1175 GENX_(__NR_mknod, sys_mknod), // 14
1177 GENX_(__NR_chmod, sys_chmod), // 15
1178 //zz LINX_(__NR_lchown, sys_lchown16), // 16
1179 GENX_(__NR_break, sys_ni_syscall), // 17
1180 //zz // (__NR_oldstat, sys_stat), // 18 (obsolete)
1181 LINX_(__NR_lseek, sys_lseek), // 19
1183 GENX_(__NR_getpid, sys_getpid), // 20
1184 LINX_(__NR_mount, sys_mount), // 21
1185 LINX_(__NR_umount, sys_oldumount), // 22
1186 LINX_(__NR_setuid, sys_setuid16), // 23 ## P
1187 LINX_(__NR_getuid, sys_getuid16), // 24 ## P
1189 LINX_(__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN)
1190 PLAXY(__NR_ptrace, sys_ptrace), // 26
1191 GENX_(__NR_alarm, sys_alarm), // 27
1192 //zz // (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete
1193 GENX_(__NR_pause, sys_pause), // 29
1195 LINX_(__NR_utime, sys_utime), // 30
1196 GENX_(__NR_stty, sys_ni_syscall), // 31
1197 GENX_(__NR_gtty, sys_ni_syscall), // 32
1198 GENX_(__NR_access, sys_access), // 33
1199 GENX_(__NR_nice, sys_nice), // 34
1201 GENX_(__NR_ftime, sys_ni_syscall), // 35
1202 GENX_(__NR_sync, sys_sync), // 36
1203 GENX_(__NR_kill, sys_kill), // 37
1204 GENX_(__NR_rename, sys_rename), // 38
1205 GENX_(__NR_mkdir, sys_mkdir), // 39
1207 GENX_(__NR_rmdir, sys_rmdir), // 40
1208 GENXY(__NR_dup, sys_dup), // 41
1209 LINXY(__NR_pipe, sys_pipe), // 42
1210 GENXY(__NR_times, sys_times), // 43
1211 GENX_(__NR_prof, sys_ni_syscall), // 44
1212 //zz
1213 GENX_(__NR_brk, sys_brk), // 45
1214 LINX_(__NR_setgid, sys_setgid16), // 46
1215 LINX_(__NR_getgid, sys_getgid16), // 47
1216 //zz // (__NR_signal, sys_signal), // 48 */* (ANSI C)
1217 LINX_(__NR_geteuid, sys_geteuid16), // 49
1219 LINX_(__NR_getegid, sys_getegid16), // 50
1220 GENX_(__NR_acct, sys_acct), // 51
1221 LINX_(__NR_umount2, sys_umount), // 52
1222 GENX_(__NR_lock, sys_ni_syscall), // 53
1223 LINXY(__NR_ioctl, sys_ioctl), // 54
1225 LINXY(__NR_fcntl, sys_fcntl), // 55
1226 GENX_(__NR_mpx, sys_ni_syscall), // 56
1227 GENX_(__NR_setpgid, sys_setpgid), // 57
1228 GENX_(__NR_ulimit, sys_ni_syscall), // 58
1229 //zz // (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete
1230 //zz
1231 GENX_(__NR_umask, sys_umask), // 60
1232 GENX_(__NR_chroot, sys_chroot), // 61
1233 //zz // (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated
1234 GENXY(__NR_dup2, sys_dup2), // 63
1235 GENX_(__NR_getppid, sys_getppid), // 64
1237 GENX_(__NR_getpgrp, sys_getpgrp), // 65
1238 GENX_(__NR_setsid, sys_setsid), // 66
1239 LINXY(__NR_sigaction, sys_sigaction), // 67
1240 //zz // (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C)
1241 //zz // (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C)
1242 //zz
1243 LINX_(__NR_setreuid, sys_setreuid16), // 70
1244 LINX_(__NR_setregid, sys_setregid16), // 71
1245 PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72
1246 LINXY(__NR_sigpending, sys_sigpending), // 73
1247 GENX_(__NR_sethostname, sys_sethostname), // 74
1248 //zz
1249 GENX_(__NR_setrlimit, sys_setrlimit), // 75
1250 GENXY(__NR_getrlimit, sys_old_getrlimit), // 76
1251 GENXY(__NR_getrusage, sys_getrusage), // 77
1252 GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
1253 GENX_(__NR_settimeofday, sys_settimeofday), // 79
1255 LINXY(__NR_getgroups, sys_getgroups16), // 80
1256 LINX_(__NR_setgroups, sys_setgroups16), // 81
1257 PLAX_(__NR_select, old_select), // 82
1258 GENX_(__NR_symlink, sys_symlink), // 83
1259 //zz // (__NR_oldlstat, sys_lstat), // 84 -- obsolete
1260 //zz
1261 GENXY(__NR_readlink, sys_readlink), // 85
1262 //zz // (__NR_uselib, sys_uselib), // 86 */Linux
1263 //zz // (__NR_swapon, sys_swapon), // 87 */Linux
1264 //zz // (__NR_reboot, sys_reboot), // 88 */Linux
1265 //zz // (__NR_readdir, old_readdir), // 89 -- superseded
1266 //zz
1267 PLAX_(__NR_mmap, old_mmap), // 90
1268 GENXY(__NR_munmap, sys_munmap), // 91
1269 GENX_(__NR_truncate, sys_truncate), // 92
1270 GENX_(__NR_ftruncate, sys_ftruncate), // 93
1271 GENX_(__NR_fchmod, sys_fchmod), // 94
1273 LINX_(__NR_fchown, sys_fchown16), // 95
1274 GENX_(__NR_getpriority, sys_getpriority), // 96
1275 GENX_(__NR_setpriority, sys_setpriority), // 97
1276 GENX_(__NR_profil, sys_ni_syscall), // 98
1277 GENXY(__NR_statfs, sys_statfs), // 99
1279 GENXY(__NR_fstatfs, sys_fstatfs), // 100
1280 LINX_(__NR_ioperm, sys_ioperm), // 101
1281 LINXY(__NR_socketcall, sys_socketcall), // 102 x86/Linux-only
1282 LINXY(__NR_syslog, sys_syslog), // 103
1283 GENXY(__NR_setitimer, sys_setitimer), // 104
1285 GENXY(__NR_getitimer, sys_getitimer), // 105
1286 GENXY(__NR_stat, sys_newstat), // 106
1287 GENXY(__NR_lstat, sys_newlstat), // 107
1288 GENXY(__NR_fstat, sys_newfstat), // 108
1289 //zz // (__NR_olduname, sys_uname), // 109 -- obsolete
1290 //zz
1291 GENX_(__NR_iopl, sys_iopl), // 110
1292 LINX_(__NR_vhangup, sys_vhangup), // 111
1293 GENX_(__NR_idle, sys_ni_syscall), // 112
1294 PLAXY(__NR_vm86old, sys_vm86old), // 113 x86/Linux-only
1295 GENXY(__NR_wait4, sys_wait4), // 114
1296 //zz
1297 //zz // (__NR_swapoff, sys_swapoff), // 115 */Linux
1298 LINXY(__NR_sysinfo, sys_sysinfo), // 116
1299 LINXY(__NR_ipc, sys_ipc), // 117
1300 GENX_(__NR_fsync, sys_fsync), // 118
1301 PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux
1303 LINX_(__NR_clone, sys_clone), // 120
1304 //zz // (__NR_setdomainname, sys_setdomainname), // 121 */*(?)
1305 GENXY(__NR_uname, sys_newuname), // 122
1306 PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123
1307 LINXY(__NR_adjtimex, sys_adjtimex), // 124
1309 GENXY(__NR_mprotect, sys_mprotect), // 125
1310 LINXY(__NR_sigprocmask, sys_sigprocmask), // 126
1311 //zz // Nb: create_module() was removed 2.4-->2.6
1312 GENX_(__NR_create_module, sys_ni_syscall), // 127
1313 LINX_(__NR_init_module, sys_init_module), // 128
1314 LINX_(__NR_delete_module, sys_delete_module), // 129
1315 //zz
1316 //zz // Nb: get_kernel_syms() was removed 2.4-->2.6
1317 GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130
1318 LINX_(__NR_quotactl, sys_quotactl), // 131
1319 GENX_(__NR_getpgid, sys_getpgid), // 132
1320 GENX_(__NR_fchdir, sys_fchdir), // 133
1321 //zz // (__NR_bdflush, sys_bdflush), // 134 */Linux
1322 //zz
1323 //zz // (__NR_sysfs, sys_sysfs), // 135 SVr4
1324 LINX_(__NR_personality, sys_personality), // 136
1325 GENX_(__NR_afs_syscall, sys_ni_syscall), // 137
1326 LINX_(__NR_setfsuid, sys_setfsuid16), // 138
1327 LINX_(__NR_setfsgid, sys_setfsgid16), // 139
1329 LINXY(__NR__llseek, sys_llseek), // 140
1330 GENXY(__NR_getdents, sys_getdents), // 141
1331 GENX_(__NR__newselect, sys_select), // 142
1332 GENX_(__NR_flock, sys_flock), // 143
1333 GENX_(__NR_msync, sys_msync), // 144
1335 GENXY(__NR_readv, sys_readv), // 145
1336 GENX_(__NR_writev, sys_writev), // 146
1337 GENX_(__NR_getsid, sys_getsid), // 147
1338 GENX_(__NR_fdatasync, sys_fdatasync), // 148
1339 LINXY(__NR__sysctl, sys_sysctl), // 149
1341 GENX_(__NR_mlock, sys_mlock), // 150
1342 GENX_(__NR_munlock, sys_munlock), // 151
1343 GENX_(__NR_mlockall, sys_mlockall), // 152
1344 LINX_(__NR_munlockall, sys_munlockall), // 153
1345 LINXY(__NR_sched_setparam, sys_sched_setparam), // 154
1347 LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
1348 LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
1349 LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
1350 LINX_(__NR_sched_yield, sys_sched_yield), // 158
1351 LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159
1353 LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
1354 LINXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161
1355 GENXY(__NR_nanosleep, sys_nanosleep), // 162
1356 GENX_(__NR_mremap, sys_mremap), // 163
1357 LINX_(__NR_setresuid, sys_setresuid16), // 164
1359 LINXY(__NR_getresuid, sys_getresuid16), // 165
1360 PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only
1361 GENX_(__NR_query_module, sys_ni_syscall), // 167
1362 GENXY(__NR_poll, sys_poll), // 168
1363 //zz // (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux
1364 //zz
1365 LINX_(__NR_setresgid, sys_setresgid16), // 170
1366 LINXY(__NR_getresgid, sys_getresgid16), // 171
1367 LINXY(__NR_prctl, sys_prctl), // 172
1368 PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173 x86/Linux only?
1369 LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174
1371 LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175
1372 LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176
1373 LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177
1374 LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178
1375 LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179
1377 GENXY(__NR_pread64, sys_pread64), // 180
1378 GENX_(__NR_pwrite64, sys_pwrite64), // 181
1379 LINX_(__NR_chown, sys_chown16), // 182
1380 GENXY(__NR_getcwd, sys_getcwd), // 183
1381 LINXY(__NR_capget, sys_capget), // 184
1383 LINX_(__NR_capset, sys_capset), // 185
1384 GENXY(__NR_sigaltstack, sys_sigaltstack), // 186
1385 LINXY(__NR_sendfile, sys_sendfile), // 187
1386 GENXY(__NR_getpmsg, sys_getpmsg), // 188
1387 GENX_(__NR_putpmsg, sys_putpmsg), // 189
1389 // Nb: we treat vfork as fork
1390 GENX_(__NR_vfork, sys_fork), // 190
1391 GENXY(__NR_ugetrlimit, sys_getrlimit), // 191
1392 PLAX_(__NR_mmap2, sys_mmap2), // 192
1393 GENX_(__NR_truncate64, sys_truncate64), // 193
1394 GENX_(__NR_ftruncate64, sys_ftruncate64), // 194
1396 PLAXY(__NR_stat64, sys_stat64), // 195
1397 PLAXY(__NR_lstat64, sys_lstat64), // 196
1398 PLAXY(__NR_fstat64, sys_fstat64), // 197
1399 GENX_(__NR_lchown32, sys_lchown), // 198
1400 GENX_(__NR_getuid32, sys_getuid), // 199
1402 GENX_(__NR_getgid32, sys_getgid), // 200
1403 GENX_(__NR_geteuid32, sys_geteuid), // 201
1404 GENX_(__NR_getegid32, sys_getegid), // 202
1405 GENX_(__NR_setreuid32, sys_setreuid), // 203
1406 GENX_(__NR_setregid32, sys_setregid), // 204
1408 GENXY(__NR_getgroups32, sys_getgroups), // 205
1409 GENX_(__NR_setgroups32, sys_setgroups), // 206
1410 GENX_(__NR_fchown32, sys_fchown), // 207
1411 LINX_(__NR_setresuid32, sys_setresuid), // 208
1412 LINXY(__NR_getresuid32, sys_getresuid), // 209
1414 LINX_(__NR_setresgid32, sys_setresgid), // 210
1415 LINXY(__NR_getresgid32, sys_getresgid), // 211
1416 GENX_(__NR_chown32, sys_chown), // 212
1417 GENX_(__NR_setuid32, sys_setuid), // 213
1418 GENX_(__NR_setgid32, sys_setgid), // 214
1420 LINX_(__NR_setfsuid32, sys_setfsuid), // 215
1421 LINX_(__NR_setfsgid32, sys_setfsgid), // 216
1422 LINX_(__NR_pivot_root, sys_pivot_root), // 217
1423 GENXY(__NR_mincore, sys_mincore), // 218
1424 GENX_(__NR_madvise, sys_madvise), // 219
1426 GENXY(__NR_getdents64, sys_getdents64), // 220
1427 LINXY(__NR_fcntl64, sys_fcntl64), // 221
1428 GENX_(222, sys_ni_syscall), // 222
1429 PLAXY(223, sys_syscall223), // 223 // sys_bproc?
1430 LINX_(__NR_gettid, sys_gettid), // 224
1432 LINX_(__NR_readahead, sys_readahead), // 225 */Linux
1433 LINX_(__NR_setxattr, sys_setxattr), // 226
1434 LINX_(__NR_lsetxattr, sys_lsetxattr), // 227
1435 LINX_(__NR_fsetxattr, sys_fsetxattr), // 228
1436 LINXY(__NR_getxattr, sys_getxattr), // 229
1438 LINXY(__NR_lgetxattr, sys_lgetxattr), // 230
1439 LINXY(__NR_fgetxattr, sys_fgetxattr), // 231
1440 LINXY(__NR_listxattr, sys_listxattr), // 232
1441 LINXY(__NR_llistxattr, sys_llistxattr), // 233
1442 LINXY(__NR_flistxattr, sys_flistxattr), // 234
1444 LINX_(__NR_removexattr, sys_removexattr), // 235
1445 LINX_(__NR_lremovexattr, sys_lremovexattr), // 236
1446 LINX_(__NR_fremovexattr, sys_fremovexattr), // 237
1447 LINXY(__NR_tkill, sys_tkill), // 238 */Linux
1448 LINXY(__NR_sendfile64, sys_sendfile64), // 239
1450 LINXY(__NR_futex, sys_futex), // 240
1451 LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241
1452 LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242
1453 PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243
1454 PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244
1456 LINXY(__NR_io_setup, sys_io_setup), // 245
1457 LINX_(__NR_io_destroy, sys_io_destroy), // 246
1458 LINXY(__NR_io_getevents, sys_io_getevents), // 247
1459 LINX_(__NR_io_submit, sys_io_submit), // 248
1460 LINXY(__NR_io_cancel, sys_io_cancel), // 249
1462 LINX_(__NR_fadvise64, sys_fadvise64), // 250 */(Linux?)
1463 GENX_(251, sys_ni_syscall), // 251
1464 LINX_(__NR_exit_group, sys_exit_group), // 252
1465 LINXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253
1466 LINXY(__NR_epoll_create, sys_epoll_create), // 254
1468 LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255
1469 LINXY(__NR_epoll_wait, sys_epoll_wait), // 256
1470 //zz // (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux
1471 LINX_(__NR_set_tid_address, sys_set_tid_address), // 258
1472 LINXY(__NR_timer_create, sys_timer_create), // 259
1474 LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1)
1475 LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2)
1476 LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3)
1477 LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4)
1478 LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5)
1480 LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6)
1481 LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7)
1482 LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */*
1483 GENXY(__NR_statfs64, sys_statfs64), // 268
1484 GENXY(__NR_fstatfs64, sys_fstatfs64), // 269
1486 LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux
1487 GENX_(__NR_utimes, sys_utimes), // 271
1488 LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?)
1489 GENX_(__NR_vserver, sys_ni_syscall), // 273
1490 LINX_(__NR_mbind, sys_mbind), // 274 ?/?
1492 LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/?
1493 LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/?
1494 LINXY(__NR_mq_open, sys_mq_open), // 277
1495 LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1)
1496 LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2)
1498 LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3)
1499 LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4)
1500 LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5)
1501 GENX_(__NR_sys_kexec_load, sys_ni_syscall), // 283
1502 LINXY(__NR_waitid, sys_waitid), // 284
1504 GENX_(285, sys_ni_syscall), // 285
1505 LINX_(__NR_add_key, sys_add_key), // 286
1506 LINX_(__NR_request_key, sys_request_key), // 287
1507 LINXY(__NR_keyctl, sys_keyctl), // 288
1508 LINX_(__NR_ioprio_set, sys_ioprio_set), // 289
1510 LINX_(__NR_ioprio_get, sys_ioprio_get), // 290
1511 LINXY(__NR_inotify_init, sys_inotify_init), // 291
1512 LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292
1513 LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293
1514 // LINX_(__NR_migrate_pages, sys_migrate_pages), // 294
1516 LINXY(__NR_openat, sys_openat), // 295
1517 LINX_(__NR_mkdirat, sys_mkdirat), // 296
1518 LINX_(__NR_mknodat, sys_mknodat), // 297
1519 LINX_(__NR_fchownat, sys_fchownat), // 298
1520 LINX_(__NR_futimesat, sys_futimesat), // 299
1522 PLAXY(__NR_fstatat64, sys_fstatat64), // 300
1523 LINX_(__NR_unlinkat, sys_unlinkat), // 301
1524 LINX_(__NR_renameat, sys_renameat), // 302
1525 LINX_(__NR_linkat, sys_linkat), // 303
1526 LINX_(__NR_symlinkat, sys_symlinkat), // 304
1528 LINXY(__NR_readlinkat, sys_readlinkat), // 305
1529 LINX_(__NR_fchmodat, sys_fchmodat), // 306
1530 LINX_(__NR_faccessat, sys_faccessat), // 307
1531 LINXY(__NR_pselect6, sys_pselect6), // 308
1532 LINXY(__NR_ppoll, sys_ppoll), // 309
1534 LINX_(__NR_unshare, sys_unshare), // 310
1535 LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
1536 LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
1537 LINX_(__NR_splice, sys_splice), // 313
1538 LINX_(__NR_sync_file_range, sys_sync_file_range), // 314
1540 LINX_(__NR_tee, sys_tee), // 315
1541 LINXY(__NR_vmsplice, sys_vmsplice), // 316
1542 LINXY(__NR_move_pages, sys_move_pages), // 317
1543 LINXY(__NR_getcpu, sys_getcpu), // 318
1544 LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319
1546 LINX_(__NR_utimensat, sys_utimensat), // 320
1547 LINXY(__NR_signalfd, sys_signalfd), // 321
1548 LINXY(__NR_timerfd_create, sys_timerfd_create), // 322
1549 LINXY(__NR_eventfd, sys_eventfd), // 323
1550 LINX_(__NR_fallocate, sys_fallocate), // 324
1552 LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325
1553 LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 326
1554 LINXY(__NR_signalfd4, sys_signalfd4), // 327
1555 LINXY(__NR_eventfd2, sys_eventfd2), // 328
1556 LINXY(__NR_epoll_create1, sys_epoll_create1), // 329
1558 LINXY(__NR_dup3, sys_dup3), // 330
1559 LINXY(__NR_pipe2, sys_pipe2), // 331
1560 LINXY(__NR_inotify_init1, sys_inotify_init1), // 332
1561 LINXY(__NR_preadv, sys_preadv), // 333
1562 LINX_(__NR_pwritev, sys_pwritev), // 334
1564 LINXY(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo),// 335
1565 LINXY(__NR_perf_event_open, sys_perf_event_open), // 336
1566 LINXY(__NR_recvmmsg, sys_recvmmsg), // 337
1567 LINXY(__NR_fanotify_init, sys_fanotify_init), // 338
1568 LINX_(__NR_fanotify_mark, sys_fanotify_mark), // 339
1570 LINXY(__NR_prlimit64, sys_prlimit64), // 340
1571 LINXY(__NR_name_to_handle_at, sys_name_to_handle_at),// 341
1572 LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),// 342
1573 LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 343
1574 LINX_(__NR_syncfs, sys_syncfs), // 344
1576 LINXY(__NR_sendmmsg, sys_sendmmsg), // 345
1577 LINX_(__NR_setns, sys_setns), // 346
1578 LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 347
1579 LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 348
1580 LINX_(__NR_kcmp, sys_kcmp), // 349
1582 // LIN__(__NR_finit_module, sys_ni_syscall), // 350
1583 LINX_(__NR_sched_setattr, sys_sched_setattr), // 351
1584 LINXY(__NR_sched_getattr, sys_sched_getattr), // 352
1585 LINX_(__NR_renameat2, sys_renameat2), // 353
1586 // LIN__(__NR_seccomp, sys_ni_syscall), // 354
1588 LINXY(__NR_getrandom, sys_getrandom), // 355
1589 LINXY(__NR_memfd_create, sys_memfd_create), // 356
1590 // LIN__(__NR_bpf, sys_ni_syscall), // 357
1591 LINX_(__NR_execveat, sys_execveat), // 358
1592 LINXY(__NR_socket, sys_socket), // 359
1593 LINXY(__NR_socketpair, sys_socketpair), // 360
1594 LINX_(__NR_bind, sys_bind), // 361
1595 LINX_(__NR_connect, sys_connect), // 362
1596 LINX_(__NR_listen, sys_listen), // 363
1597 LINXY(__NR_accept4, sys_accept4), // 364
1598 LINXY(__NR_getsockopt, sys_getsockopt), // 365
1599 LINX_(__NR_setsockopt, sys_setsockopt), // 366
1600 LINXY(__NR_getsockname, sys_getsockname), // 367
1601 LINXY(__NR_getpeername, sys_getpeername), // 368
1602 LINX_(__NR_sendto, sys_sendto), // 369
1603 LINX_(__NR_sendmsg, sys_sendmsg), // 370
1604 LINXY(__NR_recvfrom, sys_recvfrom), // 371
1605 LINXY(__NR_recvmsg, sys_recvmsg), // 372
1606 LINX_(__NR_shutdown, sys_shutdown), // 373
1608 LINX_(__NR_membarrier, sys_membarrier), // 375
1609 GENX_(__NR_mlock2, sys_mlock2), // 376
1610 LINX_(__NR_copy_file_range, sys_copy_file_range), // 377
1611 LINXY(__NR_preadv2, sys_preadv2), // 378
1612 LINX_(__NR_pwritev2, sys_pwritev2), // 379
1614 LINXY(__NR_pkey_mprotect, sys_pkey_mprotect), // 380
1615 LINX_(__NR_pkey_alloc, sys_pkey_alloc), // 381
1616 LINX_(__NR_pkey_free, sys_pkey_free), // 382
1617 LINXY(__NR_statx, sys_statx), // 383
1619 /* Explicitly not supported on i386 yet. */
1620 GENX_(__NR_arch_prctl, sys_ni_syscall), // 384
1622 GENX_(__NR_rseq, sys_ni_syscall), // 386
1624 LINXY(__NR_clock_gettime64, sys_clock_gettime64), // 403
1625 LINX_(__NR_clock_settime64, sys_clock_settime64), // 404
1627 LINXY(__NR_clock_getres_time64, sys_clock_getres_time64), // 406
1628 LINXY(__NR_clock_nanosleep_time64, sys_clock_nanosleep_time64), // 407
1629 LINXY(__NR_timer_gettime64, sys_timer_gettime64), // 408
1630 LINXY(__NR_timer_settime64, sys_timer_settime64), // 409
1631 LINXY(__NR_timerfd_gettime64, sys_timerfd_gettime64),// 410
1632 LINXY(__NR_timerfd_settime64, sys_timerfd_settime64),// 411
1633 LINX_(__NR_utimensat_time64, sys_utimensat_time64), // 412
1634 LINXY(__NR_pselect6_time64, sys_pselect6_time64), // 413
1635 LINXY(__NR_ppoll_time64, sys_ppoll_time64), // 414
1637 LINXY(__NR_recvmmsg_time64, sys_recvmmsg_time64), // 417
1638 LINX_(__NR_mq_timedsend_time64, sys_mq_timedsend_time64), // 418
1639 LINXY(__NR_mq_timedreceive_time64, sys_mq_timedreceive_time64), // 419
1640 LINX_(__NR_semtimedop_time64, sys_semtimedop_time64),// 420
1641 LINXY(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait_time64), // 421
1642 LINXY(__NR_futex_time64, sys_futex_time64), // 422
1643 LINXY(__NR_sched_rr_get_interval_time64,
1644 sys_sched_rr_get_interval_time64), // 423
1646 LINXY(__NR_io_uring_setup, sys_io_uring_setup), // 425
1647 LINXY(__NR_io_uring_enter, sys_io_uring_enter), // 426
1648 LINXY(__NR_io_uring_register, sys_io_uring_register),// 427
1650 LINXY(__NR_pidfd_open, sys_pidfd_open), // 434
1651 GENX_(__NR_clone3, sys_ni_syscall), // 435
1652 LINXY(__NR_close_range, sys_close_range), // 436
1653 LINXY(__NR_openat2, sys_openat2), // 437
1654 LINXY(__NR_pidfd_getfd, sys_pidfd_getfd), // 438
1655 LINX_(__NR_faccessat2, sys_faccessat2), // 439
1657 LINXY(__NR_epoll_pwait2, sys_epoll_pwait2), // 441
1659 LINXY(__NR_memfd_secret, sys_memfd_secret), // 447
1661 LINX_(__NR_fchmodat2, sys_fchmodat2), // 452
};
1664 SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
1666 const UInt syscall_table_size
1667 = sizeof(syscall_table) / sizeof(syscall_table[0]);
1669 /* Is it in the contiguous initial section of the table? */
1670 if (sysno < syscall_table_size) {
1671 SyscallTableEntry* sys = &syscall_table[sysno];
1672 if (sys->before == NULL)
1673 return NULL; /* no entry */
1674 else
1675 return sys;
1678 /* Can't find a wrapper */
1679 return NULL;
1682 #endif // defined(VGP_x86_linux)
1684 /*--------------------------------------------------------------------*/
1685 /*--- end ---*/
1686 /*--------------------------------------------------------------------*/