/*--------------------------------------------------------------------*/
/*--- The thread state.                    pub_core_threadstate.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __PUB_CORE_THREADSTATE_H
#define __PUB_CORE_THREADSTATE_H
//--------------------------------------------------------------------
// PURPOSE: This module defines the ThreadState type and the
// VG_(threads)[] data structure which holds all the important thread
// state.  It also defines some simple operations on the data structure
// that don't require any external help.  (m_scheduler does the complex
// stuff).
//--------------------------------------------------------------------

#include "pub_tool_threadstate.h"
#include "pub_core_libcsetjmp.h"   // VG_MINIMAL_JMP_BUF
#include "pub_core_vki.h"          // vki_sigset_t
#include "pub_core_guest.h"        // VexGuestArchState
#include "libvex.h"                // LibVEX_N_SPILL_BYTES


/*------------------------------------------------------------*/
/*--- Types                                                ---*/
/*------------------------------------------------------------*/
/*
   Thread state machine:

   Empty -> Init -> Runnable <=> WaitSys/Yielding
     ^                 |
     \---- Zombie -----/
 */
typedef
   enum ThreadStatus {
      VgTs_Empty,      /* this slot is not in use */
      VgTs_Init,       /* just allocated */
      VgTs_Runnable,   /* ready to run */
      VgTs_WaitSys,    /* waiting for a syscall to complete */
      VgTs_Yielding,   /* temporarily yielding the CPU */
      VgTs_Zombie,     /* transient state just before exiting */
   }
   ThreadStatus;
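/* Illustrative sketch only, not part of Valgrind's API: a predicate
   showing how the state machine above is typically read.  Any status
   other than VgTs_Empty means the slot is occupied by a (live or
   dying) thread.  The helper name is ours, chosen for illustration. */
static inline Bool example_slot_in_use ( ThreadStatus status )
{
   /* VgTs_Empty is the only state in which a slot holds no thread. */
   return status != VgTs_Empty;
}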
/* Return codes from the scheduler. */
typedef
   enum {
      VgSrc_None,         /* not exiting yet */
      VgSrc_ExitThread,   /* just this thread is exiting */
      VgSrc_ExitProcess,  /* this thread is exiting due to another thread
                             calling exit() */
      VgSrc_FatalSig      /* Killed by the default action of a fatal
                             signal */
   }
   VgSchedReturnCode;
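/* Illustrative sketch only (the helper name is ours): one plausible way
   a caller of the scheduler could distinguish "this thread is going
   away" from "the whole process is going away", based on the comments
   above.  This is an interpretation, not code taken from m_scheduler. */
static inline Bool example_whole_process_exiting ( VgSchedReturnCode src )
{
   /* VgSrc_ExitProcess and VgSrc_FatalSig take the whole process down;
      VgSrc_ExitThread affects only the calling thread. */
   return src == VgSrc_ExitProcess || src == VgSrc_FatalSig;
}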
/* Forward declarations */
struct SyscallStatus;
struct SyscallArgs;

/* Architecture-specific thread state */
typedef
   struct {
      /* --- BEGIN vex-mandated guest state --- */

      /* Note that for code generation reasons, we require that the
         guest state area, its two shadows, and the spill area, are
         aligned on LibVEX_GUEST_STATE_ALIGN and have sizes such that
         there are no holes in between.  This is checked by
         do_pre_run_checks() in scheduler.c. */

      /* Saved machine context. */
      VexGuestArchState vex __attribute__((aligned(LibVEX_GUEST_STATE_ALIGN)));

      /* Saved shadow context (2 copies). */
      VexGuestArchState vex_shadow1
                        __attribute__((aligned(LibVEX_GUEST_STATE_ALIGN)));
      VexGuestArchState vex_shadow2
                        __attribute__((aligned(LibVEX_GUEST_STATE_ALIGN)));

      /* Spill area. */
      UChar vex_spill[LibVEX_N_SPILL_BYTES]
            __attribute__((aligned(LibVEX_GUEST_STATE_ALIGN)));

      /* --- END vex-mandated guest state --- */
   }
   ThreadArchState;
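/* Illustrative sketch only: a runtime spot-check of the alignment and
   "no holes" layout requirement described above.  The real check lives
   in do_pre_run_checks() in scheduler.c; this simplified version (with
   a name of our own choosing) just shows what the requirement means. */
static inline Bool example_arch_layout_ok ( const ThreadArchState* a )
{
   /* All four areas must start on a LibVEX_GUEST_STATE_ALIGN boundary. */
   Bool aligned
      =  ((Addr)&a->vex)          % LibVEX_GUEST_STATE_ALIGN == 0
      && ((Addr)&a->vex_shadow1)  % LibVEX_GUEST_STATE_ALIGN == 0
      && ((Addr)&a->vex_shadow2)  % LibVEX_GUEST_STATE_ALIGN == 0
      && ((Addr)&a->vex_spill[0]) % LibVEX_GUEST_STATE_ALIGN == 0;
   /* And they must be contiguous: each area ends where the next begins. */
   Bool no_holes
      =  (Addr)&a->vex_shadow1  == (Addr)&a->vex         + sizeof(a->vex)
      && (Addr)&a->vex_shadow2  == (Addr)&a->vex_shadow1 + sizeof(a->vex_shadow1)
      && (Addr)&a->vex_spill[0] == (Addr)&a->vex_shadow2 + sizeof(a->vex_shadow2);
   return aligned && no_holes;
}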
#define NULL_STK_ID (~(UWord)0)

/* OS-specific thread state.  IMPORTANT: if you add fields to this,
   you _must_ add code to os_state_clear() to initialise those
   fields. */
typedef
   struct {
      /* who we are */
      Int lwpid;        // PID of kernel task (Darwin: Mach thread)
      Int threadgroup;  // thread group id

      ThreadId parent;  // parent tid (if any)

      /* runtime details */
      Addr valgrind_stack_base;    // Valgrind's stack (VgStack*)
      Addr valgrind_stack_init_SP; // starting value for SP

      /* Client stack is registered as stk_id (on linux/darwin, by
         ML_(guess_and_register_stack)).
         Stack id NULL_STK_ID means that the user stack is not (yet)
         registered. */
      UWord stk_id;

      /* exit details */
      Word exitcode; // in the case of exitgroup, set by someone else
      Int  fatalsig; // fatal signal
#     if defined(VGO_darwin)
      // Mach trap POST handler as chosen by PRE
      void (*post_mach_trap_fn)(ThreadId tid,
                                struct SyscallArgs *, struct SyscallStatus *);

      // This thread's pthread
      Addr pthread;

      // Argument passed when thread started
      Addr func_arg;

      // Synchronization between child thread and parent thread's POST wrapper
      semaphore_t child_go;
      semaphore_t child_done;

      // Workqueue re-entry
      // (setjmp in PRE(workq_ops), longjmp in wqthread_hijack)
      // DDD: JRS fixme: this comment is no longer correct; wq_jmpbuf is
      // never used, and there is no such setjmp or longjmp pair.
      // I guess we could leave wq_jmpbuf_valid in place though, since
      // it does allow for an assertion in ML_(wqthread_continue_NORETURN).
      Bool wq_jmpbuf_valid;
      //jmp_buf wq_jmpbuf;

      // Values saved from transient Mach RPC messages
      Addr remote_port;  // destination for original message
      Int msgh_id;       // outgoing message id
      union {
         struct {
            Addr port;
         } mach_port;
         struct {
            Int right;
         } mach_port_allocate;
         struct {
            Addr port;
            Int right;
            Int delta;
         } mach_port_mod_refs;
         struct {
            Addr task;
            Addr name;
            Int disposition;
         } mach_port_insert_right;
         struct {
            Addr size;
            int flags;
         } vm_allocate;
         struct {
            Addr address;
            Addr size;
         } vm_deallocate;
         struct {
            Addr src;
            Addr dst;
            Addr size;
         } vm_copy;
         struct {
            Addr address;
            Addr size;
            int set_maximum;
            UWord new_protection;
         } vm_protect;
         struct {
            Addr addr;
            SizeT size;
         } vm_read;
         struct {
            ULong addr;
            ULong size;
         } mach_vm_read;
         struct {
            Addr addr;
            SizeT size;
            Addr data;
         } vm_read_overwrite;
         struct {
            Addr size;
            int copy;
            UWord protection;
         } vm_map;
         struct {
            Addr size;
         } vm_remap;
         struct {
            ULong size;
            int flags;
         } mach_vm_allocate;
         struct {
            ULong address;
            ULong size;
         } mach_vm_deallocate;
         struct {
            ULong address;
            ULong size;
            int set_maximum;
            unsigned int new_protection;
         } mach_vm_protect;
         struct {
            ULong size;
            int copy;
            UWord protection;
         } mach_vm_map;
         struct {
            ULong size;
            int copy;
         } mach_vm_remap;
         struct {
            Addr thread;
            UWord flavor;
         } thread_get_state;
         struct {
            Addr address;
         } io_connect_unmap_memory;
         struct {
            int which_port;
         } task_get_special_port;
         struct {
            int which;
         } host_get_special_port;
         struct {
            char *service_name;
         } bootstrap_look_up;
         struct {
            vki_size_t size;
         } WindowServer_29828;
         struct {
            Int access_rights;
         } WindowServer_29831;
         struct {
            char *path;
         } io_registry_entry_from_path;
      } mach_args;
#     elif defined(VGO_solaris)
#     if defined(VGP_x86_solaris)
      /* A pointer to thread-related data.  The pointer is used to set up
         a segment descriptor (GDT[VKI_GDT_LWPGS]) when the thread is about to
         be run.  A client program sets this value explicitly by calling the
         lwp_private syscall, or it can be passed as a part of ucontext_t when
         a new thread is created (the lwp_create syscall). */
      Addr thrptr;
#     elif defined(VGP_amd64_solaris)
      /* The GDT is not fully simulated by AMD64/Solaris.  The %fs segment
         register is assumed to be always zero, and vex->guest_FS_CONST holds
         the 64-bit offset associated with a %fs value of zero. */
#     endif

      /* Simulation of the kernel's lwp->lwp_ustack.  Set in the PRE wrapper
         of the getsetcontext syscall, for SETUSTACK.  Used in
         VG_(save_context)(), VG_(restore_context)() and
         VG_(sigframe_create)(). */
      vki_stack_t *ustack;

      /* Flag saying if the current call is in the door_return() variant of
         the door() syscall. */
      Bool in_door_return;

      /* Address of the door server procedure corresponding to the current
         thread.  Used to keep track of which door call the current thread
         services.  Valid only between subsequent door_return() invocations. */
      Addr door_return_procedure;

      /* Simulation of the kernel's lwp->lwp_oldcontext.  Set in
         VG_(restore_context)() and VG_(sigframe_create)().  Used in
         VG_(save_context)(). */
      vki_ucontext_t *oldcontext;

      /* Address of the sc_shared_t struct shared between kernel and libc.
         Set in POST(sys_schedctl).  Every thread gets its own address,
         but typically many are squeezed onto a single mapped page.
         Cleared in the child atfork handler. */
      Addr schedctl_data;

      /* True if this is a daemon thread. */
      Bool daemon_thread;
#     endif
   }
   ThreadOSstate;
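/* Illustrative sketch only (the helper name is ours): how the
   NULL_STK_ID convention documented above would typically be tested.
   The client stack registration itself happens elsewhere, e.g. via
   ML_(guess_and_register_stack) on linux/darwin. */
static inline Bool example_client_stack_registered ( const ThreadOSstate* os )
{
   /* stk_id holds NULL_STK_ID until the client stack has been registered. */
   return os->stk_id != NULL_STK_ID;
}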
/* Overall thread state */
typedef struct {
   /* ThreadId == 0 (and hence vg_threads[0]) is NEVER USED.
      The thread identity is simply the index in vg_threads[].
      ThreadId == 1 is the root thread and has the special property
      that we don't try to allocate or deallocate its stack.  For
      convenience of generating error messages, we also put the
      ThreadId in this tid field, but be aware that it should
      ALWAYS == the index in vg_threads[]. */
   ThreadId tid;
   /* Current scheduling status. */
   ThreadStatus status;

   /* This is set if the thread is in the process of exiting for any
      reason.  The precise details of the exit are in the OS-specific
      state. */
   VgSchedReturnCode exitreason;

   /* Architecture-specific thread state. */
   ThreadArchState arch;

   /* This thread's blocked-signals mask.  The semantics is that for a
      signal to be delivered to this thread, the signal must not be
      blocked by this signal mask.  If more than one thread accepts a
      signal, then it will be delivered to one at random.  If all
      threads block the signal, it will remain pending until either a
      thread unblocks it or someone uses sigwaitsig/sigtimedwait. */
   vki_sigset_t sig_mask;

   /* tmp_sig_mask is usually the same as sig_mask, and is kept in
      sync whenever sig_mask is changed.  The only time they have
      different values is during the execution of a sigsuspend, where
      tmp_sig_mask is the temporary mask which sigsuspend installs.
      It is only consulted to compute the signal mask applied to a
      signal handler.
      PW Nov 2016 : it is not clear if and where this tmp_sig_mask
      is set when a handler runs "inside" a sigsuspend. */
   vki_sigset_t tmp_sig_mask;

   /* A little signal queue for signals we can't get the kernel to
      queue for us.  This is only allocated as needed, since it should
      be rare. */
   struct SigQueue *sig_queue;
   /* Client stacks.  When a thread slot is freed, we don't deallocate its
      stack; we just leave it lying around for the next use of the
      slot.  If the next use of the slot requires a larger stack,
      only then is the old one deallocated and a new one
      allocated.

      For the main thread (threadid == 1), this mechanism doesn't
      apply.  We don't know the size of the stack since we didn't
      allocate it, and furthermore we never reallocate it. */

   /* The allocated size of this thread's stack */
   SizeT client_stack_szB;

   /* Address of the highest legitimate byte in this stack.  This is
      used for error messages only -- not critical for execution
      correctness.  It is set for all stacks, specifically including
      ThreadId == 1 (the main thread). */
   Addr client_stack_highest_byte;

   /* Alternate signal stack */
   vki_stack_t altstack;

   /* OS-specific thread state */
   ThreadOSstate os_state;

   /* Error disablement level.  A counter which allows selectively
      disabling error reporting in threads.  When zero, reporting is
      enabled.  When nonzero, it is disabled.  This is controlled by
      the client request 'VG_USERREQ__CHANGE_ERR_DISABLEMENT'.  New
      threads are always created with this as zero (errors
      enabled). */
   UInt err_disablement_level;

   /* Per-thread jmp_buf to resume scheduler after a signal */
   Bool               sched_jmpbuf_valid;
   VG_MINIMAL_JMP_BUF(sched_jmpbuf);

   /* This thread's name.  NULL, if no name. */
   HChar *thread_name;

   UInt ptrace;
}
ThreadState;
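/* Illustrative sketch only (the helper name is ours): reading the
   err_disablement_level counter as described in its comment above.
   Error reporting for a thread is enabled exactly when the counter
   is zero. */
static inline Bool example_errors_enabled_for ( const ThreadState* tst )
{
   /* Nonzero means some VG_USERREQ__CHANGE_ERR_DISABLEMENT requests
      are still outstanding, so reporting stays disabled. */
   return tst->err_disablement_level == 0;
}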
/*------------------------------------------------------------*/
/*--- The thread table.                                    ---*/
/*------------------------------------------------------------*/

/* An array of threads, dynamically allocated by VG_(init_Threads).
   NOTE: [0] is never used, to simplify the simulation of initialisers
   for LinuxThreads. */
extern ThreadState *VG_(threads);

/* In an outer valgrind, VG_(inner_threads) stores the address of
   the inner VG_(threads) array, as reported by the inner using
   the client request INNER_THREADS. */
extern ThreadState *VG_(inner_threads);

// The running thread.  m_scheduler should be the only other module
// to write to this.
extern ThreadId VG_(running_tid);
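/* Illustrative sketch only (the helper name is ours): the typical way
   the table and VG_(running_tid) are combined to get at the state of
   the thread currently holding the CPU lock.  Callers must be sure a
   thread is actually running, i.e. VG_(running_tid) is a valid index. */
static inline ThreadState* example_running_thread_state ( void )
{
   /* VG_(running_tid) indexes directly into VG_(threads)[]. */
   return &VG_(threads)[ VG_(running_tid) ];
}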
/*------------------------------------------------------------*/
/*--- Basic operations on the thread table.                ---*/
/*------------------------------------------------------------*/

/* Initialize the m_threadstate module. */
void VG_(init_Threads)(void);

// Convert a ThreadStatus to a string.
const HChar* VG_(name_of_ThreadStatus) ( ThreadStatus status );

// Convert a VgSchedReturnCode to a string.
const HChar* VG_(name_of_VgSchedReturnCode) ( VgSchedReturnCode retcode );

/* Get the ThreadState for a particular thread */
extern ThreadState *VG_(get_ThreadState) ( ThreadId tid );

/* Check that tid is in range and denotes a non-Empty thread. */
extern Bool VG_(is_valid_tid) ( ThreadId tid );

/* Returns true if a thread is currently running (ie, has the CPU lock) */
extern Bool VG_(is_running_thread)(ThreadId tid);

/* Returns true if the thread is in the process of exiting */
extern Bool VG_(is_exiting)(ThreadId tid);

/* Return the number of non-dead Threads */
extern Int VG_(count_living_threads)(void);

/* Return the number of threads in VgTs_Runnable state */
extern Int VG_(count_runnable_threads)(void);

/* Given an LWP id (ie, real kernel thread id), find the corresponding
   ThreadId */
extern ThreadId VG_(lwpid_to_vgtid)(Int lwpid);

/* Same as VG_(lwpid_to_vgtid), but if no corresponding living thread is
   found, also searches the dead threads.  This can be used when the tid
   is exiting, but the corresponding lwpid is still running. */
extern ThreadId VG_(lwpid_to_vgtid_dead_ok)(Int lwpid);
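/* Illustrative sketch only (the helper name is ours): combining the
   lookups above.  Given a kernel LWP id assumed to belong to a live
   Valgrind thread, report whether that thread is on its way out. */
static inline Bool example_lwp_is_exiting ( Int lwpid )
{
   ThreadId tid = VG_(lwpid_to_vgtid)(lwpid);
   /* Defensive validity check before consulting the exit flag. */
   return VG_(is_valid_tid)(tid) && VG_(is_exiting)(tid);
}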
#endif   // __PUB_CORE_THREADSTATE_H

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/