/* local_state.h                  -*-C++-*-
 *
 *************************************************************************
 *
 *  @copyright
 *  Copyright (C) 2009-2013, Intel Corporation
 *  All rights reserved.
 *
 *  @copyright
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  @copyright
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 *  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 *  AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 *  WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 **************************************************************************/
/**
 * @file local_state.h
 *
 * @brief The local_state structure contains additional OS-independent
 * information that's associated with a worker, but doesn't need to be visible
 * to the code generated by the compiler.
 */

#ifndef INCLUDED_LOCAL_STATE_DOT_H
#define INCLUDED_LOCAL_STATE_DOT_H

#include <internal/abi.h>
#include "worker_mutex.h"
#include "global_state.h"
#include "record-replay.h"
#include "signal_node.h"

#include <setjmp.h>
#include <stddef.h>
#include <stdio.h>

#ifndef _WIN32
#   include <pthread.h>
#endif
__CILKRTS_BEGIN_EXTERN_C

/* Opaque types. */

struct full_frame;
struct free_list;
struct pending_exception_info;
/// Opaque type for replay entry.
typedef struct replay_entry_t replay_entry_t;
/**
 * @brief Magic numbers for local_state, used for debugging
 */
typedef unsigned long long ls_magic_t;
/**
 * @brief Scheduling stack function: A function that is selected while the
 * worker is still on the program stack, but that must be executed on the
 * scheduling stack.
 */
typedef void (*scheduling_stack_fcn_t) (__cilkrts_worker *w,
                                        struct full_frame *ff,
                                        __cilkrts_stack_frame *sf);
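
/*
 * Illustrative sketch (not part of the original header): the shape of a
 * scheduling stack function.  Which function to run is recorded in the
 * worker's local state (see post_suspend below) while still on the program
 * stack; the scheduler invokes it once control has longjmp'd onto the
 * scheduling fiber.  The function name and the body are hypothetical.
 *
 *   static void example_scheduling_fcn(__cilkrts_worker *w,
 *                                      struct full_frame *ff,
 *                                      __cilkrts_stack_frame *sf)
 *   {
 *       // Work that must not run on the user's stack would go here,
 *       // e.g. finishing a return protocol or releasing the fiber
 *       // recorded in w->l->fiber_to_free.
 *   }
 */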
/**
 * @brief Type of this worker.
 **/
typedef enum cilk_worker_type
{
    WORKER_FREE,    ///< Unused worker - available to be bound to user threads
    WORKER_SYSTEM,  ///< Worker created by runtime - able to steal from any worker
    WORKER_USER     ///< User thread - able to steal only from team members
} cilk_worker_type;
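
/*
 * Illustrative sketch (an assumption, not code from this runtime): how the
 * worker type constrains stealing, per the enum above.  WORKER_SYSTEM may
 * steal from any worker, while WORKER_USER may steal only from members of
 * its own team (workers whose team pointer is that user worker).  The helper
 * name is hypothetical; `type` and `team` are fields of local_state below.
 *
 *   static int may_steal_from(__cilkrts_worker *w, __cilkrts_worker *victim)
 *   {
 *       switch (w->l->type) {
 *       case WORKER_SYSTEM:
 *           return 1;                     // system workers steal anywhere
 *       case WORKER_USER:
 *           return victim->l->team == w;  // stay within the user's team
 *       default:
 *           return 0;                     // WORKER_FREE is not scheduling
 *       }
 *   }
 */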
/**
 * @brief The local_state structure contains additional OS-independent
 * information that's associated with a worker, but doesn't need to be
 * visible to the compiler.
 *
 * No compiler-generated code should need to know the layout of this
 * structure.
 *
 * The fields of this struct can be classified as either local or
 * shared.
 *
 *    Local: This field is only accessed by the thread bound to this
 *      worker struct.  Local fields can be freely accessed without
 *      acquiring locks.
 *
 *    Shared: This field may be accessed by multiple worker threads.
 *      Accesses to shared fields usually require locks, except in
 *      special situations where one can prove that locks are
 *      unnecessary.
 *
 * The fields of this struct can also be classified as "read-only" if the
 * field does not change after it is initialized.  Otherwise, the
 * field is "read/write".  Read-only fields do not require locks to
 * access (ignoring the synchronization that might be needed for
 * initialization if this can occur in parallel).
 *
 * Finally, we explicitly classify some fields as "synchronization"
 * fields if they are used as part of a synchronization protocol in
 * the runtime.  These variables are generally shared and read/write.
 * Mostly, this category includes lock variables and other variables
 * that are involved in synchronization protocols (i.e., the THE
 * protocol).
 */
struct local_state  /* COMMON_PORTABLE */
{
    /** This value should be in the first field in any local_state */
#   define WORKER_MAGIC_0 ((ls_magic_t)0xe0831a4a940c60b8ULL)

    /**
     * Should be WORKER_MAGIC_0 or the local_state has been corrupted
     * This magic field is shared because it is read on lock acquisitions.
     *
     * [shared read-only]
     */
    ls_magic_t worker_magic_0;

    /**
     * Mutex used to serialize access to the local_state
     * Synchronization field. [shared read/write]
     */
    struct mutex lock;

    /**
     * Flag that indicates that the worker is interested in grabbing
     * LOCK, and thus thieves should leave the worker alone.
     * Written only by self, may be read by others.
     *
     * Synchronization field. [shared read/write]
     */
    int do_not_steal;

    /**
     * Lock that all thieves grab in order to compete for the right
     * to disturb this worker.
     *
     * Synchronization field. [shared read/write]
     */
    struct mutex steal_lock;

    /**
     * Full frame that the worker is working on.
     *
     * While a worker w is executing, a thief may change
     * w->l->frame_ff (on a successful steal) after acquiring w's
     * lock.
     *
     * Unlocked accesses to w->l->frame_ff are safe (by w itself) when
     * w's deque is empty, or when stealing from w has been disabled.
     *
     * [shared read/write]
     */
    struct full_frame *frame_ff;

    /**
     * Full frame that the worker will be working on next
     *
     * This field is normally local for a worker w.  Another worker v
     * may modify w->l->next_frame_ff, however, in the special case
     * when v is returning a frame to a user thread w since w is the
     * team leader.
     *
     * [shared read/write]
     */
    struct full_frame *next_frame_ff;
    /**
     * This is set iff this is a WORKER_USER and there has been a steal.  It
     * points to the first frame that was stolen since the team was last fully
     * sync'd.  Only this worker may continue past a sync in this function.
     *
     * This field is set by a thief for a victim that is a user
     * thread, while holding the victim's lock.
     * It can be cleared without a lock by the worker that will
     * continue executing past the sync.
     *
     * [shared read/write]
     */
    struct full_frame *last_full_frame;
    /**
     * Team on which this worker is a participant.  When a user worker enters,
     * its team is its own worker struct and it can never change teams.  When a
     * system worker steals, it adopts the team of its victim.
     *
     * When a system worker w steals, it reads victim->l->team and
     * joins this team.  w->l->team is constant until the next time w
     * returns control to the runtime.
     * We must acquire the worker lock to change w->l->team.
     *
     * @note This field is 64-byte aligned because it is the first in
     * the group of shared read-only fields.  We want this group to
     * fall on a different cache line from the previous group, which
     * is shared read-write.
     *
     * [shared read-only]
     */
    __attribute__((aligned(64)))
    __cilkrts_worker *team;

    /**
     * Type of this worker
     *
     * This field changes only when a worker binds or unbinds.
     * Otherwise, the field is read-only while the worker is bound.
     *
     * [shared read-only]
     */
    cilk_worker_type type;

    /**
     * Lazy task queue of this worker - an array of pointers to stack frames.
     *
     * Read-only because deques are a fixed size in the current
     * implementation.
     *
     * @note This field is 64-byte aligned because it is the first in
     * the group of local fields.  We want this group to fall on a
     * different cache line from the previous group, which is shared
     * read-only.
     *
     * [local read-only]
     */
    __attribute__((aligned(64)))
    __cilkrts_stack_frame **ltq;

    /**
     * Pool of fibers waiting to be reused.
     * [local read/write]
     */
    cilk_fiber_pool fiber_pool;

    /**
     * The fiber for the scheduling stacks.
     * [local read/write]
     */
    cilk_fiber* scheduling_fiber;

    /**
     * Saved pointer to the leaf node in thread-local storage, when a
     * user thread is imported.  This pointer gets set to a
     * meaningful value when binding a user thread, and cleared on
     * unbind.
     *
     * [local read/write]
     */
    __cilkrts_pedigree* original_pedigree_leaf;

    /**
     * State of the random number generator
     *
     * [local read/write]
     */
    unsigned rand_seed;

    /**
     * Function to execute after transferring onto the scheduling stack.
     *
     * [local read/write]
     */
    scheduling_stack_fcn_t post_suspend;

    /**
     * __cilkrts_stack_frame we suspended when we transferred onto the
     * scheduling stack.
     *
     * [local read/write]
     */
    __cilkrts_stack_frame *suspended_stack;

    /**
     * cilk_fiber that should be freed after returning from a
     * spawn with a stolen parent or after stalling at a sync.
     *
     * We calculate the stack to free when executing a reduction on
     * the user stack, but we can not actually release the stack
     * until control longjmps onto a runtime scheduling stack.
     *
     * This field is used to pass information to the runtime across
     * the longjmp onto the scheduling stack.
     *
     * [local read/write]
     */
    cilk_fiber* fiber_to_free;

    /**
     * Saved exception object for an exception that is being passed to
     * our parent
     *
     * [local read/write]
     */
    struct pending_exception_info *pending_exception;

    /**
     * Buckets for the memory allocator
     *
     * [local read/write]
     */
    struct free_list *free_list[FRAME_MALLOC_NBUCKETS];

    /**
     * Potential function for the memory allocator
     *
     * [local read/write]
     */
    size_t bucket_potential[FRAME_MALLOC_NBUCKETS];
    /**
     * Support for statistics
     *
     * Useful only when CILK_PROFILE is compiled in.
     * [local read/write]
     */
    statistics* stats;
    /**
     * Count indicates number of failures since last successful steal.  This is
     * used by the scheduler to reduce contention on shared flags.
     *
     * [local read/write]
     */
    unsigned int steal_failure_count;

    /**
     * 1 if work was stolen from another worker.  When true, this will flag
     * setup_for_execution_pedigree to increment the pedigree when we resume
     * execution to match the increment that would have been done on a return
     * from a spawn helper.
     *
     * [local read/write]
     */
    int work_stolen;

    /**
     * File pointer for record or replay
     * Does FILE * work on Windows?
     * During record, the file will be opened in write-only mode.
     * During replay, the file will be opened in read-only mode.
     *
     * [local read/write]
     */
    FILE *record_replay_fptr;

    /**
     * Root of array of replay entries - NULL if we're not replaying a log
     *
     * [local read/write]
     */
    replay_entry_t *replay_list_root;

    /**
     * Current replay entry - NULL if we're not replaying a log
     *
     * [local read/write]
     */
    replay_entry_t *replay_list_entry;
    /**
     * Separate the signal_node from other things in the local_state by the
     * size of a cache line for performance reasons.
     *
     * unused
     */
    char buf[64];
    /**
     * Signal object for waking/sleeping the worker.  This should be a pointer
     * to avoid the possibility of caching problems.
     *
     * [shared read-only]
     */
    signal_node_t *signal_node;

    /** This value should be in the last field in any local_state */
#   define WORKER_MAGIC_1 ((ls_magic_t)0x16164afb0ea0dff9ULL)

    /**
     * Should be WORKER_MAGIC_1 or the local_state has been corrupted
     * This magic field is shared because it is read on lock acquisitions.
     * [shared read-only]
     */
    ls_magic_t worker_magic_1;
};
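
/*
 * Illustrative sketch (not part of the original header): the two magic
 * fields bracket the structure so that debug code can detect corruption,
 * e.g. an overrun from an adjacent allocation.  A check might look like
 * the following; the helper name is hypothetical.
 *
 *   static int local_state_looks_intact(const struct local_state *l)
 *   {
 *       return l->worker_magic_0 == WORKER_MAGIC_0 &&
 *              l->worker_magic_1 == WORKER_MAGIC_1;
 *   }
 */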
/**
 * Perform cleanup according to the function set before the longjmp().
 *
 * Call this after longjmp() has completed and the worker is back on a
 * scheduling stack.
 *
 * @param w __cilkrts_worker currently executing.
 */
void run_scheduling_stack_fcn(__cilkrts_worker *w);
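
/*
 * Illustrative sketch (an assumption about the intended calling pattern,
 * not code from this runtime): before leaving the user stack, the worker
 * records what must happen on the scheduling stack; after the longjmp onto
 * the scheduling fiber, run_scheduling_stack_fcn() runs the recorded
 * function.  The helper names below are hypothetical.
 *
 *   // On the user stack:
 *   w->l->post_suspend    = example_scheduling_fcn;  // see sketch above
 *   w->l->suspended_stack = sf;                      // frame being suspended
 *   switch_to_scheduling_fiber(w);                   // hypothetical longjmp
 *
 *   // Later, on the scheduling stack:
 *   run_scheduling_stack_fcn(w);   // presumably calls
 *                                  // w->l->post_suspend(w, w->l->frame_ff,
 *                                  //                    w->l->suspended_stack)
 */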
__CILKRTS_END_EXTERN_C

#endif // ! defined(INCLUDED_LOCAL_STATE_DOT_H)