/* cilk-abi.c                  -*-C++-*-
 *
 *************************************************************************
 *
 *  Copyright (C) 2010-2016, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 *  AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 *  WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 *  *********************************************************************
 *
 *  PLEASE NOTE: This file is a downstream copy of a file maintained in
 *  a repository at cilkplus.org. Changes made to this file that are not
 *  submitted through the contribution process detailed at
 *  http://www.cilkplus.org/submit-cilk-contribution will be lost the next
 *  time that a new version is released. Changes only submitted to the
 *  GNU compiler collection or posted to the git repository at
 *  https://bitbucket.org/intelcilkruntime/intel-cilk-runtime.git are
 *  not tracked.
 *
 *  We welcome your contributions to this open source project. Thank you
 *  for your assistance in helping us improve Cilk Plus.
 **************************************************************************/
/**
 * @file cilk-abi.c
 *
 * @brief cilk-abi.c implements all of the entrypoints to the Intel Cilk
 * Plus runtime.
 */

/*
 * Define this macro so that compilation of this file generates the
 * non-inlined versions of certain functions in cilk_api.h.
 */
#include "internal/abi.h"
#include "cilk/cilk_api.h"
#include "cilk/cilk_undocumented.h"
#include "cilktools/cilkscreen.h"

#include "global_state.h"
#include "os.h"
#include "os_mutex.h"
#include "bug.h"
#include "local_state.h"
#include "full_frame.h"
#include "pedigrees.h"
#include "scheduler.h"
#include "sysdep.h"
#include "except.h"
#include "cilk_malloc.h"
#include "record-replay.h"

#ifdef _MSC_VER
/* Some versions of icc don't support limits.h on Linux if
   gcc 4.3 or newer is installed. */
#include <limits.h>

/* Declare _ReturnAddress compiler intrinsic */
void * _ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)

#include "sysdep-win.h"     // Needed for sysdep_init_module()
#endif  // _MSC_VER

#include "metacall_impl.h"
#include "reducer_impl.h"
#include "cilk-ittnotify.h"
#include "cilk-tbb-interop.h"

#define TBB_INTEROP_DATA_DELAYED_UNTIL_BIND (void *)-1

/**
 * __cilkrts_bind_thread is a versioned entrypoint.  The runtime should be
 * exporting copies of __cilkrts_bind_version for the current and all previous
 * versions of the ABI.
 *
 * This macro should always be set to generate a version to match the current
 * version; __CILKRTS_ABI_VERSION.
 */
#define BIND_THREAD_RTN __cilkrts_bind_thread_1

static inline
void enter_frame_internal(__cilkrts_stack_frame *sf, uint32_t version)
{
    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    if (w == 0) { /* slow path */
        w = BIND_THREAD_RTN();

        sf->flags = CILK_FRAME_LAST | (version << 24);
        CILK_ASSERT((sf->flags & CILK_FRAME_FLAGS_MASK) == CILK_FRAME_LAST);
    } else {
        sf->flags = (version << 24);
        CILK_ASSERT((sf->flags & CILK_FRAME_FLAGS_MASK) == 0);
    }
    sf->call_parent = w->current_stack_frame;
    sf->worker = w;
    w->current_stack_frame = sf;
}

CILK_ABI_VOID __cilkrts_enter_frame(__cilkrts_stack_frame *sf)
{
    enter_frame_internal(sf, 0);
}

CILK_ABI_VOID __cilkrts_enter_frame_1(__cilkrts_stack_frame *sf)
{
    enter_frame_internal(sf, 1);
}
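
/*
 * Illustration (not part of the runtime): a sketch of where these
 * entrypoints sit in compiler-generated code.  The exact code generation is
 * compiler internal and version dependent; "f" is a hypothetical user
 * function containing a cilk_spawn, and __cilkrts_pop_frame is the
 * compiler-emitted inline that unlinks the frame from the worker.
 *
 *   void f(void) {
 *       __cilkrts_stack_frame sf;
 *       __cilkrts_enter_frame_1(&sf);   // link frame; slow path binds thread
 *       ...                             // body with cilk_spawn / cilk_sync
 *       __cilkrts_pop_frame(&sf);       // current_stack_frame = sf.call_parent
 *       if (sf.flags)
 *           __cilkrts_leave_frame(&sf); // only called when flags are set
 *   }
 */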
static inline
void enter_frame_fast_internal(__cilkrts_stack_frame *sf, uint32_t version)
{
    __cilkrts_worker *w = __cilkrts_get_tls_worker_fast();
    sf->flags = version << 24;
    sf->call_parent = w->current_stack_frame;
    sf->worker = w;
    w->current_stack_frame = sf;
}

CILK_ABI_VOID __cilkrts_enter_frame_fast(__cilkrts_stack_frame *sf)
{
    enter_frame_fast_internal(sf, 0);
}

CILK_ABI_VOID __cilkrts_enter_frame_fast_1(__cilkrts_stack_frame *sf)
{
    enter_frame_fast_internal(sf, 1);
}

/**
 * A component of the THE protocol.  __cilkrts_undo_detach checks whether
 * this frame's parent has been stolen.  If it hasn't, the frame can return
 * normally.  If the parent has been stolen, or if we suspect it might be,
 * then __cilkrts_leave_frame() needs to call into the runtime.
 *
 * @note __cilkrts_undo_detach() is comparing the exception pointer against
 * the tail pointer.  The exception pointer is modified when another worker
 * is considering whether it can steal a frame.  The head pointer is updated
 * to match when the worker lock is taken out and the thief is sure that
 * it can complete the steal.  If the steal cannot be completed, the thief
 * will restore the exception pointer.
 *
 * @return true if undo-detach failed.
 */
static int __cilkrts_undo_detach(__cilkrts_stack_frame *sf)
{
    __cilkrts_worker *w = sf->worker;
    __cilkrts_stack_frame *volatile *t = w->tail;

/*    DBGPRINTF("%d - __cilkrts_undo_detach - sf %p\n", w->self, sf); */

    --t;
    w->tail = t;
    /* On x86 the __sync_fetch_and_<op> family includes a
       full memory barrier.  In theory the sequence in the
       second branch of the #if should be faster, but on
       most x86 it is not. */
#if defined __i386__ || defined __x86_64__
    __sync_fetch_and_and(&sf->flags, ~CILK_FRAME_DETACHED);
#else
    __cilkrts_fence(); /* membar #StoreLoad */
    sf->flags &= ~CILK_FRAME_DETACHED;
#endif

    return __builtin_expect(t < w->exc, 0);
}
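
/*
 * For reference, a simplified sketch (not runtime code) of the compiler-
 * emitted detach that this function reverses.  Detach publishes the spawn
 * helper's parent frame on the worker's deque so thieves can take it
 * (pedigree bookkeeping omitted here):
 *
 *   struct __cilkrts_worker *w = sf->worker;
 *   *w->tail++ = sf->call_parent;       // make the parent stealable
 *   sf->flags |= CILK_FRAME_DETACHED;
 *
 * __cilkrts_undo_detach pops that entry back off (--t; w->tail = t) and
 * clears CILK_FRAME_DETACHED; t < w->exc then means a thief may already
 * have claimed the parent, so the caller must enter the runtime.
 */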

CILK_ABI_VOID __cilkrts_leave_frame(__cilkrts_stack_frame *sf)
{
    __cilkrts_worker *w = sf->worker;

/*    DBGPRINTF("%d-%p __cilkrts_leave_frame - sf %p, flags: %x\n", w->self, GetWorkerFiber(w), sf, sf->flags); */

#ifdef _WIN32
    /* if leave frame was called from our unwind handler, leave_frame should
       proceed no further. */
    if (sf->flags & CILK_FRAME_UNWINDING)
    {
/*        DBGPRINTF("%d - __cilkrts_leave_frame - aborting due to UNWINDING flag\n", w->self); */

        // If this is the frame of a spawn helper (indicated by the
        // CILK_FRAME_DETACHED flag) we must update the pedigree.  The pedigree
        // points to nodes allocated on the stack.  Failing to update it will
        // result in an accvio/segfault if the pedigree is walked.  This must
        // happen for all spawn helper frames, even if we're processing an
        // exception.
        if ((sf->flags & CILK_FRAME_DETACHED))
        {
            update_pedigree_on_leave_frame(w, sf);
        }
        return;
    }
#endif

#if CILK_LIB_DEBUG
    /* ensure the caller popped itself */
    CILK_ASSERT(w->current_stack_frame != sf);
#endif

    /* The exiting function should have checked for zero flags,
       so there is no check for flags == 0 here. */
#if CILK_LIB_DEBUG
    if (__builtin_expect(sf->flags & (CILK_FRAME_EXITING|CILK_FRAME_UNSYNCHED), 0))
        __cilkrts_bug("W%u: function exiting with invalid flags %02x\n",
                      w->self, sf->flags);
#endif

    /* Must return normally if (1) the active function was called
       and not spawned, or (2) the parent has never been stolen. */
    if ((sf->flags & CILK_FRAME_DETACHED)) {
/*        DBGPRINTF("%d - __cilkrts_leave_frame - CILK_FRAME_DETACHED\n", w->self); */

#ifndef _WIN32
        if (__builtin_expect(sf->flags & CILK_FRAME_EXCEPTING, 0)) {
            // Pedigree will be updated in __cilkrts_leave_frame.  We need the
            // pedigree before the update for record/replay.
            //     update_pedigree_on_leave_frame(w, sf);
            __cilkrts_return_exception(sf);
            /* If return_exception returns the caller is attached.
               leave_frame is called from a cleanup (destructor)
               for the frame object.  The caller will reraise the
               exception. */
            return;
        }
#endif

        // During replay, check whether w was the last worker to continue
        replay_wait_for_steal_if_parent_was_stolen(w);

        // Attempt to undo the detach
        if (__builtin_expect(__cilkrts_undo_detach(sf), 0)) {
            // The update of pedigree for leaving the frame occurs
            // inside this call if it does not return.
            __cilkrts_c_THE_exception_check(w, sf);
        }

        update_pedigree_on_leave_frame(w, sf);

        /* This path is taken when undo-detach wins the race with stealing.
           Otherwise this strand terminates and the caller will be resumed
           via setjmp at sync. */
        if (__builtin_expect(sf->flags & CILK_FRAME_FLAGS_MASK, 0))
            __cilkrts_bug("W%u: frame won undo-detach race with flags %02x\n",
                          w->self, sf->flags);

        return;
    }

    sf->flags |= CILK_FRAME_EXITING;

    if (__builtin_expect(sf->flags & CILK_FRAME_LAST, 0))
        __cilkrts_c_return_from_initial(w); /* does return */
    else if (sf->flags & CILK_FRAME_STOLEN)
        __cilkrts_return(w); /* does return */

/*    DBGPRINTF("%d-%p __cilkrts_leave_frame - returning, StackBase: %p\n", w->self, GetWorkerFiber(w)); */
}

/* Caller must have called setjmp. */
CILK_ABI_VOID __cilkrts_sync(__cilkrts_stack_frame *sf)
{
    __cilkrts_worker *w = sf->worker;
/*    DBGPRINTF("%d-%p __cilkrts_sync - sf %p\n", w->self, GetWorkerFiber(w), sf); */
    if (__builtin_expect(!(sf->flags & CILK_FRAME_UNSYNCHED), 0))
        __cilkrts_bug("W%u: double sync %p\n", w->self, sf);
#ifndef _WIN32
    if (__builtin_expect(sf->flags & CILK_FRAME_EXCEPTING, 0)) {
        __cilkrts_c_sync_except(w, sf); /* does not return */
    }
#endif

    __cilkrts_c_sync(w, sf);
}
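
/*
 * Illustration (not part of the runtime): per the "caller must have called
 * setjmp" requirement above, the compiler emits roughly the following at a
 * cilk_sync (schematic; CILK_SETJMP is the ABI's __builtin_setjmp wrapper):
 *
 *   if (sf.flags & CILK_FRAME_UNSYNCHED) {
 *       if (!CILK_SETJMP(sf.ctx))
 *           __cilkrts_sync(&sf);
 *   }
 *
 * When all children have completed, the runtime longjmps back to this
 * point and execution continues past the sync.
 */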

/**
 * Suspends the runtime by notifying the workers that they should not try to
 * steal.  This function is supposed to be called from a non-parallel region
 * (i.e., after cilk_sync in the top-level spawning function).  Otherwise,
 * which workers are sleeping or busy is unpredictable in general.
 * The runtime can be resumed by calling __cilkrts_resume().
 */
CILK_ABI_VOID __cilkrts_suspend(void)
{
    global_state_t *g = cilkg_get_global_state();
    if (NULL == g || g->P < 2)
        return;

    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    // Do nothing if worker/frame is not available
    if (NULL == w || NULL == w->current_stack_frame)
        return;

    // Do nothing if this was called within a parallel region.
    __cilkrts_stack_frame *sf = w->current_stack_frame;
    if (0 == (sf->flags & CILK_FRAME_LAST) || (sf->flags & CILK_FRAME_UNSYNCHED))
        return;

    __cilkrts_worker *root = g->workers[0];
    root->l->steal_failure_count = g->max_steal_failures + 1;
    CILK_ASSERT(root->l->signal_node);
    signal_node_msg(root->l->signal_node, 0);
}

/**
 * Resumes the runtime by notifying the workers that they can steal.
 */
CILK_ABI_VOID __cilkrts_resume(void)
{
    global_state_t *g = cilkg_get_global_state();
    if (NULL == g || g->P < 2)
        return;

    __cilkrts_worker *root = g->workers[0];
    CILK_ASSERT(root->l->signal_node);
    signal_node_msg(root->l->signal_node, 1);
}
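
/*
 * Usage sketch (illustrative, assuming a program that alternates between
 * Cilk and non-Cilk phases; the phase functions are hypothetical):
 *
 *   do_parallel_phase();   // contains cilk_spawn / cilk_sync
 *   __cilkrts_suspend();   // park system workers: no idle steal attempts
 *   do_serial_phase();     // long serial or non-Cilk work
 *   __cilkrts_resume();    // let the workers steal again
 *
 * Both calls are no-ops for a single-worker runtime or when the runtime
 * is not bound to the calling thread.
 */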

/*
 * __cilkrts_get_sf
 *
 * Debugging aid to provide access to the current __cilkrts_stack_frame.
 */
CILK_ABI(__cilkrts_stack_frame*)
__cilkrts_get_sf(void)
{
    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    if (0 == w)
        return NULL;

    return w->current_stack_frame;
}

/* Call with global lock held */
static __cilkrts_worker *find_free_worker(global_state_t *g)
{
    __cilkrts_worker *w = 0;
    int i;

    // Scan the non-system workers looking for one which is free so we can
    // use it.
    for (i = g->P - 1; i < g->total_workers; ++i) {
        w = g->workers[i];
        CILK_ASSERT(WORKER_SYSTEM != w->l->type);
        if (w->l->type == WORKER_FREE) {
            w->l->type = WORKER_USER;
            w->l->team = w;
            return w;
        }
    }

    // If we ran out of workers, create a new one.  It doesn't actually belong
    // to the Cilk global state so nobody will ever try to steal from it.
    w = (__cilkrts_worker *)__cilkrts_malloc(sizeof(*w));
    __cilkrts_cilkscreen_ignore_block(w, w+1);
    make_worker(g, -1, w);
    w->l->type = WORKER_USER;
    w->l->team = w;
    return w;
}

/*
 * __cilkrts_bind_thread
 *
 * Exported function to bind a thread to the runtime.
 *
 * This function name should always have a trailing suffix for the latest ABI
 * version.  This means that code built with a new compiler will not load
 * against an old copy of the runtime.
 *
 * Symbols for the function called by code compiled with old versions of the
 * compiler are created in an OS-specific manner:
 * - On Windows the old symbols are defined in the cilk-exports.def linker
 *   definitions file as aliases of BIND_THREAD_RTN
 * - On Linux aliased symbols are created for BIND_THREAD_RTN in this file
 * - On MacOS the alternate entrypoints are implemented and simply call
 *   BIND_THREAD_RTN.
 */
CILK_ABI_WORKER_PTR BIND_THREAD_RTN(void)
{
    __cilkrts_worker *w;
    int start_cilkscreen = 0;
#ifdef USE_ITTNOTIFY
    static int unique_obj;
#endif

    // Cannot set this pointer until after __cilkrts_init_internal() call:
    global_state_t *g;

    ITT_SYNC_CREATE (&unique_obj, "Initialization");
    ITT_SYNC_PREPARE(&unique_obj);
    ITT_SYNC_ACQUIRED(&unique_obj);

    /* 1: Initialize and start the Cilk runtime */
    __cilkrts_init_internal(1);

    /*
     * 2: Choose a worker for this thread (fail if none left).  The table of
     *    user workers is protected by the global OS mutex lock.
     */
    g = cilkg_get_global_state();
    global_os_mutex_lock();
    if (__builtin_expect(g->work_done, 0))
        __cilkrts_bug("Attempt to enter Cilk while Cilk is shutting down");
    w = find_free_worker(g);
    CILK_ASSERT(w);

    __cilkrts_set_tls_worker(w);
    __cilkrts_cilkscreen_establish_worker(w);

    START_INTERVAL(w, INTERVAL_IN_SCHEDULER);
    START_INTERVAL(w, INTERVAL_IN_RUNTIME);

    {
        full_frame *ff = __cilkrts_make_full_frame(w, 0);

        ff->fiber_self = cilk_fiber_allocate_from_thread();
        CILK_ASSERT(ff->fiber_self);

        cilk_fiber_set_owner(ff->fiber_self, w);
        cilk_fiber_tbb_interop_use_saved_stack_op_info(ff->fiber_self);

        CILK_ASSERT(ff->join_counter == 0);
        ff->join_counter = 1;

        w->l->frame_ff = ff;
        w->reducer_map = __cilkrts_make_reducer_map(w);
        __cilkrts_set_leftmost_reducer_map(w->reducer_map, 1);
        load_pedigree_leaf_into_user_worker(w);
    }

    // Make sure that the head and tail are reset, and saved_protected_tail
    // allows all frames to be stolen.
    //
    // Note that we must NOT check w->exc, since workers that are trying to
    // steal from it will be updating w->exc and we don't own the worker lock.
    // It's not worth taking out the lock just for an assertion.
    CILK_ASSERT(w->head == w->l->ltq);
    CILK_ASSERT(w->tail == w->l->ltq);
    CILK_ASSERT(w->protected_tail == w->ltq_limit);

    // There may have been an old pending exception which was freed when the
    // exception was caught outside of Cilk
    w->l->pending_exception = NULL;

    // If we've already created a scheduling fiber for this worker, we'll just
    // reuse it.  If w->self < 0, it means that this is an ad-hoc user worker
    // not known to the global state.  Thus, we need to create a scheduling
    // stack only if we don't already have one and w->self >= 0.
    if (NULL == w->l->scheduling_fiber && w->self >= 0)
    {
        START_INTERVAL(w, INTERVAL_FIBER_ALLOCATE) {
            // Create a scheduling fiber for this worker.
            w->l->scheduling_fiber =
                cilk_fiber_allocate_from_heap(CILK_SCHEDULING_STACK_SIZE);
            cilk_fiber_reset_state(w->l->scheduling_fiber,
                                   scheduler_fiber_proc_for_user_worker);
            cilk_fiber_set_owner(w->l->scheduling_fiber, w);
        } STOP_INTERVAL(w, INTERVAL_FIBER_ALLOCATE);
    }

    // If the scheduling fiber is NULL, we've either exceeded our quota for
    // fibers or workers or we're out of memory, so we should lose parallelism
    // by disallowing stealing.
    if (NULL == w->l->scheduling_fiber)
        __cilkrts_disallow_stealing(w, NULL);

    start_cilkscreen = (0 == w->g->Q);

    if (w->self != -1) {
        // w->self != -1, means that w is a normal user worker and must be
        // accounted for by the global state since other workers can steal
        // from it.
        //
        // w->self == -1, means that w is an overflow worker and was created
        // on demand.  I.e., it does not need to be accounted for by the
        // global state.

        __cilkrts_enter_cilk(w->g);
    }

    global_os_mutex_unlock();

    /* We are about to switch back into user code after binding the
       thread.  Start working again. */
    STOP_INTERVAL(w, INTERVAL_IN_RUNTIME);
    START_INTERVAL(w, INTERVAL_WORKING);

    ITT_SYNC_RELEASING(&unique_obj);

    /* Turn on Cilkscreen if this is the first worker.  This needs to be done
     * when we are NOT holding the os mutex. */
    if (start_cilkscreen)
        __cilkrts_cilkscreen_enable_instrumentation();

    return w;
}

/*
 * Define old version-specific symbols for binding threads (since they exist in
 * all Cilk code).  These aliases prohibit newly compiled code from loading an
 * old version of the runtime.  We can handle old code with a new runtime, but
 * new code with an old runtime is verboten!
 *
 * For Windows, the aliased symbol is exported in cilk-exports.def.
 */
#ifndef _MSC_VER
#if defined(_DARWIN_C_SOURCE) || defined(__APPLE__)
/**
 * Mac OS X: Unfortunately, Darwin doesn't allow aliasing, so we just make a
 * call and hope the optimizer does the right thing.
 */
CILK_ABI_WORKER_PTR __cilkrts_bind_thread (void) {
    return BIND_THREAD_RTN();
}
#else
/**
 * Macro to convert a parameter to a string.  Used on Linux or BSD.
 */
#define STRINGIFY(x) #x

/**
 * Macro to generate an __attribute__ for an aliased name
 */
#define ALIASED_NAME(x) __attribute__ ((alias (STRINGIFY(x))))

/**
 * Linux or BSD: Use the alias attribute to make the labels for the versioned
 * functions point to the same place in the code as the original.  Using
 * the two macros is annoying but required.
 */
CILK_ABI_WORKER_PTR __cilkrts_bind_thread(void)
    ALIASED_NAME(BIND_THREAD_RTN);

#endif // defined _DARWIN_C_SOURCE || defined __APPLE__
#endif // !defined _MSC_VER
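
/*
 * For clarity, the Linux/BSD branch above expands to
 *
 *   CILK_ABI_WORKER_PTR __cilkrts_bind_thread(void)
 *       __attribute__ ((alias ("__cilkrts_bind_thread_1")));
 *
 * i.e., the unversioned symbol is simply another label for the current
 * BIND_THREAD_RTN definition.
 */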

CILK_API_SIZET
__cilkrts_get_stack_size(void) {
    return cilkg_get_stack_size();
}

// Method for debugging.
CILK_API_VOID __cilkrts_dump_stats(void)
{
    // While the stats aren't protected by the global OS mutex, the table
    // of workers is, so take out the global OS mutex while we're doing this
    global_os_mutex_lock();
    if (cilkg_is_published()) {
        global_state_t *g = cilkg_get_global_state();
        __cilkrts_dump_stats_to_stderr(g);
    }
    else {
        __cilkrts_bug("Attempting to report Cilk stats before the runtime has started\n");
    }
    global_os_mutex_unlock();
}
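
/*
 * Usage sketch (illustrative): dump the table after the parallel work has
 * run, e.g. at the end of main().  run_cilk_workload is a hypothetical
 * function; stats are only collected in runtimes built with stats enabled.
 *
 *   int main(void) {
 *       run_cilk_workload();      // spawns and syncs
 *       __cilkrts_dump_stats();   // report to stderr
 *       return 0;
 *   }
 */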

#ifndef _WIN32
CILK_ABI_THROWS_VOID __cilkrts_rethrow(__cilkrts_stack_frame *sf)
{
    __cilkrts_gcc_rethrow(sf);
}
#endif

/*
 * __cilkrts_unwatch_stack
 *
 * Callback for TBB to tell us they don't want to watch the stack anymore
 */
static __cilk_tbb_retcode __cilkrts_unwatch_stack(void *data)
{
    __cilk_tbb_stack_op_thunk o;

    // If the cilk_fiber wasn't available fetch it now
    if (TBB_INTEROP_DATA_DELAYED_UNTIL_BIND == data)
    {
        full_frame *ff;
        __cilkrts_worker *w = __cilkrts_get_tls_worker();
        if (NULL == w)
        {
            // Free any saved stack op information
            cilk_fiber_tbb_interop_free_stack_op_info();

            return 0;       /* Success! */
        }

        __cilkrts_worker_lock(w);
        ff = w->l->frame_ff;
        __cilkrts_frame_lock(w,ff);
        data = ff->fiber_self;
        __cilkrts_frame_unlock(w,ff);
        __cilkrts_worker_unlock(w);
    }

#if CILK_LIB_DEBUG /* Debug code */
    /* Get current stack */
    full_frame *ff;
    __cilkrts_worker *w = __cilkrts_get_tls_worker();
    __cilkrts_worker_lock(w);
    ff = w->l->frame_ff;
    __cilkrts_frame_lock(w,ff);
    CILK_ASSERT (data == ff->fiber_self);
    __cilkrts_frame_unlock(w,ff);
    __cilkrts_worker_unlock(w);
#endif

    /* Clear the callback information */
    o.routine = NULL;
    o.data = NULL;
    cilk_fiber_set_stack_op((cilk_fiber*)data, o);

    // Note. Do *NOT* free any saved stack information here.  If they want to
    // free the saved stack op information, they'll do it when the thread is
    // unbound.

    return 0;       /* Success! */
}

/*
 * __cilkrts_watch_stack
 *
 * Called by TBB, defined by Cilk.
 *
 * Requests that Cilk invoke the stack op routine when it orphans a stack.
 * Cilk sets *u to a thunk that TBB should call when it is no longer interested
 * in watching the stack.
 */
CILK_API_TBB_RETCODE
__cilkrts_watch_stack(__cilk_tbb_unwatch_thunk *u,
                      __cilk_tbb_stack_op_thunk o)
{
    cilk_fiber* current_fiber;
    __cilkrts_worker *w;

#ifdef _MSC_VER
    // This may be called by TBB *before* the OS has given us our
    // initialization call.  Make sure the module is initialized.
    sysdep_init_module();
#endif

    // Fetch the __cilkrts_worker bound to this thread
    w = __cilkrts_get_tls_worker();
    if (NULL == w)
    {
        // Save data for later.  We'll deal with it when/if this thread binds
        // to the runtime.
        cilk_fiber_tbb_interop_save_stack_op_info(o);

        u->routine = __cilkrts_unwatch_stack;
        u->data = TBB_INTEROP_DATA_DELAYED_UNTIL_BIND;
        return 0;
    }

    /* Get current stack */
    __cilkrts_worker_lock(w);
    current_fiber = w->l->frame_ff->fiber_self;
    __cilkrts_worker_unlock(w);

/*    CILK_ASSERT( !sd->stack_op_data ); */
/*    CILK_ASSERT( !sd->stack_op_routine ); */

    /* Give TBB our callback */
    u->routine = __cilkrts_unwatch_stack;
    u->data = current_fiber;
    /* Save the callback information */
    cilk_fiber_set_stack_op(current_fiber, o);

    return 0;   /* Success! */
}
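
/*
 * Usage sketch (illustrative, from the TBB side of the interop protocol).
 * my_stack_op and my_data are hypothetical; the thunk layouts come from
 * cilk-tbb-interop.h:
 *
 *   __cilk_tbb_stack_op_thunk o = { my_stack_op, my_data };
 *   __cilk_tbb_unwatch_thunk u;
 *   if (0 == __cilkrts_watch_stack(&u, o)) {
 *       // Cilk will now invoke my_stack_op on stack orphan/adopt events.
 *       ...
 *       u.routine(u.data);   // no longer interested: unwatch
 *   }
 */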

// This function must be called only within a continuation, within the stack
// frame of the continuation itself.
CILK_API_INT __cilkrts_synched(void)
{
    __cilkrts_worker *w = __cilkrts_get_tls_worker();

    // If we don't have a worker, then we're synched by definition :o)
    if (NULL == w)
        return 1;

    // Check to see if we are in a stolen continuation.  If not, then
    // we are synched.
    uint32_t flags = w->current_stack_frame->flags;
    if (0 == (flags & CILK_FRAME_UNSYNCHED))
        return 1;

    // We are in a stolen continuation, but the join counter might have been
    // decremented to one, making us synched again.  Get the full frame so
    // that we can check the join counter.  ASSUME: frame_ff is stable (can be
    // read without a lock) in a stolen continuation -- it can't be stolen
    // while it's currently executing.
    full_frame *ff = w->l->frame_ff;

    // Make sure we have a full frame
    // TBD: Don't think that we should ever not have a full frame here.
    // CILK_ASSERT(NULL != ff); ?
    if (NULL == ff)
        return 1;

    // We're synched if there are no outstanding children at this instant in
    // time.  Note that this is a known race, but it's ok since we're only
    // reading.  We can get false negatives, but not false positives.  (I.e.,
    // we can read a non-one join_counter just before it goes to one, but the
    // join_counter cannot go from one to greater than one while we're
    // reading.)
    return 1 == ff->join_counter;
}
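
/*
 * Usage sketch (illustrative): __cilkrts_synched() lets a continuation ask
 * whether any spawned children are still outstanding, e.g. to skip work
 * that is only needed when a steal actually happened.  f and g are
 * hypothetical user functions:
 *
 *   x = cilk_spawn f();
 *   g();                         // continuation executes here
 *   if (!__cilkrts_synched()) {
 *       ...                      // some child of this frame may still run
 *   }
 *   cilk_sync;
 *
 * Per the race note above, a zero result may be stale by the time it is
 * read; a nonzero result is reliable.
 */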

CILK_API_INT
__cilkrts_bump_loop_rank_internal(__cilkrts_worker* w)
{
    // If we don't have a worker, then the runtime is not bound to this
    // thread and there is no rank to increment
    if (NULL == w)
        return -1;

    // We're at the start of the loop body.  Advance the cilk_for loop
    // body pedigree by following the parent link and updating its
    // rank.

    // Normally, we'd just write "w->pedigree.parent->rank++"
    // But we need to cast away the "const".
    ((__cilkrts_pedigree*) w->pedigree.parent)->rank++;

    // Zero the worker's pedigree rank since this is the start of a new
    // pedigree domain.
    w->pedigree.rank = 0;

    return 0;
}
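
/*
 * Context sketch (illustrative): a cilk_for loop body lives in its own
 * pedigree domain, and this helper runs once at the top of each iteration
 * (an inlinable __cilkrts_bump_loop_rank wrapper in cilk_api.h supplies
 * the current worker).  Conceptually:
 *
 *   for (...) {                                // each cilk_for iteration
 *       __cilkrts_bump_loop_rank_internal(w);  // parent rank++, our rank = 0
 *       ...                                    // spawns in the body are
 *   }                                          // numbered under the new rank
 */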

CILK_ABI_VOID
__cilkrts_save_fp_ctrl_state(__cilkrts_stack_frame *sf)
{
    // Pass call onto OS/architecture dependent function
    sysdep_save_fp_ctrl_state(sf);
}

/* End cilk-abi.c */