/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include "lint.h"
#include "thr_uberdata.h"
/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited no action is taken.
 * Else send SIGCANCEL to request the other thread to cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}
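
/*
 * Editorial example, not part of libc: the caller's view of the
 * request/acknowledge protocol implemented above, assuming the default
 * deferred cancellation type.  The names cancel_demo_worker() and
 * cancel_demo() are hypothetical; kept under #if 0 so it is never
 * compiled into the library.
 */
#if 0
#include <pthread.h>
#include <unistd.h>

static void *
cancel_demo_worker(void *arg)
{
	for (;;)
		(void) pause();		/* a cancellation point */
	return (NULL);
}

static void
cancel_demo(void)
{
	pthread_t tid;
	void *status;

	(void) pthread_create(&tid, NULL, cancel_demo_worker, NULL);
	(void) pthread_cancel(tid);	/* sends SIGCANCEL to the target */
	(void) pthread_join(tid, &status);
	/* status is now PTHREAD_CANCELED */
}
#endif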
/*
 * pthread_setcancelstate: sets the state ENABLED or DISABLED.
 * If the state is already ENABLED or is being set to ENABLED,
 * the type of cancellation is ASYNCHRONOUS, and a cancel request
 * is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical() and this
	 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
	 * is called.  (self->ul_cancel_pending is set in the SIGCANCEL
	 * handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}
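
/*
 * Editorial example, not part of libc: disabling cancellation across a
 * non-reentrant critical section and then restoring the previous state.
 * protected_update() is a hypothetical function.
 */
#if 0
#include <pthread.h>

static void
protected_update(void)
{
	int oldstate;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... touch shared state; no cancellation can occur here ... */
	(void) pthread_setcancelstate(oldstate, NULL);
	/*
	 * A cancel request that arrived while disabled stays pending and
	 * is acted on at the next cancellation point (or right in the
	 * call above if the type is ASYNCHRONOUS).
	 */
}
#endif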
/*
 * pthread_setcanceltype: sets the type DEFERRED or ASYNCHRONOUS.
 * If the type is being set as ASYNC, then it becomes
 * a cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}
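
/*
 * Editorial example, not part of libc: asynchronous cancellation around
 * a pure compute loop that makes no library calls and so would otherwise
 * never reach a cancellation point.  spin_worker() is hypothetical.
 */
#if 0
#include <pthread.h>

static void *
spin_worker(void *arg)
{
	int oldtype;

	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	for (;;)
		continue;	/* now cancelable at any instant */
	/* NOTREACHED */
}
#endif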
/*
 * pthread_testcancel: tests for any cancellation pending.
 * If cancellation is enabled and a cancel is pending, act on
 * it by calling pthread_exit(), which takes care of calling
 * the cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}
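
/*
 * Editorial example, not part of libc: a compute-bound loop that polls
 * for a pending cancel once per iteration.  work_chunk() is a
 * hypothetical function containing no cancellation points.
 */
#if 0
#include <pthread.h>

extern void work_chunk(void);

static void *
polling_worker(void *arg)
{
	for (;;) {
		work_chunk();
		pthread_testcancel();	/* exits here if a cancel is pending */
	}
	/* NOTREACHED */
}
#endif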
/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from the functions which want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}
/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions which are cancellation points, like cond_wait().
 */
void
_canceloff()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}
/*
 * Same as _canceloff() but don't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}
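
/*
 * Editorial sketch, not part of libc: the bracketing pattern the comments
 * above describe, as a blocking cancellation point might apply it.
 * blocking_call() is hypothetical; _cancelon()/_canceloff() are the
 * routines defined above.
 */
#if 0
#include <errno.h>

extern int blocking_call(void);

static int
cancellation_point(void)
{
	int error;

	_cancelon();			/* cancelable while we block */
	error = blocking_call();	/* interrupted by SIGCANCEL => EINTR */
	if (error == EINTR)
		_canceloff();		/* may exit via pthread_exit() */
	else
		_canceloff_nocancel();	/* real result; don't cancel now */
	return (error);
}
#endif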
/*
 * __pthread_cleanup_push: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_push().  The macro in pthread.h allocates the
 * cleanup struct and calls this routine to push the handler onto
 * curthread's list of cleanup handlers.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
	void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}
/*
 * __pthread_cleanup_pop: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_pop().  It calls this routine to pop the
 * handler off curthread's list and execute it if necessary.
 */
/* ARGSUSED1 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}
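
/*
 * Editorial example, not part of libc: the POSIX macros that expand into
 * calls to the two routines above.  pthread_cleanup_push() registers the
 * handler and pthread_cleanup_pop(1) pops and runs it, so buf is freed
 * whether the thread is canceled inside the region or leaves it normally.
 * cleanup_demo() is hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static void *
cleanup_demo(void *arg)
{
	char *buf = malloc(128);

	pthread_cleanup_push(free, buf);
	/* ... blocking work; cancellation here still runs free(buf) ... */
	pthread_cleanup_pop(1);		/* pop and execute the handler */
	return (NULL);
}
#endif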
/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}
/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}
/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		(void) __lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}
/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}