2 * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
3 * Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice(s), this list of conditions and the following disclaimer as
11 * the first lines of this file unmodified other than the possible
12 * addition of one or more copyright notices.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice(s), this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
25 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
26 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
27 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
28 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "namespace.h"
32 #include <machine/tls.h>
34 #include <sys/queue.h>
36 #include <sys/types.h>
41 #include <semaphore.h>
47 #ifdef _PTHREADS_DEBUGGING
50 #include "un-namespace.h"
52 #include "thr_private.h"
/*
 * Compiler-only memory barrier: prevents the compiler from reordering or
 * caching memory accesses across this point; emits no machine instruction.
 */
#define cpu_ccfence()	__asm __volatile("" : : : "memory")

/*
 * Recover a pointer to the enclosing structure from a pointer to one of
 * its members.  The temporary `_p` type-checks `ptr` against the declared
 * member type.  (GNU statement-expression; the extraction had dropped the
 * ({ ... }) framing, leaving the macro syntactically invalid.)
 */
#define container_of(ptr, type, member)				\
({								\
	__typeof(((type *)0)->member) *_p = (ptr);		\
	(type *)((char *)_p - offsetof(type, member));		\
})
/*
 * NOTE(review): garbled extraction fragment — the "struct sem" declaration
 * is missing its header and remaining fields (a magic word and a semid are
 * referenced by the functions below); only the count word is visible here.
 */
63 * Semaphore definitions.
66 volatile umtx_t count	/* semaphore value; waiters sleep on this word via umtx */
;
/* Magic stamped into struct sem at creation; checked by sem_check_validity(). */
72 #define SEM_MAGIC ((u_int32_t) 0x09fa4012)
/* Default filesystem prefix for named semaphores (see get_path()). */
74 static char const *sem_prefix
= "/var/run/sem";
78 * POSIX requires that two successive calls to sem_open return
79 * the same address if no call to unlink nor close have been
80 * done in the middle. For that, we keep a list of open semaphore
81 * and search for an existing one before remapping a semaphore.
82 * We have to keep the fd open to check for races.
85 * sem_open("/test", O_CREAT | O_EXCL...) -> fork() ->
87 * sem_unlink("/test") -> sem_open("/test", O_CREAT | O_EXCL ...)
89 * sem_open("/test", 0).
90 * We need to check that the cached mapping is the one of the most up
91 * to date file linked at this name, or child process will reopen the
92 * *old* version of the semaphore, which is wrong.
94 * fstat and nlink check is used to test for this race.
/*
 * NOTE(review): garbled extraction — the "struct sem_info" header and its
 * other fields (the embedded semaphore, open_count, inode, dev, fd — all
 * referenced by the mapping functions below) are missing; only the list
 * linkage is visible.
 */
103 LIST_ENTRY(sem_info
) next
;
/* Protects sem_list and its entries; also taken across fork (see hooks below). */
106 static pthread_mutex_t sem_lock
;
/* All currently-open named semaphores, searched by sem_get_mapping(). */
107 static LIST_HEAD(,sem_info
) sem_list
= LIST_HEAD_INITIALIZER(sem_list
);
109 #ifdef _PTHREADS_DEBUGGING
/*
 * sem_log(): debug-build logger.  NOTE(review): extraction fragment — the
 * function header, the buffer/va_list declarations, and the call that emits
 * the formatted text are missing between the visible lines.
 */
113 sem_log(const char *ctl
, ...)
120 len
= vsnprintf(buf
, sizeof(buf
), ctl
, va
);
/* Non-debug stub: same signature, discards its arguments. */
129 sem_log(const char *ctl __unused
, ...)
/* semid value marking a named (sem_open) semaphore; see sem_close_mapping(). */
137 #define SEMID_NAMED 2
/*
 * fork() interlock hooks.  NOTE(review): garbled extraction — the
 * sem_prefork()/sem_postfork() function headers are missing; only the
 * lock/unlock bodies are visible.  The parent takes sem_lock across fork
 * and both parent and child release it afterwards.
 */
142 _pthread_mutex_lock(&sem_lock
);
148 _pthread_mutex_unlock(&sem_lock
);
152 sem_child_postfork(void)
154 _pthread_mutex_unlock(&sem_lock
);
/*
 * One-time module setup: sem_lock is created recursive (presumably so the
 * atfork handlers can take it while a sem_* call already holds it — TODO
 * confirm against the full source), then the fork hooks are registered.
 */
160 pthread_mutexattr_t ma
;
162 _pthread_mutexattr_init(&ma
);
163 _pthread_mutexattr_settype(&ma
, PTHREAD_MUTEX_RECURSIVE
);
164 _pthread_mutex_init(&sem_lock
, &ma
);
165 _pthread_mutexattr_destroy(&ma
);
166 _thr_atfork_kern(sem_prefork
, sem_postfork
, sem_child_postfork
);
170 sem_check_validity(sem_t
*sem
)
173 if ((sem
!= NULL
) && (*sem
!= NULL
) && ((*sem
)->magic
== SEM_MAGIC
)) {
/*
 * sem_alloc(): allocate and initialize a struct sem with the given initial
 * count.  NOTE(review): garbled extraction — the pshared/private branch
 * structure, the mmap failure handling, and the function's return
 * statements are missing between the visible lines.
 */
182 sem_alloc(unsigned int value
, int pshared
)
/* POSIX: the initial value may not exceed SEM_VALUE_MAX. */
187 if (value
> SEM_VALUE_MAX
) {
/*
 * Per-thread page apparently carved into struct sem slots for the pshared
 * case (MAP_ANON | MAP_SHARED survives fork) — TODO confirm against the
 * full source.
 */
192 static __thread sem_t sem_base
;
193 static __thread
int sem_count
;
195 if (sem_base
== NULL
) {
196 sem_base
= mmap(NULL
, getpagesize(),
197 PROT_READ
| PROT_WRITE
,
198 MAP_ANON
| MAP_SHARED
,
/* Number of struct sem slots that fit in one page. */
200 sem_count
= getpagesize() / sizeof(*sem
);
203 if (--sem_count
== 0)
/* Non-shared case: plain heap allocation. */
207 sem
= __malloc(sizeof(struct sem
));
/* Stamp the magic and initial count common to both cases. */
214 sem
->magic
= SEM_MAGIC
;
215 sem
->count
= (u_int32_t
)value
;
218 sem_log("sem_alloc %p (%d)\n", sem
, value
);
224 _sem_init(sem_t
*sem
, int pshared
, unsigned int value
)
231 *sem
= sem_alloc(value
, pshared
);
/*
 * sem_destroy(): tear down an unnamed semaphore.  NOTE(review): garbled
 * extraction — the switch cases (which dispatch on how sem_alloc()
 * obtained the storage) and the return statements are missing.
 */
238 _sem_destroy(sem_t
*sem
)
240 if (sem_check_validity(sem
) != 0) {
/* Dispatch on the storage class recorded at allocation time. */
247 switch ((*sem
)->semid
) {
252 /* memory is left intact */
262 _sem_getvalue(sem_t
* __restrict sem
, int * __restrict sval
)
264 if (sem_check_validity(sem
) != 0) {
268 *sval
= (*sem
)->count
;
274 _sem_trywait(sem_t
*sem
)
278 if (sem_check_validity(sem
) != 0) {
283 sem_log("sem_trywait %p %d\n", *sem
, (*sem
)->count
);
284 while ((val
= (*sem
)->count
) > 0) {
286 if (atomic_cmpset_int(&(*sem
)->count
, val
, val
- 1)) {
287 sem_log("sem_trywait %p %d (success)\n", *sem
, val
- 1);
292 sem_log("sem_trywait %p %d (failure)\n", *sem
, val
);
297 _sem_wait(sem_t
*sem
)
300 int val
, oldcancel
, retval
;
302 if (sem_check_validity(sem
) != 0) {
307 curthread
= tls_get_curthread();
308 _pthread_testcancel();
310 sem_log("sem_wait %p %d (begin)\n", *sem
, (*sem
)->count
);
314 while ((val
= (*sem
)->count
) > 0) {
316 if (atomic_cmpset_acq_int(&(*sem
)->count
, val
, val
- 1)) {
317 sem_log("sem_wait %p %d (success)\n",
322 oldcancel
= _thr_cancel_enter(curthread
);
323 sem_log("sem_wait %p %d (wait)\n", *sem
, val
);
324 retval
= _thr_umtx_wait_intr(&(*sem
)->count
, 0);
325 sem_log("sem_wait %p %d (wait return %d)\n",
326 *sem
, (*sem
)->count
, retval
);
327 _thr_cancel_leave(curthread
, oldcancel
);
329 } while (retval
!= EINTR
);
331 sem_log("sem_wait %p %d (error %d)\n", *sem
, retval
);
338 _sem_timedwait(sem_t
* __restrict sem
, const struct timespec
* __restrict abstime
)
340 struct timespec ts
, ts2
;
342 int val
, oldcancel
, retval
;
344 if (sem_check_validity(sem
) != 0)
347 curthread
= tls_get_curthread();
348 _pthread_testcancel();
349 sem_log("sem_timedwait %p %d (begin)\n", *sem
, (*sem
)->count
);
352 * The timeout argument is only supposed to
353 * be checked if the thread would have blocked.
356 while ((val
= (*sem
)->count
) > 0) {
358 if (atomic_cmpset_acq_int(&(*sem
)->count
, val
, val
- 1)) {
359 sem_log("sem_wait %p %d (success)\n",
364 if (abstime
== NULL
||
365 abstime
->tv_nsec
>= 1000000000 ||
366 abstime
->tv_nsec
< 0) {
367 sem_log("sem_wait %p %d (bad abstime)\n", *sem
, val
);
371 clock_gettime(CLOCK_REALTIME
, &ts
);
372 timespecsub(abstime
, &ts
, &ts2
);
373 oldcancel
= _thr_cancel_enter(curthread
);
374 sem_log("sem_wait %p %d (wait)\n", *sem
, val
);
375 retval
= _thr_umtx_wait(&(*sem
)->count
, 0, &ts2
,
377 sem_log("sem_wait %p %d (wait return %d)\n",
378 *sem
, (*sem
)->count
, retval
);
379 _thr_cancel_leave(curthread
, oldcancel
);
380 } while (retval
!= ETIMEDOUT
&& retval
!= EINTR
);
382 sem_log("sem_wait %p %d (error %d)\n", *sem
, retval
);
389 _sem_post(sem_t
*sem
)
393 if (sem_check_validity(sem
) != 0)
397 * sem_post() is required to be safe to call from within
398 * signal handlers, these code should work as that.
400 val
= atomic_fetchadd_int(&(*sem
)->count
, 1) + 1;
401 sem_log("sem_post %p %d\n", *sem
, val
);
402 _thr_umtx_wake(&(*sem
)->count
, 0);
408 get_path(const char *name
, char *path
, size_t len
, char const **prefix
)
414 if (name
[0] == '/') {
415 *prefix
= getenv("LIBTHREAD_SEM_PREFIX");
418 *prefix
= sem_prefix
;
420 path_len
= strlcpy(path
, *prefix
, len
);
422 if (path_len
> len
) {
423 return (ENAMETOOLONG
);
427 path_len
= strlcat(path
, name
, len
);
430 return (ENAMETOOLONG
);
/*
 * sem_get_mapping(): look up an already-open named semaphore by the
 * identity (st_ino, st_dev) of its backing file, so repeated sem_open()
 * calls return the same address (see the POSIX note near the top of the
 * file).  Called with sem_lock held by _sem_open().  NOTE(review):
 * garbled extraction — the success/failure return paths and the cleanup
 * of a stale entry are missing between the visible lines.
 */
437 sem_get_mapping(ino_t inode
, dev_t dev
)
442 LIST_FOREACH(ni
, &sem_list
, next
) {
443 if (ni
->inode
== inode
&& ni
->dev
== dev
) {
444 /* Check for races */
/* The cached entry is only trusted while the file is still linked. */
445 if(_fstat(ni
->fd
, &sbuf
) == 0) {
446 if (sbuf
.st_nlink
> 0) {
/* Stale entry (file unlinked or replaced): drop it from the cache. */
451 LIST_REMOVE(ni
, next
);
/*
 * sem_add_mapping(): record a newly-mapped named semaphore in sem_list so
 * later sem_open() calls can reuse the mapping.  Called with sem_lock
 * held.  NOTE(review): garbled extraction — the allocation-failure path
 * and the field assignments (inode/dev/fd/open_count and the semaphore
 * itself) are missing between the visible lines.
 */
464 sem_add_mapping(ino_t inode
, dev_t dev
, sem_t sem
, int fd
)
468 ni
= __malloc(sizeof(struct sem_info
));
/* Start from a zeroed entry before filling in the identity fields. */
474 bzero(ni
, sizeof(*ni
));
481 LIST_INSERT_HEAD(&sem_list
, ni
, next
);
/*
 * sem_close_mapping(): drop one reference to a named semaphore and, on
 * the last close, remove it from the cache and unmap its page.  Called
 * with sem_lock held by _sem_close().  NOTE(review): garbled extraction —
 * the return statements, the descriptor close and the final free of the
 * sem_info entry are missing between the visible lines.
 */
487 sem_close_mapping(sem_t
*sem
)
/* Only named (sem_open) semaphores live in the mapping cache. */
491 if ((*sem
)->semid
!= SEMID_NAMED
)
/* Recover the cache entry that embeds this sem_t handle. */
494 ni
= container_of(sem
, struct sem_info
, sem
);
/* Still referenced by other sem_open() handles: just decrement. */
496 if ( --ni
->open_count
> 0) {
/* inode != 0 presumably means still on sem_list — verify vs full source. */
499 if (ni
->inode
!= 0) {
500 LIST_REMOVE(ni
, next
);
502 munmap(ni
->sem
, getpagesize());
/*
 * sem_open(): open or create a named semaphore backed by a file under
 * the prefix chosen by get_path().  NOTE(review): heavily garbled
 * extraction — variable declarations, error labels and many control-flow
 * lines are missing between the visible fragments; the comments below
 * describe only what the visible lines establish.
 */
510 _sem_open(const char *name
, int oflag
, ...)
513 char tmppath
[PATH_MAX
];
514 char const *prefix
= NULL
;
516 int error
, fd
, create
;
/* Initial count for the O_CREAT case, read from the varargs below. */
522 unsigned int value
= 0;
530 * Bail out if invalid flags specified.
/* Only O_CREAT and O_EXCL are meaningful to sem_open(). */
532 if (oflag
& ~(O_CREAT
|O_EXCL
)) {
/* Serialize against other sem_open/sem_close and fork activity. */
545 _pthread_mutex_lock(&sem_lock
);
547 error
= get_path(name
, path
, PATH_MAX
, &prefix
);
/* First try to open an existing semaphore file. */
554 fd
= __sys_open(path
, O_RDWR
| O_CLOEXEC
);
/* Open succeeded, but O_CREAT|O_EXCL demands the name not exist. */
558 if ((oflag
& O_EXCL
) == O_EXCL
) {
564 if (_fstat(fd
, &sbuf
) != 0) {
565 /* Bad things happened, like another thread closing our descriptor */
/* Reuse a cached mapping for this file identity when available. */
571 sem
= sem_get_mapping(sbuf
.st_ino
, sbuf
.st_dev
);
573 if (sem
!= SEM_FAILED
) {
578 if ((sbuf
.st_mode
& S_IFREG
) == 0) {
579 /* We only want regular files here */
/* Open failed with ENOENT and O_CREAT was given: create a new one. */
584 } else if ((oflag
& O_CREAT
) && errno
== ENOENT
) {
588 mode
= (mode_t
) va_arg(ap
, int);
589 value
= (unsigned int) va_arg(ap
, int);
/* POSIX: the initial value may not exceed SEM_VALUE_MAX. */
593 if (value
> SEM_VALUE_MAX
) {
/* Create under a temporary name, then link() into place atomically. */
598 strlcpy(tmppath
, prefix
, sizeof(tmppath
));
599 path_len
= strlcat(tmppath
, "/sem.XXXXXX", sizeof(tmppath
));
601 if (path_len
> sizeof(tmppath
)) {
602 errno
= ENAMETOOLONG
;
607 fd
= mkstemp(tmppath
);
614 error
= fchmod(fd
, mode
);
621 error
= __sys_fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
/* Map the file shared so every opener sees the same count word. */
649 semtmp
= (sem_t
) mmap(NULL
, getpagesize(), PROT_READ
| PROT_WRITE
,
650 MAP_NOSYNC
| MAP_SHARED
, fd
, 0);
652 if (semtmp
== MAP_FAILED
) {
653 if (errno
!= EACCES
&& errno
!= EMFILE
)
/* Size the backing file and stamp the semaphore header. */
664 ftruncate(fd
, sizeof(struct sem
));
665 semtmp
->magic
= SEM_MAGIC
;
666 semtmp
->count
= (u_int32_t
)value
;
667 semtmp
->semid
= SEMID_NAMED
;
/* Publish: link() fails with EEXIST if another creator won the race. */
669 if (link(tmppath
, path
) != 0) {
670 munmap(semtmp
, getpagesize());
/* Lost a create race without O_EXCL: retry by opening the winner. */
674 if (errno
== EEXIST
&& (oflag
& O_EXCL
) == 0) {
682 if (_fstat(fd
, &sbuf
) != 0) {
683 /* Bad things happened, like another thread closing our descriptor */
684 munmap(semtmp
, getpagesize());
/* Cache the new mapping so future opens return the same address. */
691 sem
= sem_add_mapping(sbuf
.st_ino
, sbuf
.st_dev
, semtmp
, fd
);
694 _pthread_mutex_unlock(&sem_lock
);
698 _pthread_mutex_unlock(&sem_lock
);
704 _sem_close(sem_t
*sem
)
706 _pthread_mutex_lock(&sem_lock
);
708 if (sem_check_validity(sem
)) {
709 _pthread_mutex_unlock(&sem_lock
);
714 if (sem_close_mapping(sem
)) {
715 _pthread_mutex_unlock(&sem_lock
);
719 _pthread_mutex_unlock(&sem_lock
);
725 _sem_unlink(const char *name
)
731 error
= get_path(name
, path
, PATH_MAX
, &prefix
);
737 error
= _unlink(path
);
740 if (errno
!= ENAMETOOLONG
&& errno
!= ENOENT
)
/* Export each internal _sem_* implementation under its public POSIX name. */
__strong_reference(_sem_destroy, sem_destroy);
__strong_reference(_sem_getvalue, sem_getvalue);
__strong_reference(_sem_init, sem_init);
__strong_reference(_sem_trywait, sem_trywait);
__strong_reference(_sem_wait, sem_wait);
__strong_reference(_sem_timedwait, sem_timedwait);
__strong_reference(_sem_post, sem_post);
__strong_reference(_sem_open, sem_open);
__strong_reference(_sem_close, sem_close);
__strong_reference(_sem_unlink, sem_unlink);