/*
 * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
 * Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "namespace.h"
#include <machine/tls.h>
#include <sys/semaphore.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "un-namespace.h"
#include "thr_private.h"
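
/*
 * container_of(): given a pointer to a member of a structure, recover a
 * pointer to the enclosing structure.  Used below to map a sem_t * back
 * to the struct sem_info that embeds it.
 */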
#define container_of(ptr, type, member)				\
({								\
	__typeof(((type *)0)->member) *_p = (ptr);		\
	(type *)((char *)_p - offsetof(type, member));		\
})
/*
 * Semaphore definitions.
 */
struct sem {
	u_int32_t	magic;		/* SEM_MAGIC when initialized */
	volatile umtx_t	count;		/* current semaphore value */
	int		semid;		/* e.g. SEMID_NAMED for named semaphores */
};

#define SEM_MAGIC	((u_int32_t) 0x09fa4012)
static char const *sem_prefix = "/var/run/sem";
/*
 * POSIX requires that two successive calls to sem_open() return the
 * same address if no call to sem_unlink() or sem_close() has been made
 * in between.  For that, we keep a list of open semaphores and search
 * it for an existing mapping before remapping a semaphore.
 * We have to keep the fd open to check for races such as:
 *
 * sem_open("/test", O_CREAT | O_EXCL ...) -> fork() ->
 *   parent: sem_unlink("/test") -> sem_open("/test", O_CREAT | O_EXCL ...)
 *   child:  sem_open("/test", 0).
 *
 * We need to check that the cached mapping is the one for the most
 * up-to-date file linked at this name, or the child process would
 * reopen the *old* version of the semaphore, which is wrong.
 *
 * An fstat() and st_nlink check is used to detect this race.
 */
/*
 * Bookkeeping for an open named semaphore; one entry sits on sem_list
 * for each distinct semaphore file that is currently mapped.
 */
struct sem_info {
	int		open_count;	/* sem_open() references to this mapping */
	ino_t		inode;		/* identity of the backing file ... */
	dev_t		dev;		/* ... used to detect re-creation races */
	int		fd;		/* kept open so the file can be fstat()ed */
	sem_t		sem;		/* the mapped semaphore itself */
	LIST_ENTRY(sem_info) next;
};

static pthread_once_t	once = PTHREAD_ONCE_INIT;
static pthread_mutex_t	sem_lock;
static LIST_HEAD(, sem_info) sem_list = LIST_HEAD_INITIALIZER(sem_list);
#define SEMID_NAMED	2
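
/*
 * fork() interlock: hold sem_lock across fork so that sem_list and the
 * mutex itself are in a consistent state in both parent and child.
 */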
static void
sem_prefork(void)
{
	_pthread_mutex_lock(&sem_lock);
}

static void
sem_postfork(void)
{
	_pthread_mutex_unlock(&sem_lock);
}

static void
sem_child_postfork(void)
{
	_pthread_mutex_unlock(&sem_lock);
}
static void
sem_module_init(void)
{
	pthread_mutexattr_t ma;

	_pthread_mutexattr_init(&ma);
	_pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
	_pthread_mutex_init(&sem_lock, &ma);
	_pthread_mutexattr_destroy(&ma);
	_pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
}
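
/*
 * Reject sem_t handles that were never initialized (or were already
 * destroyed) by checking the magic number.
 */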
static int
sem_check_validity(sem_t *sem)
{
	if ((sem != NULL) && (*sem != NULL) && ((*sem)->magic == SEM_MAGIC)) {
		return (0);
	} else {
		errno = EINVAL;
		return (-1);
	}
}
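
/*
 * Allocate and initialize an unnamed semaphore.  Process-shared
 * semaphores are handed out from a per-thread page obtained with
 * MAP_ANON | MAP_SHARED so the memory can be shared across fork();
 * all other semaphores come from malloc().
 */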
static sem_t
sem_alloc(unsigned int value, int pshared)
{
	sem_t sem;

	if (value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return (NULL);
	}
	if (pshared) {
		static __thread sem_t sem_base;
		static __thread int sem_count;

		if (sem_base == NULL) {
			sem_base = mmap(NULL, getpagesize(),
					PROT_READ | PROT_WRITE,
					MAP_ANON | MAP_SHARED,
					-1, 0);
			sem_count = getpagesize() / sizeof(*sem);
		}
		sem = sem_base++;
		if (--sem_count == 0)
			sem_base = NULL;
	} else {
		sem = malloc(sizeof(struct sem));
	}
	if (sem == NULL) {
		errno = ENOSPC;
		return (NULL);
	}
	sem->magic = SEM_MAGIC;
	sem->count = (u_int32_t)value;
	return (sem);
}
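
/* sem_init(3): create an unnamed semaphore with the given initial count. */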
int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{
	*sem = sem_alloc(value, pshared);
	if (*sem == NULL)
		return (-1);
	return (0);
}
int
_sem_destroy(sem_t *sem)
{
	if (sem_check_validity(sem) != 0) {
		return (-1);
	}

	switch ((*sem)->semid) {
		/* memory is left intact */
	}
	return (0);
}
int
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{
	if (sem_check_validity(sem) != 0)
		return (-1);

	*sval = (*sem)->count;
	return (0);
}
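
/*
 * Decrement the count without blocking: loop on a compare-and-swap so a
 * concurrent post or wait cannot be lost, and fail with EAGAIN once the
 * count is zero.
 */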
int
_sem_trywait(sem_t *sem)
{
	int val;

	if (sem_check_validity(sem) != 0)
		return (-1);

	while ((val = (*sem)->count) > 0) {
		if (atomic_cmpset_int(&(*sem)->count, val, val - 1))
			return (0);
	}
	errno = EAGAIN;
	return (-1);
}
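
/*
 * Blocking wait: try the same compare-and-swap decrement and, while the
 * count is zero, sleep on the count word via _thr_umtx_wait() until a
 * sem_post() wakes us.  The sleep is a cancellation point.
 */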
int
_sem_wait(sem_t *sem)
{
	struct pthread *curthread;
	int val, oldcancel, retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	curthread = tls_get_curthread();
	_pthread_testcancel();
	do {
		while ((val = (*sem)->count) > 0) {
			if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
				return (0);
		}
		oldcancel = _thr_cancel_enter(curthread);
		retval = _thr_umtx_wait(&(*sem)->count, 0, NULL, 0);
		_thr_cancel_leave(curthread, oldcancel);
	} while (retval == 0);
	errno = retval;
	return (-1);
}
int
_sem_timedwait(sem_t * __restrict sem,
    const struct timespec * __restrict abstime)
{
	struct timespec ts, ts2;
	struct pthread *curthread;
	int val, oldcancel, retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	curthread = tls_get_curthread();

	/*
	 * The timeout argument is only supposed to
	 * be checked if the thread would have blocked.
	 */
	_pthread_testcancel();
	do {
		while ((val = (*sem)->count) > 0) {
			if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
				return (0);
		}
		if (abstime == NULL ||
		    abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0) {
			errno = EINVAL;
			return (-1);
		}
		clock_gettime(CLOCK_REALTIME, &ts);
		TIMESPEC_SUB(&ts2, abstime, &ts);
		oldcancel = _thr_cancel_enter(curthread);
		retval = _thr_umtx_wait(&(*sem)->count, 0, &ts2,
		    CLOCK_REALTIME);
		_thr_cancel_leave(curthread, oldcancel);
	} while (retval == 0);
	errno = retval;
	return (-1);
}
int
_sem_post(sem_t *sem)
{
	int val;

	if (sem_check_validity(sem) != 0)
		return (-1);

	/*
	 * sem_post() is required to be safe to call from within signal
	 * handlers, so the count is bumped with a lock-free
	 * compare-and-swap loop before any waiters are woken.
	 */
	do {
		val = (*sem)->count;
	} while (!atomic_cmpset_acq_int(&(*sem)->count, val, val + 1));
	_thr_umtx_wake(&(*sem)->count, val + 1);
	return (0);
}
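
/*
 * Build the filesystem path backing a named semaphore: the name is
 * appended to either the LIBTHREAD_SEM_PREFIX environment variable or
 * the default prefix.  Returns 0 on success or an errno value.
 */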
static int
get_path(const char *name, char *path, size_t len, char const **prefix)
{
	size_t path_len;

	if (name[0] == '/') {
		*prefix = getenv("LIBTHREAD_SEM_PREFIX");
		if (*prefix == NULL)
			*prefix = sem_prefix;
	}

	path_len = strlcpy(path, *prefix, len);
	if (path_len > len) {
		return (ENAMETOOLONG);
	}

	path_len = strlcat(path, name, len);
	if (path_len > len)
		return (ENAMETOOLONG);

	return (0);
}
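
/*
 * Look up a cached mapping by the backing file's inode and device.  The
 * cached fd is fstat()ed first: if the file has been unlinked in the
 * meantime (st_nlink == 0), the entry is stale and must not be reused.
 */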
static sem_t *
sem_get_mapping(ino_t inode, dev_t dev)
{
	struct sem_info *ni;
	struct stat sbuf;

	LIST_FOREACH(ni, &sem_list, next) {
		if (ni->inode == inode && ni->dev == dev) {
			/* Check for races */
			if (_fstat(ni->fd, &sbuf) == 0) {
				if (sbuf.st_nlink > 0) {
					ni->open_count++;
					return (&ni->sem);
				}
			}
			/* The file was unlinked or replaced; drop the stale entry. */
			ni->inode = 0;
			LIST_REMOVE(ni, next);
			return (SEM_FAILED);
		}
	}
	return (SEM_FAILED);
}
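
/*
 * Record a freshly mapped named semaphore on sem_list so that later
 * sem_open() calls for the same file return the same address.
 */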
static sem_t *
sem_add_mapping(ino_t inode, dev_t dev, sem_t sem, int fd)
{
	struct sem_info *ni;

	ni = malloc(sizeof(struct sem_info));
	if (ni == NULL) {
		errno = ENOSPC;
		return (SEM_FAILED);
	}

	bzero(ni, sizeof(*ni));
	ni->open_count = 1;
	ni->sem = sem;
	ni->fd = fd;
	ni->inode = inode;
	ni->dev = dev;

	LIST_INSERT_HEAD(&sem_list, ni, next);

	return (&ni->sem);
}
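
/*
 * Drop one reference to a named semaphore mapping; the entry is torn
 * down only when the last sem_open() reference goes away.
 */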
static int
sem_close_mapping(sem_t *sem)
{
	struct sem_info *ni;

	if ((*sem)->semid != SEMID_NAMED)
		return (EINVAL);

	ni = container_of(sem, struct sem_info, sem);

	if (--ni->open_count > 0) {
		return (0);
	}

	if (ni->inode != 0) {
		LIST_REMOVE(ni, next);
	}
	munmap(ni->sem, getpagesize());
	__sys_close(ni->fd);
	free(ni);
	return (0);
}
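
/*
 * Open (and possibly create) a named semaphore.  Creation populates a
 * temporary file from mkstemp() and then link()s it to the real name,
 * so a half-initialized semaphore is never exposed and EEXIST from
 * link() means somebody else won the creation race.  Already-open
 * names are satisfied from the mapping cache above.
 */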
sem_t *
_sem_open(const char *name, int oflag, ...)
{
	char path[PATH_MAX];
	char tmppath[PATH_MAX];
	char const *prefix = NULL;
	size_t path_len;
	int error, fd, create;
	sem_t *sem;
	sem_t semtmp;
	va_list ap;
	mode_t mode;
	struct stat sbuf;
	unsigned int value = 0;

	create = 0;

	/*
	 * Bail out if invalid flags specified.
	 */
	if (oflag & ~(O_CREAT|O_EXCL)) {
		errno = EINVAL;
		return (SEM_FAILED);
	}

	_pthread_once(&once, sem_module_init);
	_pthread_mutex_lock(&sem_lock);

	error = get_path(name, path, PATH_MAX, &prefix);
	if (error != 0) {
		errno = error;
		goto error;
	}
retry:
	fd = __sys_open(path, O_RDWR | O_CLOEXEC);

	if (fd >= 0) {
		/* The semaphore file already exists. */
		if ((oflag & O_EXCL) == O_EXCL) {
			errno = EEXIST;
			goto error;
		}
		if (_fstat(fd, &sbuf) != 0) {
			/* Bad things happened, like another thread closing our descriptor */
			goto error;
		}
		/* Reuse an existing mapping of this file if we have one. */
		sem = sem_get_mapping(sbuf.st_ino, sbuf.st_dev);
		if (sem != SEM_FAILED) {
			__sys_close(fd);
			goto done;
		}
		if ((sbuf.st_mode & S_IFREG) == 0) {
			/* We only want regular files here */
			errno = EINVAL;
			goto error;
		}
	} else if ((oflag & O_CREAT) && errno == ENOENT) {
		/* Fetch the mode and initial value for the new semaphore. */
		va_start(ap, oflag);
		mode = (mode_t) va_arg(ap, int);
		value = (unsigned int) va_arg(ap, int);
		va_end(ap);

		if (value > SEM_VALUE_MAX) {
			errno = EINVAL;
			goto error;
		}

		strlcpy(tmppath, prefix, sizeof(tmppath));
		path_len = strlcat(tmppath, "/sem.XXXXXX", sizeof(tmppath));
		if (path_len > sizeof(tmppath)) {
			errno = ENAMETOOLONG;
			goto error;
		}

		fd = mkstemp(tmppath);
		if (fd < 0)
			goto error;
		error = fchmod(fd, mode);
		error = __sys_fcntl(fd, F_SETFD, FD_CLOEXEC);
		create = 1;
	} else {
		goto error;
	}

	semtmp = (sem_t) mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_NOSYNC | MAP_SHARED, fd, 0);
	if (semtmp == MAP_FAILED) {
		if (errno != EACCES && errno != EMFILE)
			errno = ENOMEM;
		goto error;
	}

	if (create) {
		/* Initialize the new semaphore before publishing its name. */
		ftruncate(fd, sizeof(struct sem));
		semtmp->magic = SEM_MAGIC;
		semtmp->count = (u_int32_t)value;
		semtmp->semid = SEMID_NAMED;

		if (link(tmppath, path) != 0) {
			munmap(semtmp, getpagesize());
			if (errno == EEXIST && (oflag & O_EXCL) == 0) {
				/* Lost a creation race; open the existing file. */
				goto retry;
			}
			goto error;
		}
	}

	if (_fstat(fd, &sbuf) != 0) {
		/* Bad things happened, like another thread closing our descriptor */
		munmap(semtmp, getpagesize());
		goto error;
	}

	sem = sem_add_mapping(sbuf.st_ino, sbuf.st_dev, semtmp, fd);
done:
	_pthread_mutex_unlock(&sem_lock);
	return (sem);
error:
	_pthread_mutex_unlock(&sem_lock);
	return (SEM_FAILED);
}
int
_sem_close(sem_t *sem)
{
	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_lock);

	if (sem_check_validity(sem)) {
		_pthread_mutex_unlock(&sem_lock);
		return (-1);
	}

	if (sem_close_mapping(sem)) {
		_pthread_mutex_unlock(&sem_lock);
		return (-1);
	}
	_pthread_mutex_unlock(&sem_lock);

	return (0);
}
int
_sem_unlink(const char *name)
{
	char path[PATH_MAX];
	char const *prefix;
	int error;

	error = get_path(name, path, PATH_MAX, &prefix);
	if (error != 0) {
		errno = error;
		return (-1);
	}

	error = unlink(path);

	if (error != 0) {
		if (errno != ENAMETOOLONG && errno != ENOENT)
			errno = EACCES;
		return (-1);
	}

	return (0);
}
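
/*
 * Export the unprefixed POSIX names as strong aliases of the internal
 * _sem_*() implementations above.
 */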
__strong_reference(_sem_destroy, sem_destroy);
__strong_reference(_sem_getvalue, sem_getvalue);
__strong_reference(_sem_init, sem_init);
__strong_reference(_sem_trywait, sem_trywait);
__strong_reference(_sem_wait, sem_wait);
__strong_reference(_sem_timedwait, sem_timedwait);
__strong_reference(_sem_post, sem_post);
__strong_reference(_sem_open, sem_open);
__strong_reference(_sem_close, sem_close);
__strong_reference(_sem_unlink, sem_unlink);