/*
 * lib/libc_r/uthread/uthread_fd.c
 *
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_fd.c,v 1.16.2.7 2002/10/22 14:44:03 fjoe Exp $
 * $DragonFly: src/lib/libc_r/uthread/uthread_fd.c,v 1.3 2006/06/14 01:45:28 dillon Exp $
 */
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include "pthread_private.h"
/*
 * Append thread p to fd wait queue q and flag it as queued so that
 * FDQ_REMOVE can later tell whether it is still on a queue.
 */
#define FDQ_INSERT(q,p)					\
do {							\
	TAILQ_INSERT_TAIL(q,p,qe);			\
	(p)->flags |= PTHREAD_FLAGS_IN_FDQ;		\
} while (0)
/*
 * Remove thread p from fd wait queue q, but only if it is actually
 * queued (PTHREAD_FLAGS_IN_FDQ set) — safe to call redundantly.
 */
#define FDQ_REMOVE(q,p)						\
do {								\
	if (((p)->flags & PTHREAD_FLAGS_IN_FDQ) != 0) {		\
		TAILQ_REMOVE(q,p,qe);				\
		(p)->flags &= ~PTHREAD_FLAGS_IN_FDQ;		\
	}							\
} while (0)
58 /* Static variables: */
59 static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
61 /* Prototypes: */
62 #ifdef _FDLOCKS_ENABLED
63 static inline pthread_t fd_next_reader(int fd);
64 static inline pthread_t fd_next_writer(int fd);
65 #endif
69 * This function *must* return -1 and set the thread specific errno
70 * as a system call. This is because the error return from this
71 * function is propagated directly back from thread-wrapped system
72 * calls.
75 int
76 _thread_fd_table_init(int fd)
78 int ret = 0;
79 struct fd_table_entry *entry;
80 int saved_errno;
82 if (_thread_initial == NULL)
83 _thread_init();
85 /* Check if the file descriptor is out of range: */
86 if (fd < 0 || fd >= _thread_dtablesize) {
87 /* Return a bad file descriptor error: */
88 errno = EBADF;
89 ret = -1;
93 * Check if memory has already been allocated for this file
94 * descriptor:
96 else if (_thread_fd_table[fd] != NULL) {
97 /* Memory has already been allocated. */
99 /* Allocate memory for the file descriptor table entry: */
100 } else if ((entry = (struct fd_table_entry *)
101 malloc(sizeof(struct fd_table_entry))) == NULL) {
102 /* Return an insufficient memory error: */
103 errno = ENOMEM;
104 ret = -1;
105 } else {
106 /* Initialise the file locks: */
107 memset(&entry->lock, 0, sizeof(entry->lock));
108 entry->r_owner = NULL;
109 entry->w_owner = NULL;
110 entry->r_fname = NULL;
111 entry->w_fname = NULL;
112 entry->r_lineno = 0;
113 entry->w_lineno = 0;
114 entry->r_lockcount = 0;
115 entry->w_lockcount = 0;
117 /* Initialise the read/write queues: */
118 TAILQ_INIT(&entry->r_queue);
119 TAILQ_INIT(&entry->w_queue);
121 /* Get the flags for the file: */
122 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
123 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
124 ret = -1;
125 } else {
126 /* Check if a stdio descriptor: */
127 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1)) {
129 * Use the stdio flags read by
130 * _pthread_init() to avoid
131 * mistaking the non-blocking
132 * flag that, when set on one
133 * stdio fd, is set on all stdio
134 * fds.
136 entry->flags = _pthread_stdio_flags[fd];
140 * NOTE: We now use new system calls which allow
141 * the non-blocking mode to be set on a per-I/O
142 * basis, we no longer have to mess with the
143 * file pointer (which can have unexpected side
144 * effects since it might be shared with parent
145 * processes such as, oh, gmake).
148 /* Lock the file descriptor table: */
149 _SPINLOCK(&fd_table_lock);
152 * Check if another thread allocated the
153 * file descriptor entry while this thread
154 * was doing the same thing. The table wasn't
155 * kept locked during this operation because
156 * it has the potential to recurse.
158 if (_thread_fd_table[fd] == NULL) {
159 /* This thread wins: */
160 _thread_fd_table[fd] = entry;
161 entry = NULL;
164 /* Unlock the file descriptor table: */
165 _SPINUNLOCK(&fd_table_lock);
169 * Check if another thread initialised the table entry
170 * before this one could:
172 if (entry != NULL)
174 * Throw away the table entry that this thread
175 * prepared. The other thread wins.
177 free(entry);
180 /* Return the completion status: */
181 return (ret);
185 _thread_fd_getflags(int fd)
187 if (_thread_fd_table[fd] != NULL)
188 return (_thread_fd_table[fd]->flags);
189 else
190 return (0);
193 void
194 _thread_fd_setflags(int fd, int flags)
196 if (_thread_fd_table[fd] != NULL)
197 _thread_fd_table[fd]->flags = flags;
200 #ifdef _FDLOCKS_ENABLED
201 void
202 _thread_fd_unlock(int fd, int lock_type)
204 struct pthread *curthread = _get_curthread();
205 int ret;
208 * Check that the file descriptor table is initialised for this
209 * entry:
211 if ((ret = _thread_fd_table_init(fd)) == 0) {
213 * Defer signals to protect the scheduling queues from
214 * access by the signal handler:
216 _thread_kern_sig_defer();
219 * Lock the file descriptor table entry to prevent
220 * other threads for clashing with the current
221 * thread's accesses:
223 _SPINLOCK(&_thread_fd_table[fd]->lock);
225 /* Check if the running thread owns the read lock: */
226 if (_thread_fd_table[fd]->r_owner == curthread) {
227 /* Check the file descriptor and lock types: */
228 if (lock_type == FD_READ || lock_type == FD_RDWR) {
230 * Decrement the read lock count for the
231 * running thread:
233 _thread_fd_table[fd]->r_lockcount--;
236 * Check if the running thread still has read
237 * locks on this file descriptor:
239 if (_thread_fd_table[fd]->r_lockcount != 0) {
242 * Get the next thread in the queue for a
243 * read lock on this file descriptor:
245 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
246 } else {
247 /* Remove this thread from the queue: */
248 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
249 _thread_fd_table[fd]->r_owner);
252 * Set the state of the new owner of
253 * the thread to running:
255 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
258 * Reset the number of read locks.
259 * This will be incremented by the
260 * new owner of the lock when it sees
261 * that it has the lock.
263 _thread_fd_table[fd]->r_lockcount = 0;
267 /* Check if the running thread owns the write lock: */
268 if (_thread_fd_table[fd]->w_owner == curthread) {
269 /* Check the file descriptor and lock types: */
270 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
272 * Decrement the write lock count for the
273 * running thread:
275 _thread_fd_table[fd]->w_lockcount--;
278 * Check if the running thread still has
279 * write locks on this file descriptor:
281 if (_thread_fd_table[fd]->w_lockcount != 0) {
284 * Get the next thread in the queue for a
285 * write lock on this file descriptor:
287 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
288 } else {
289 /* Remove this thread from the queue: */
290 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
291 _thread_fd_table[fd]->w_owner);
294 * Set the state of the new owner of
295 * the thread to running:
297 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
300 * Reset the number of write locks.
301 * This will be incremented by the
302 * new owner of the lock when it
303 * sees that it has the lock.
305 _thread_fd_table[fd]->w_lockcount = 0;
310 /* Unlock the file descriptor table entry: */
311 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
314 * Undefer and handle pending signals, yielding if
315 * necessary:
317 _thread_kern_sig_undefer();
322 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
324 struct pthread *curthread = _get_curthread();
325 int ret;
328 * Check that the file descriptor table is initialised for this
329 * entry:
331 if ((ret = _thread_fd_table_init(fd)) == 0) {
332 /* Clear the interrupted flag: */
333 curthread->interrupted = 0;
336 * Lock the file descriptor table entry to prevent
337 * other threads for clashing with the current
338 * thread's accesses:
340 _SPINLOCK(&_thread_fd_table[fd]->lock);
342 /* Check the file descriptor and lock types: */
343 if (lock_type == FD_READ || lock_type == FD_RDWR) {
345 * Wait for the file descriptor to be locked
346 * for read for the current thread:
348 while ((_thread_fd_table[fd]->r_owner != curthread) &&
349 (curthread->interrupted == 0)) {
351 * Check if the file descriptor is locked by
352 * another thread:
354 if (_thread_fd_table[fd]->r_owner != NULL) {
356 * Another thread has locked the file
357 * descriptor for read, so join the
358 * queue of threads waiting for a
359 * read lock on this file descriptor:
361 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
364 * Save the file descriptor details
365 * in the thread structure for the
366 * running thread:
368 curthread->data.fd.fd = fd;
370 /* Set the timeout: */
371 _thread_kern_set_timeout(timeout);
374 * Unlock the file descriptor
375 * table entry:
377 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
380 * Schedule this thread to wait on
381 * the read lock. It will only be
382 * woken when it becomes the next in
383 * the queue and is granted access
384 * to the lock by the thread
385 * that is unlocking the file
386 * descriptor.
388 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
391 * Lock the file descriptor
392 * table entry again:
394 _SPINLOCK(&_thread_fd_table[fd]->lock);
396 if (curthread->interrupted != 0) {
397 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
398 curthread);
400 } else {
402 * The running thread now owns the
403 * read lock on this file descriptor:
405 _thread_fd_table[fd]->r_owner = curthread;
408 * Reset the number of read locks for
409 * this file descriptor:
411 _thread_fd_table[fd]->r_lockcount = 0;
415 if (_thread_fd_table[fd]->r_owner == curthread)
416 /* Increment the read lock count: */
417 _thread_fd_table[fd]->r_lockcount++;
420 /* Check the file descriptor and lock types: */
421 if (curthread->interrupted == 0 &&
422 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
424 * Wait for the file descriptor to be locked
425 * for write for the current thread:
427 while ((_thread_fd_table[fd]->w_owner != curthread) &&
428 (curthread->interrupted == 0)) {
430 * Check if the file descriptor is locked by
431 * another thread:
433 if (_thread_fd_table[fd]->w_owner != NULL) {
435 * Another thread has locked the file
436 * descriptor for write, so join the
437 * queue of threads waiting for a
438 * write lock on this file
439 * descriptor:
441 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
444 * Save the file descriptor details
445 * in the thread structure for the
446 * running thread:
448 curthread->data.fd.fd = fd;
450 /* Set the timeout: */
451 _thread_kern_set_timeout(timeout);
454 * Unlock the file descriptor
455 * table entry:
457 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
460 * Schedule this thread to wait on
461 * the write lock. It will only be
462 * woken when it becomes the next in
463 * the queue and is granted access to
464 * the lock by the thread that is
465 * unlocking the file descriptor.
467 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
470 * Lock the file descriptor
471 * table entry again:
473 _SPINLOCK(&_thread_fd_table[fd]->lock);
475 if (curthread->interrupted != 0) {
476 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
477 curthread);
479 } else {
481 * The running thread now owns the
482 * write lock on this file
483 * descriptor:
485 _thread_fd_table[fd]->w_owner = curthread;
488 * Reset the number of write locks
489 * for this file descriptor:
491 _thread_fd_table[fd]->w_lockcount = 0;
495 if (_thread_fd_table[fd]->w_owner == curthread)
496 /* Increment the write lock count: */
497 _thread_fd_table[fd]->w_lockcount++;
500 /* Unlock the file descriptor table entry: */
501 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
503 if (curthread->interrupted != 0) {
504 ret = -1;
505 errno = EINTR;
506 if (curthread->continuation != NULL)
507 curthread->continuation((void *)curthread);
511 /* Return the completion status: */
512 return (ret);
515 void
516 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
518 struct pthread *curthread = _get_curthread();
519 int ret;
522 * Check that the file descriptor table is initialised for this
523 * entry:
525 if ((ret = _thread_fd_table_init(fd)) == 0) {
527 * Defer signals to protect the scheduling queues from
528 * access by the signal handler:
530 _thread_kern_sig_defer();
533 * Lock the file descriptor table entry to prevent
534 * other threads for clashing with the current
535 * thread's accesses:
537 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
539 /* Check if the running thread owns the read lock: */
540 if (_thread_fd_table[fd]->r_owner == curthread) {
541 /* Check the file descriptor and lock types: */
542 if (lock_type == FD_READ || lock_type == FD_RDWR) {
544 * Decrement the read lock count for the
545 * running thread:
547 _thread_fd_table[fd]->r_lockcount--;
550 * Check if the running thread still has read
551 * locks on this file descriptor:
553 if (_thread_fd_table[fd]->r_lockcount != 0) {
556 * Get the next thread in the queue for a
557 * read lock on this file descriptor:
559 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
560 } else {
561 /* Remove this thread from the queue: */
562 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
563 _thread_fd_table[fd]->r_owner);
566 * Set the state of the new owner of
567 * the thread to running:
569 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
572 * Reset the number of read locks.
573 * This will be incremented by the
574 * new owner of the lock when it sees
575 * that it has the lock.
577 _thread_fd_table[fd]->r_lockcount = 0;
581 /* Check if the running thread owns the write lock: */
582 if (_thread_fd_table[fd]->w_owner == curthread) {
583 /* Check the file descriptor and lock types: */
584 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
586 * Decrement the write lock count for the
587 * running thread:
589 _thread_fd_table[fd]->w_lockcount--;
592 * Check if the running thread still has
593 * write locks on this file descriptor:
595 if (_thread_fd_table[fd]->w_lockcount != 0) {
598 * Get the next thread in the queue for a
599 * write lock on this file descriptor:
601 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
602 } else {
603 /* Remove this thread from the queue: */
604 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
605 _thread_fd_table[fd]->w_owner);
608 * Set the state of the new owner of
609 * the thread to running:
611 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
614 * Reset the number of write locks.
615 * This will be incremented by the
616 * new owner of the lock when it
617 * sees that it has the lock.
619 _thread_fd_table[fd]->w_lockcount = 0;
624 /* Unlock the file descriptor table entry: */
625 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
628 * Undefer and handle pending signals, yielding if
629 * necessary.
631 _thread_kern_sig_undefer();
636 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
637 char *fname, int lineno)
639 struct pthread *curthread = _get_curthread();
640 int ret;
643 * Check that the file descriptor table is initialised for this
644 * entry:
646 if ((ret = _thread_fd_table_init(fd)) == 0) {
647 /* Clear the interrupted flag: */
648 curthread->interrupted = 0;
651 * Lock the file descriptor table entry to prevent
652 * other threads for clashing with the current
653 * thread's accesses:
655 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
657 /* Check the file descriptor and lock types: */
658 if (lock_type == FD_READ || lock_type == FD_RDWR) {
660 * Wait for the file descriptor to be locked
661 * for read for the current thread:
663 while ((_thread_fd_table[fd]->r_owner != curthread) &&
664 (curthread->interrupted == 0)) {
666 * Check if the file descriptor is locked by
667 * another thread:
669 if (_thread_fd_table[fd]->r_owner != NULL) {
671 * Another thread has locked the file
672 * descriptor for read, so join the
673 * queue of threads waiting for a
674 * read lock on this file descriptor:
676 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
679 * Save the file descriptor details
680 * in the thread structure for the
681 * running thread:
683 curthread->data.fd.fd = fd;
684 curthread->data.fd.branch = lineno;
685 curthread->data.fd.fname = fname;
687 /* Set the timeout: */
688 _thread_kern_set_timeout(timeout);
691 * Unlock the file descriptor
692 * table entry:
694 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
697 * Schedule this thread to wait on
698 * the read lock. It will only be
699 * woken when it becomes the next in
700 * the queue and is granted access
701 * to the lock by the thread
702 * that is unlocking the file
703 * descriptor.
705 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
708 * Lock the file descriptor
709 * table entry again:
711 _SPINLOCK(&_thread_fd_table[fd]->lock);
713 if (curthread->interrupted != 0) {
714 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
715 curthread);
717 } else {
719 * The running thread now owns the
720 * read lock on this file descriptor:
722 _thread_fd_table[fd]->r_owner = curthread;
725 * Reset the number of read locks for
726 * this file descriptor:
728 _thread_fd_table[fd]->r_lockcount = 0;
731 * Save the source file details for
732 * debugging:
734 _thread_fd_table[fd]->r_fname = fname;
735 _thread_fd_table[fd]->r_lineno = lineno;
739 if (_thread_fd_table[fd]->r_owner == curthread)
740 /* Increment the read lock count: */
741 _thread_fd_table[fd]->r_lockcount++;
744 /* Check the file descriptor and lock types: */
745 if (curthread->interrupted == 0 &&
746 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
748 * Wait for the file descriptor to be locked
749 * for write for the current thread:
751 while ((_thread_fd_table[fd]->w_owner != curthread) &&
752 (curthread->interrupted == 0)) {
754 * Check if the file descriptor is locked by
755 * another thread:
757 if (_thread_fd_table[fd]->w_owner != NULL) {
759 * Another thread has locked the file
760 * descriptor for write, so join the
761 * queue of threads waiting for a
762 * write lock on this file
763 * descriptor:
765 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
768 * Save the file descriptor details
769 * in the thread structure for the
770 * running thread:
772 curthread->data.fd.fd = fd;
773 curthread->data.fd.branch = lineno;
774 curthread->data.fd.fname = fname;
776 /* Set the timeout: */
777 _thread_kern_set_timeout(timeout);
780 * Unlock the file descriptor
781 * table entry:
783 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
786 * Schedule this thread to wait on
787 * the write lock. It will only be
788 * woken when it becomes the next in
789 * the queue and is granted access to
790 * the lock by the thread that is
791 * unlocking the file descriptor.
793 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
796 * Lock the file descriptor
797 * table entry again:
799 _SPINLOCK(&_thread_fd_table[fd]->lock);
801 if (curthread->interrupted != 0) {
802 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
803 curthread);
805 } else {
807 * The running thread now owns the
808 * write lock on this file
809 * descriptor:
811 _thread_fd_table[fd]->w_owner = curthread;
814 * Reset the number of write locks
815 * for this file descriptor:
817 _thread_fd_table[fd]->w_lockcount = 0;
820 * Save the source file details for
821 * debugging:
823 _thread_fd_table[fd]->w_fname = fname;
824 _thread_fd_table[fd]->w_lineno = lineno;
828 if (_thread_fd_table[fd]->w_owner == curthread)
829 /* Increment the write lock count: */
830 _thread_fd_table[fd]->w_lockcount++;
833 /* Unlock the file descriptor table entry: */
834 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
836 if (curthread->interrupted != 0) {
837 ret = -1;
838 errno = EINTR;
839 if (curthread->continuation != NULL)
840 curthread->continuation((void *)curthread);
844 /* Return the completion status: */
845 return (ret);
848 void
849 _thread_fd_unlock_owned(pthread_t pthread)
851 int fd;
853 for (fd = 0; fd < _thread_dtablesize; fd++) {
854 if ((_thread_fd_table[fd] != NULL) &&
855 ((_thread_fd_table[fd]->r_owner == pthread) ||
856 (_thread_fd_table[fd]->w_owner == pthread))) {
858 * Defer signals to protect the scheduling queues
859 * from access by the signal handler:
861 _thread_kern_sig_defer();
864 * Lock the file descriptor table entry to prevent
865 * other threads for clashing with the current
866 * thread's accesses:
868 _SPINLOCK(&_thread_fd_table[fd]->lock);
870 /* Check if the thread owns the read lock: */
871 if (_thread_fd_table[fd]->r_owner == pthread) {
872 /* Clear the read lock count: */
873 _thread_fd_table[fd]->r_lockcount = 0;
876 * Get the next thread in the queue for a
877 * read lock on this file descriptor:
879 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
880 /* Remove this thread from the queue: */
881 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
882 _thread_fd_table[fd]->r_owner);
885 * Set the state of the new owner of
886 * the thread to running:
888 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
892 /* Check if the thread owns the write lock: */
893 if (_thread_fd_table[fd]->w_owner == pthread) {
894 /* Clear the write lock count: */
895 _thread_fd_table[fd]->w_lockcount = 0;
898 * Get the next thread in the queue for a
899 * write lock on this file descriptor:
901 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
902 /* Remove this thread from the queue: */
903 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
904 _thread_fd_table[fd]->w_owner);
907 * Set the state of the new owner of
908 * the thread to running:
910 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
915 /* Unlock the file descriptor table entry: */
916 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
919 * Undefer and handle pending signals, yielding if
920 * necessary.
922 _thread_kern_sig_undefer();
927 void
928 _fd_lock_backout(pthread_t pthread)
930 int fd;
933 * Defer signals to protect the scheduling queues
934 * from access by the signal handler:
936 _thread_kern_sig_defer();
938 switch (pthread->state) {
940 case PS_FDLR_WAIT:
941 fd = pthread->data.fd.fd;
944 * Lock the file descriptor table entry to prevent
945 * other threads for clashing with the current
946 * thread's accesses:
948 _SPINLOCK(&_thread_fd_table[fd]->lock);
950 /* Remove the thread from the waiting queue: */
951 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
952 break;
954 case PS_FDLW_WAIT:
955 fd = pthread->data.fd.fd;
958 * Lock the file descriptor table entry to prevent
959 * other threads from clashing with the current
960 * thread's accesses:
962 _SPINLOCK(&_thread_fd_table[fd]->lock);
964 /* Remove the thread from the waiting queue: */
965 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
966 break;
968 default:
969 break;
973 * Undefer and handle pending signals, yielding if
974 * necessary.
976 _thread_kern_sig_undefer();
979 static inline pthread_t
980 fd_next_reader(int fd)
982 pthread_t pthread;
984 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
985 (pthread->interrupted != 0)) {
987 * This thread has either been interrupted by a signal or
988 * it has been canceled. Remove it from the queue.
990 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
993 return (pthread);
996 static inline pthread_t
997 fd_next_writer(int fd)
999 pthread_t pthread;
1001 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1002 (pthread->interrupted != 0)) {
1004 * This thread has either been interrupted by a signal or
1005 * it has been canceled. Remove it from the queue.
1007 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
1010 return (pthread);
1013 #else
1015 void
1016 _thread_fd_unlock(int fd, int lock_type)
1021 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1024 * Insure that the file descriptor table is initialized for this
1025 * entry:
1027 return (_thread_fd_table_init(fd));
1030 void
1031 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
1036 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1037 char *fname, int lineno)
1040 * Insure that the file descriptor table is initialized for this
1041 * entry:
1043 return (_thread_fd_table_init(fd));
1046 void
1047 _thread_fd_unlock_owned(pthread_t pthread)
1051 void
1052 _fd_lock_backout(pthread_t pthread)
1056 #endif