drm/i915: Set GPU freq to idle_freq initially
[dragonfly.git] / lib / libc_r / uthread / uthread_fd.c
blobefb27e47a023dd1c84457e0428c4745b0dd53cf4
1 /*
2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * $FreeBSD: src/lib/libc_r/uthread/uthread_fd.c,v 1.16.2.7 2002/10/22 14:44:03 fjoe Exp $
35 #include <errno.h>
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <pthread.h>
40 #include "pthread_private.h"
/*
 * Append thread p to fd wait queue q and flag it as queued so that
 * FDQ_REMOVE() can later tell whether it is still on a queue.
 * Multi-statement macro wrapped in do/while(0) for safe use in if/else.
 */
#define FDQ_INSERT(q,p)					\
do {							\
	TAILQ_INSERT_TAIL(q,p,qe);			\
	p->flags |= PTHREAD_FLAGS_IN_FDQ;		\
} while (0)
/*
 * Remove thread p from fd wait queue q, but only if it is actually
 * queued (PTHREAD_FLAGS_IN_FDQ set) — this makes the macro safe to
 * call on a thread that was already dequeued or never queued.
 */
#define FDQ_REMOVE(q,p)					\
do {							\
	if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) {	\
		TAILQ_REMOVE(q,p,qe);			\
		p->flags &= ~PTHREAD_FLAGS_IN_FDQ;	\
	}						\
} while (0)
/* Static variables: */

/*
 * Serializes publication of new fd_table entries in
 * _thread_fd_table_init(); per-entry accesses use each entry's own lock.
 */
static	spinlock_t	fd_table_lock	= _SPINLOCK_INITIALIZER;

/* Prototypes: */
#ifdef _FDLOCKS_ENABLED
static inline pthread_t	fd_next_reader(int fd);
static inline pthread_t	fd_next_writer(int fd);
#endif
68 * This function *must* return -1 and set the thread specific errno
69 * as a system call. This is because the error return from this
70 * function is propagated directly back from thread-wrapped system
71 * calls.
74 int
75 _thread_fd_table_init(int fd)
77 int ret = 0;
78 struct fd_table_entry *entry;
80 if (_thread_initial == NULL)
81 _thread_init();
83 /* Check if the file descriptor is out of range: */
84 if (fd < 0 || fd >= _thread_dtablesize) {
85 /* Return a bad file descriptor error: */
86 errno = EBADF;
87 ret = -1;
91 * Check if memory has already been allocated for this file
92 * descriptor:
94 else if (_thread_fd_table[fd] != NULL) {
95 /* Memory has already been allocated. */
97 /* Allocate memory for the file descriptor table entry: */
98 } else if ((entry = (struct fd_table_entry *)
99 malloc(sizeof(struct fd_table_entry))) == NULL) {
100 /* Return an insufficient memory error: */
101 errno = ENOMEM;
102 ret = -1;
103 } else {
104 /* Initialise the file locks: */
105 memset(&entry->lock, 0, sizeof(entry->lock));
106 entry->r_owner = NULL;
107 entry->w_owner = NULL;
108 entry->r_fname = NULL;
109 entry->w_fname = NULL;
110 entry->r_lineno = 0;
111 entry->w_lineno = 0;
112 entry->r_lockcount = 0;
113 entry->w_lockcount = 0;
115 /* Initialise the read/write queues: */
116 TAILQ_INIT(&entry->r_queue);
117 TAILQ_INIT(&entry->w_queue);
119 /* Get the flags for the file: */
120 if (((fd >= 3) || (_pthread_stdio_flags[fd] == -1)) &&
121 (entry->flags = __sys_fcntl(fd, F_GETFL, 0)) == -1) {
122 ret = -1;
123 } else {
124 /* Check if a stdio descriptor: */
125 if ((fd < 3) && (_pthread_stdio_flags[fd] != -1)) {
127 * Use the stdio flags read by
128 * _pthread_init() to avoid
129 * mistaking the non-blocking
130 * flag that, when set on one
131 * stdio fd, is set on all stdio
132 * fds.
134 entry->flags = _pthread_stdio_flags[fd];
138 * NOTE: We now use new system calls which allow
139 * the non-blocking mode to be set on a per-I/O
140 * basis, we no longer have to mess with the
141 * file pointer (which can have unexpected side
142 * effects since it might be shared with parent
143 * processes such as, oh, gmake).
146 /* Lock the file descriptor table: */
147 _SPINLOCK(&fd_table_lock);
150 * Check if another thread allocated the
151 * file descriptor entry while this thread
152 * was doing the same thing. The table wasn't
153 * kept locked during this operation because
154 * it has the potential to recurse.
156 if (_thread_fd_table[fd] == NULL) {
157 /* This thread wins: */
158 _thread_fd_table[fd] = entry;
159 entry = NULL;
162 /* Unlock the file descriptor table: */
163 _SPINUNLOCK(&fd_table_lock);
167 * Check if another thread initialised the table entry
168 * before this one could:
170 if (entry != NULL)
172 * Throw away the table entry that this thread
173 * prepared. The other thread wins.
175 free(entry);
178 /* Return the completion status: */
179 return (ret);
183 _thread_fd_getflags(int fd)
185 if (_thread_fd_table[fd] != NULL)
186 return (_thread_fd_table[fd]->flags);
187 else
188 return (0);
191 void
192 _thread_fd_setflags(int fd, int flags)
194 if (_thread_fd_table[fd] != NULL)
195 _thread_fd_table[fd]->flags = flags;
198 #ifdef _FDLOCKS_ENABLED
199 void
200 _thread_fd_unlock(int fd, int lock_type)
202 struct pthread *curthread = _get_curthread();
203 int ret;
206 * Early return if magic descriptor used by "at" family of syscalls.
208 if (fd == AT_FDCWD)
209 return (0);
212 * Check that the file descriptor table is initialised for this
213 * entry:
215 if ((ret = _thread_fd_table_init(fd)) == 0) {
217 * Defer signals to protect the scheduling queues from
218 * access by the signal handler:
220 _thread_kern_sig_defer();
223 * Lock the file descriptor table entry to prevent
224 * other threads for clashing with the current
225 * thread's accesses:
227 _SPINLOCK(&_thread_fd_table[fd]->lock);
229 /* Check if the running thread owns the read lock: */
230 if (_thread_fd_table[fd]->r_owner == curthread) {
231 /* Check the file descriptor and lock types: */
232 if (lock_type == FD_READ || lock_type == FD_RDWR) {
234 * Decrement the read lock count for the
235 * running thread:
237 _thread_fd_table[fd]->r_lockcount--;
240 * Check if the running thread still has read
241 * locks on this file descriptor:
243 if (_thread_fd_table[fd]->r_lockcount != 0) {
246 * Get the next thread in the queue for a
247 * read lock on this file descriptor:
249 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
250 } else {
251 /* Remove this thread from the queue: */
252 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
253 _thread_fd_table[fd]->r_owner);
256 * Set the state of the new owner of
257 * the thread to running:
259 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
262 * Reset the number of read locks.
263 * This will be incremented by the
264 * new owner of the lock when it sees
265 * that it has the lock.
267 _thread_fd_table[fd]->r_lockcount = 0;
271 /* Check if the running thread owns the write lock: */
272 if (_thread_fd_table[fd]->w_owner == curthread) {
273 /* Check the file descriptor and lock types: */
274 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
276 * Decrement the write lock count for the
277 * running thread:
279 _thread_fd_table[fd]->w_lockcount--;
282 * Check if the running thread still has
283 * write locks on this file descriptor:
285 if (_thread_fd_table[fd]->w_lockcount != 0) {
288 * Get the next thread in the queue for a
289 * write lock on this file descriptor:
291 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
292 } else {
293 /* Remove this thread from the queue: */
294 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
295 _thread_fd_table[fd]->w_owner);
298 * Set the state of the new owner of
299 * the thread to running:
301 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
304 * Reset the number of write locks.
305 * This will be incremented by the
306 * new owner of the lock when it
307 * sees that it has the lock.
309 _thread_fd_table[fd]->w_lockcount = 0;
314 /* Unlock the file descriptor table entry: */
315 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
318 * Undefer and handle pending signals, yielding if
319 * necessary:
321 _thread_kern_sig_undefer();
326 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
328 struct pthread *curthread = _get_curthread();
329 int ret;
332 * Early return if magic descriptor used by "at" family of syscalls.
334 if (fd == AT_FDCWD)
335 return (0);
338 * Check that the file descriptor table is initialised for this
339 * entry:
341 if ((ret = _thread_fd_table_init(fd)) == 0) {
342 /* Clear the interrupted flag: */
343 curthread->interrupted = 0;
346 * Lock the file descriptor table entry to prevent
347 * other threads for clashing with the current
348 * thread's accesses:
350 _SPINLOCK(&_thread_fd_table[fd]->lock);
352 /* Check the file descriptor and lock types: */
353 if (lock_type == FD_READ || lock_type == FD_RDWR) {
355 * Wait for the file descriptor to be locked
356 * for read for the current thread:
358 while ((_thread_fd_table[fd]->r_owner != curthread) &&
359 (curthread->interrupted == 0)) {
361 * Check if the file descriptor is locked by
362 * another thread:
364 if (_thread_fd_table[fd]->r_owner != NULL) {
366 * Another thread has locked the file
367 * descriptor for read, so join the
368 * queue of threads waiting for a
369 * read lock on this file descriptor:
371 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
374 * Save the file descriptor details
375 * in the thread structure for the
376 * running thread:
378 curthread->data.fd.fd = fd;
380 /* Set the timeout: */
381 _thread_kern_set_timeout(timeout);
384 * Unlock the file descriptor
385 * table entry:
387 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
390 * Schedule this thread to wait on
391 * the read lock. It will only be
392 * woken when it becomes the next in
393 * the queue and is granted access
394 * to the lock by the thread
395 * that is unlocking the file
396 * descriptor.
398 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
401 * Lock the file descriptor
402 * table entry again:
404 _SPINLOCK(&_thread_fd_table[fd]->lock);
406 if (curthread->interrupted != 0) {
407 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
408 curthread);
410 } else {
412 * The running thread now owns the
413 * read lock on this file descriptor:
415 _thread_fd_table[fd]->r_owner = curthread;
418 * Reset the number of read locks for
419 * this file descriptor:
421 _thread_fd_table[fd]->r_lockcount = 0;
425 if (_thread_fd_table[fd]->r_owner == curthread)
426 /* Increment the read lock count: */
427 _thread_fd_table[fd]->r_lockcount++;
430 /* Check the file descriptor and lock types: */
431 if (curthread->interrupted == 0 &&
432 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
434 * Wait for the file descriptor to be locked
435 * for write for the current thread:
437 while ((_thread_fd_table[fd]->w_owner != curthread) &&
438 (curthread->interrupted == 0)) {
440 * Check if the file descriptor is locked by
441 * another thread:
443 if (_thread_fd_table[fd]->w_owner != NULL) {
445 * Another thread has locked the file
446 * descriptor for write, so join the
447 * queue of threads waiting for a
448 * write lock on this file
449 * descriptor:
451 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
454 * Save the file descriptor details
455 * in the thread structure for the
456 * running thread:
458 curthread->data.fd.fd = fd;
460 /* Set the timeout: */
461 _thread_kern_set_timeout(timeout);
464 * Unlock the file descriptor
465 * table entry:
467 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
470 * Schedule this thread to wait on
471 * the write lock. It will only be
472 * woken when it becomes the next in
473 * the queue and is granted access to
474 * the lock by the thread that is
475 * unlocking the file descriptor.
477 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
480 * Lock the file descriptor
481 * table entry again:
483 _SPINLOCK(&_thread_fd_table[fd]->lock);
485 if (curthread->interrupted != 0) {
486 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
487 curthread);
489 } else {
491 * The running thread now owns the
492 * write lock on this file
493 * descriptor:
495 _thread_fd_table[fd]->w_owner = curthread;
498 * Reset the number of write locks
499 * for this file descriptor:
501 _thread_fd_table[fd]->w_lockcount = 0;
505 if (_thread_fd_table[fd]->w_owner == curthread)
506 /* Increment the write lock count: */
507 _thread_fd_table[fd]->w_lockcount++;
510 /* Unlock the file descriptor table entry: */
511 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
513 if (curthread->interrupted != 0) {
514 ret = -1;
515 errno = EINTR;
516 if (curthread->continuation != NULL)
517 curthread->continuation((void *)curthread);
521 /* Return the completion status: */
522 return (ret);
525 void
526 _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
528 struct pthread *curthread = _get_curthread();
529 int ret;
532 * Early return if magic descriptor used by "at" family of syscalls.
534 if (fd == AT_FDCWD)
535 return (0);
538 * Check that the file descriptor table is initialised for this
539 * entry:
541 if ((ret = _thread_fd_table_init(fd)) == 0) {
543 * Defer signals to protect the scheduling queues from
544 * access by the signal handler:
546 _thread_kern_sig_defer();
549 * Lock the file descriptor table entry to prevent
550 * other threads for clashing with the current
551 * thread's accesses:
553 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
555 /* Check if the running thread owns the read lock: */
556 if (_thread_fd_table[fd]->r_owner == curthread) {
557 /* Check the file descriptor and lock types: */
558 if (lock_type == FD_READ || lock_type == FD_RDWR) {
560 * Decrement the read lock count for the
561 * running thread:
563 _thread_fd_table[fd]->r_lockcount--;
566 * Check if the running thread still has read
567 * locks on this file descriptor:
569 if (_thread_fd_table[fd]->r_lockcount != 0) {
572 * Get the next thread in the queue for a
573 * read lock on this file descriptor:
575 else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
576 } else {
577 /* Remove this thread from the queue: */
578 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
579 _thread_fd_table[fd]->r_owner);
582 * Set the state of the new owner of
583 * the thread to running:
585 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
588 * Reset the number of read locks.
589 * This will be incremented by the
590 * new owner of the lock when it sees
591 * that it has the lock.
593 _thread_fd_table[fd]->r_lockcount = 0;
597 /* Check if the running thread owns the write lock: */
598 if (_thread_fd_table[fd]->w_owner == curthread) {
599 /* Check the file descriptor and lock types: */
600 if (lock_type == FD_WRITE || lock_type == FD_RDWR) {
602 * Decrement the write lock count for the
603 * running thread:
605 _thread_fd_table[fd]->w_lockcount--;
608 * Check if the running thread still has
609 * write locks on this file descriptor:
611 if (_thread_fd_table[fd]->w_lockcount != 0) {
614 * Get the next thread in the queue for a
615 * write lock on this file descriptor:
617 else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
618 } else {
619 /* Remove this thread from the queue: */
620 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
621 _thread_fd_table[fd]->w_owner);
624 * Set the state of the new owner of
625 * the thread to running:
627 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
630 * Reset the number of write locks.
631 * This will be incremented by the
632 * new owner of the lock when it
633 * sees that it has the lock.
635 _thread_fd_table[fd]->w_lockcount = 0;
640 /* Unlock the file descriptor table entry: */
641 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
644 * Undefer and handle pending signals, yielding if
645 * necessary.
647 _thread_kern_sig_undefer();
652 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
653 char *fname, int lineno)
655 struct pthread *curthread = _get_curthread();
656 int ret;
659 * Early return if magic descriptor used by "at" family of syscalls.
661 if (fd == AT_FDCWD)
662 return (0);
665 * Check that the file descriptor table is initialised for this
666 * entry:
668 if ((ret = _thread_fd_table_init(fd)) == 0) {
669 /* Clear the interrupted flag: */
670 curthread->interrupted = 0;
673 * Lock the file descriptor table entry to prevent
674 * other threads for clashing with the current
675 * thread's accesses:
677 _spinlock_debug(&_thread_fd_table[fd]->lock, fname, lineno);
679 /* Check the file descriptor and lock types: */
680 if (lock_type == FD_READ || lock_type == FD_RDWR) {
682 * Wait for the file descriptor to be locked
683 * for read for the current thread:
685 while ((_thread_fd_table[fd]->r_owner != curthread) &&
686 (curthread->interrupted == 0)) {
688 * Check if the file descriptor is locked by
689 * another thread:
691 if (_thread_fd_table[fd]->r_owner != NULL) {
693 * Another thread has locked the file
694 * descriptor for read, so join the
695 * queue of threads waiting for a
696 * read lock on this file descriptor:
698 FDQ_INSERT(&_thread_fd_table[fd]->r_queue, curthread);
701 * Save the file descriptor details
702 * in the thread structure for the
703 * running thread:
705 curthread->data.fd.fd = fd;
706 curthread->data.fd.branch = lineno;
707 curthread->data.fd.fname = fname;
709 /* Set the timeout: */
710 _thread_kern_set_timeout(timeout);
713 * Unlock the file descriptor
714 * table entry:
716 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
719 * Schedule this thread to wait on
720 * the read lock. It will only be
721 * woken when it becomes the next in
722 * the queue and is granted access
723 * to the lock by the thread
724 * that is unlocking the file
725 * descriptor.
727 _thread_kern_sched_state(PS_FDLR_WAIT, __FILE__, __LINE__);
730 * Lock the file descriptor
731 * table entry again:
733 _SPINLOCK(&_thread_fd_table[fd]->lock);
735 if (curthread->interrupted != 0) {
736 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
737 curthread);
739 } else {
741 * The running thread now owns the
742 * read lock on this file descriptor:
744 _thread_fd_table[fd]->r_owner = curthread;
747 * Reset the number of read locks for
748 * this file descriptor:
750 _thread_fd_table[fd]->r_lockcount = 0;
753 * Save the source file details for
754 * debugging:
756 _thread_fd_table[fd]->r_fname = fname;
757 _thread_fd_table[fd]->r_lineno = lineno;
761 if (_thread_fd_table[fd]->r_owner == curthread)
762 /* Increment the read lock count: */
763 _thread_fd_table[fd]->r_lockcount++;
766 /* Check the file descriptor and lock types: */
767 if (curthread->interrupted == 0 &&
768 (lock_type == FD_WRITE || lock_type == FD_RDWR)) {
770 * Wait for the file descriptor to be locked
771 * for write for the current thread:
773 while ((_thread_fd_table[fd]->w_owner != curthread) &&
774 (curthread->interrupted == 0)) {
776 * Check if the file descriptor is locked by
777 * another thread:
779 if (_thread_fd_table[fd]->w_owner != NULL) {
781 * Another thread has locked the file
782 * descriptor for write, so join the
783 * queue of threads waiting for a
784 * write lock on this file
785 * descriptor:
787 FDQ_INSERT(&_thread_fd_table[fd]->w_queue, curthread);
790 * Save the file descriptor details
791 * in the thread structure for the
792 * running thread:
794 curthread->data.fd.fd = fd;
795 curthread->data.fd.branch = lineno;
796 curthread->data.fd.fname = fname;
798 /* Set the timeout: */
799 _thread_kern_set_timeout(timeout);
802 * Unlock the file descriptor
803 * table entry:
805 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
808 * Schedule this thread to wait on
809 * the write lock. It will only be
810 * woken when it becomes the next in
811 * the queue and is granted access to
812 * the lock by the thread that is
813 * unlocking the file descriptor.
815 _thread_kern_sched_state(PS_FDLW_WAIT, __FILE__, __LINE__);
818 * Lock the file descriptor
819 * table entry again:
821 _SPINLOCK(&_thread_fd_table[fd]->lock);
823 if (curthread->interrupted != 0) {
824 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
825 curthread);
827 } else {
829 * The running thread now owns the
830 * write lock on this file
831 * descriptor:
833 _thread_fd_table[fd]->w_owner = curthread;
836 * Reset the number of write locks
837 * for this file descriptor:
839 _thread_fd_table[fd]->w_lockcount = 0;
842 * Save the source file details for
843 * debugging:
845 _thread_fd_table[fd]->w_fname = fname;
846 _thread_fd_table[fd]->w_lineno = lineno;
850 if (_thread_fd_table[fd]->w_owner == curthread)
851 /* Increment the write lock count: */
852 _thread_fd_table[fd]->w_lockcount++;
855 /* Unlock the file descriptor table entry: */
856 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
858 if (curthread->interrupted != 0) {
859 ret = -1;
860 errno = EINTR;
861 if (curthread->continuation != NULL)
862 curthread->continuation((void *)curthread);
866 /* Return the completion status: */
867 return (ret);
870 void
871 _thread_fd_unlock_owned(pthread_t pthread)
873 int fd;
875 for (fd = 0; fd < _thread_dtablesize; fd++) {
876 if ((_thread_fd_table[fd] != NULL) &&
877 ((_thread_fd_table[fd]->r_owner == pthread) ||
878 (_thread_fd_table[fd]->w_owner == pthread))) {
880 * Defer signals to protect the scheduling queues
881 * from access by the signal handler:
883 _thread_kern_sig_defer();
886 * Lock the file descriptor table entry to prevent
887 * other threads for clashing with the current
888 * thread's accesses:
890 _SPINLOCK(&_thread_fd_table[fd]->lock);
892 /* Check if the thread owns the read lock: */
893 if (_thread_fd_table[fd]->r_owner == pthread) {
894 /* Clear the read lock count: */
895 _thread_fd_table[fd]->r_lockcount = 0;
898 * Get the next thread in the queue for a
899 * read lock on this file descriptor:
901 if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
902 /* Remove this thread from the queue: */
903 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
904 _thread_fd_table[fd]->r_owner);
907 * Set the state of the new owner of
908 * the thread to running:
910 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
914 /* Check if the thread owns the write lock: */
915 if (_thread_fd_table[fd]->w_owner == pthread) {
916 /* Clear the write lock count: */
917 _thread_fd_table[fd]->w_lockcount = 0;
920 * Get the next thread in the queue for a
921 * write lock on this file descriptor:
923 if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
924 /* Remove this thread from the queue: */
925 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
926 _thread_fd_table[fd]->w_owner);
929 * Set the state of the new owner of
930 * the thread to running:
932 PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
937 /* Unlock the file descriptor table entry: */
938 _SPINUNLOCK(&_thread_fd_table[fd]->lock);
941 * Undefer and handle pending signals, yielding if
942 * necessary.
944 _thread_kern_sig_undefer();
949 void
950 _fd_lock_backout(pthread_t pthread)
952 int fd;
955 * Defer signals to protect the scheduling queues
956 * from access by the signal handler:
958 _thread_kern_sig_defer();
960 switch (pthread->state) {
962 case PS_FDLR_WAIT:
963 fd = pthread->data.fd.fd;
966 * Lock the file descriptor table entry to prevent
967 * other threads for clashing with the current
968 * thread's accesses:
970 _SPINLOCK(&_thread_fd_table[fd]->lock);
972 /* Remove the thread from the waiting queue: */
973 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
974 break;
976 case PS_FDLW_WAIT:
977 fd = pthread->data.fd.fd;
980 * Lock the file descriptor table entry to prevent
981 * other threads from clashing with the current
982 * thread's accesses:
984 _SPINLOCK(&_thread_fd_table[fd]->lock);
986 /* Remove the thread from the waiting queue: */
987 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
988 break;
990 default:
991 break;
995 * Undefer and handle pending signals, yielding if
996 * necessary.
998 _thread_kern_sig_undefer();
1001 static inline pthread_t
1002 fd_next_reader(int fd)
1004 pthread_t pthread;
1006 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
1007 (pthread->interrupted != 0)) {
1009 * This thread has either been interrupted by a signal or
1010 * it has been canceled. Remove it from the queue.
1012 FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
1015 return (pthread);
1018 static inline pthread_t
1019 fd_next_writer(int fd)
1021 pthread_t pthread;
1023 while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
1024 (pthread->interrupted != 0)) {
1026 * This thread has either been interrupted by a signal or
1027 * it has been canceled. Remove it from the queue.
1029 FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
1032 return (pthread);
1035 #else
/*
 * No-op: per-fd locking is compiled out when _FDLOCKS_ENABLED is unset.
 */
void
_thread_fd_unlock(int fd, int lock_type)
{
}
1043 _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
1046 * Insure that the file descriptor table is initialized for this
1047 * entry except if magic descriptor used by "at" family of syscalls.
1049 return ((fd != AT_FDCWD) ? _thread_fd_table_init(fd) : 0);
/*
 * No-op: per-fd locking is compiled out when _FDLOCKS_ENABLED is unset.
 */
void
_thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
{
}
1058 _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
1059 char *fname, int lineno)
1062 * Insure that the file descriptor table is initialized for this
1063 * entry except if magic descriptor used by "at" family of syscalls.
1065 return ((fd != AT_FDCWD) ? _thread_fd_table_init(fd) : 0);
/*
 * No-op: per-fd locking is compiled out when _FDLOCKS_ENABLED is unset,
 * so an exiting thread can own no fd locks.
 */
void
_thread_fd_unlock_owned(pthread_t pthread)
{
}
/*
 * No-op: per-fd locking is compiled out when _FDLOCKS_ENABLED is unset,
 * so no thread can be waiting on an fd lock.
 */
void
_fd_lock_backout(pthread_t pthread)
{
}
1078 #endif