/* Enqueue a list of read or write requests, 64bit offset version.
   Copyright (C) 1997, 1998, 1999 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <aio.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#include "aio_misc.h"
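
/* The internal helpers used below (__aio_enqueue_request,
   __aio_notify_only, __aio_requests_mutex, struct requestlist and
   struct waitlist) are declared in aio_misc.h.  */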


/* We need this special structure to handle asynchronous I/O.  */
struct async_waitlist
  {
    int counter;
    struct sigevent sigev;
    struct waitlist list[0];
  };
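

/* Enqueue the NENT requests described by LIST according to MODE.
   With LIO_WAIT the call returns only after all enqueued requests have
   finished; with LIO_NOWAIT it returns as soon as the requests are
   queued and the notification described by SIG is delivered once all
   of them have completed.  The return value is 0 if every request was
   enqueued successfully, otherwise -1 with errno set; the error status
   of the individual aiocbs tells more.  */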
int
lio_listio64 (mode, list, nent, sig)
     int mode;
     struct aiocb64 *const list[];
     int nent;
     struct sigevent *sig;
{
  struct requestlist *requests[nent];
  int cnt;
  volatile int total = 0;
  int result = 0;

  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  /* Request the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Now we can enqueue all requests.  Since we already acquired the
     mutex the enqueue function need not do this.  */
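  /* ORing 128 into the opcode maps LIO_READ/LIO_WRITE to the 64-bit
     variants (LIO_READ64/LIO_WRITE64 in aio_misc.h) so the request is
     handled with the aiocb64 layout.  */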
  for (cnt = 0; cnt < nent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
      {
        requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
                                               (list[cnt]->aio_lio_opcode
                                                | 128));

        if (requests[cnt] != NULL)
          /* Successfully enqueued.  */
          ++total;
        else
          /* Signal that we've seen an error.  `errno' and the error code
             of the aiocb will tell more.  */
          result = -1;
      }
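
  /* TOTAL now holds the number of successfully enqueued requests and
     RESULT is -1 if any enqueue attempt failed.  */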
  if (total == 0)
    {
      /* We don't have anything to do except signalling if we work
         asynchronously.  */

      /* Release the mutex.  We do this before raising a signal since the
         signal handler might do a `siglongjmp' and then the mutex is
         locked forever.  */
      pthread_mutex_unlock (&__aio_requests_mutex);

      if (mode == LIO_NOWAIT)
        __aio_notify_only (sig,
                           sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0);

      return result;
    }
  else if (mode == LIO_WAIT)
    {
      pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
      struct waitlist waitlist[nent];
      int oldstate;

      total = 0;
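      /* Chain an on-stack waitlist entry onto every request that was
         enqueued successfully.  The request handlers decrement *counterp
         and signal COND as each request finishes, so TOTAL counts the
         completions we still have to wait for.  */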
      for (cnt = 0; cnt < nent; ++cnt)
        if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
            && requests[cnt] != NULL)
          {
            waitlist[cnt].cond = &cond;
            waitlist[cnt].next = requests[cnt]->waiting;
            waitlist[cnt].counterp = &total;
            waitlist[cnt].sigevp = NULL;
            waitlist[cnt].caller_pid = 0;   /* Not needed.  */
            requests[cnt]->waiting = &waitlist[cnt];
            ++total;
          }

      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
         points we must be careful.  We added entries to the waiting lists
         which we must remove.  So defer cancelation for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
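
      /* Sleep until all requests we are waiting for have completed.
         COND is paired with the global request mutex, which
         pthread_cond_wait releases while sleeping.  */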
      while (total > 0)
        pthread_cond_wait (&cond, &__aio_requests_mutex);

      /* Now it's time to restore the cancelation state.  */
      pthread_setcancelstate (oldstate, NULL);

      /* Release the condition variable.  */
      if (pthread_cond_destroy (&cond) != 0)
        /* This must never happen.  */
        abort ();
    }
  else
    {
      struct async_waitlist *waitlist;
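
      /* For LIO_NOWAIT the wait list must outlive this call, so it is
         allocated on the heap.  All entries share the embedded counter
         and the copy of *SIG, which lets the notification code tell
         when the last of these requests has finished.  */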
      waitlist = (struct async_waitlist *)
        malloc (sizeof (struct async_waitlist)
                + (nent * sizeof (struct waitlist)));

      if (waitlist == NULL)
        {
          __set_errno (EAGAIN);
          result = -1;
        }
      else
        {
          pid_t caller_pid = sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0;
          total = 0;

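          /* Link a heap-allocated waitlist entry onto each successfully
             enqueued request, analogous to the LIO_WAIT case but with
             sigevp pointing at the shared sigevent copy.  */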
          for (cnt = 0; cnt < nent; ++cnt)
            if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
                && requests[cnt] != NULL)
              {
                waitlist->list[cnt].cond = NULL;
                waitlist->list[cnt].next = requests[cnt]->waiting;
                waitlist->list[cnt].counterp = &waitlist->counter;
                waitlist->list[cnt].sigevp = &waitlist->sigev;
                waitlist->list[cnt].caller_pid = caller_pid;
                requests[cnt]->waiting = &waitlist->list[cnt];
                ++total;
              }
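
          /* Record how many completions the notification has to wait for
             and keep a private copy of the caller's sigevent; SIG need not
             remain valid after this call returns.  */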
          waitlist->counter = total;
          waitlist->sigev = *sig;
        }
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return result;
}