rt/lio_listio64.c
/* Enqueue a list of read or write requests, 64bit offset version.
   Copyright (C) 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <aio.h>
#include <errno.h>
#include <stdlib.h>

#include "aio_misc.h"

/* We need this special structure to handle asynchronous I/O.  */
struct async_waitlist
  {
    int counter;
    struct sigevent sigev;
    struct waitlist list[0];
  };
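
/* `list[0]' is the old GNU zero-length array idiom, the forerunner of
   C99 flexible array members.  The real entries live directly behind
   the structure in memory; the LIO_NOWAIT branch below obtains room
   for them with a single malloc of the header plus `nent' elements.  */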

int
lio_listio64 (mode, list, nent, sig)
     int mode;
     struct aiocb64 *const list[];
     int nent;
     struct sigevent *sig;
{
  struct requestlist *requests[nent];
  int cnt;
  volatile int total = 0;
  int result = 0;

  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  /* Request the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Now we can enqueue all requests.  Since we already acquired the
     mutex the enqueue function need not do this.  ORing 128 into the
     opcode turns LIO_READ/LIO_WRITE into the LIO_READ64/LIO_WRITE64
     codes from aio_misc.h, marking the request as using the 64-bit
     aiocb64 layout.  */
  for (cnt = 0; cnt < nent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
      {
        requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
                                               (list[cnt]->aio_lio_opcode
                                                | 128));
        if (requests[cnt] != NULL)
          /* Successfully enqueued.  */
          ++total;
        else
          /* Signal that we've seen an error.  `errno' and the error code
             of the aiocb will tell more.  */
          result = -1;
      }

  if (total == 0)
    {
      /* We don't have anything to do except signalling if we work
         asynchronously.  */
      if (mode == LIO_NOWAIT)
        __aio_notify_only (sig);
    }
  else if (mode == LIO_WAIT)
    {
      pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
      struct waitlist waitlist[nent];
      int oldstate;

      /* Chain a waitlist entry onto every successfully enqueued
         request.  As each request finishes, the notification code
         decrements *counterp and signals *cond, so the wait loop
         below wakes up and rechecks `total'.  */
      total = 0;
      for (cnt = 0; cnt < nent; ++cnt)
        if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
            && requests[cnt] != NULL)
          {
            waitlist[cnt].cond = &cond;
            waitlist[cnt].next = requests[cnt]->waiting;
            waitlist[cnt].counterp = &total;
            waitlist[cnt].sigevp = NULL;
            requests[cnt]->waiting = &waitlist[cnt];
            ++total;
          }

      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
         points we must be careful.  We added entries to the waiting lists
         which we must remove.  So defer cancelation for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);

      while (total > 0)
        pthread_cond_wait (&cond, &__aio_requests_mutex);

      /* Now it's time to restore the cancelation state.  */
      pthread_setcancelstate (oldstate, NULL);
    }
  else
    {
      struct async_waitlist *waitlist;

      waitlist = (struct async_waitlist *)
        malloc (sizeof (struct async_waitlist)
                + (nent * sizeof (struct waitlist)));

      if (waitlist == NULL)
        {
          __set_errno (EAGAIN);
          result = -1;
        }
      else
        {
          total = 0;

          for (cnt = 0; cnt < nent; ++cnt)
            if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP
                && requests[cnt] != NULL)
              {
                waitlist->list[cnt].cond = NULL;
                waitlist->list[cnt].next = requests[cnt]->waiting;
                waitlist->list[cnt].counterp = &waitlist->counter;
                waitlist->list[cnt].sigevp = &waitlist->sigev;
                requests[cnt]->waiting = &waitlist->list[cnt];
                ++total;
              }

          waitlist->counter = total;
          waitlist->sigev = *sig;
        }
    }
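
  /* For LIO_NOWAIT the heap-allocated waitlist plays the counter's
     role: each completing request decrements waitlist->counter, and
     once it reaches zero the notification stored in waitlist->sigev
     is delivered (see __aio_notify in aio_notify.c).  */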

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return result;
}
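
/* A minimal usage sketch, not part of the library source: it shows a
   caller submitting two reads through lio_listio64 and blocking with
   LIO_WAIT until both finish.  The guard macro LIO_LISTIO64_EXAMPLE,
   the file name, and the buffer sizes are made up for illustration;
   error handling is abbreviated.  */
#ifdef LIO_LISTIO64_EXAMPLE
# include <fcntl.h>
# include <stdio.h>
# include <string.h>
# include <unistd.h>

int
main (void)
{
  static char buf1[64], buf2[64];
  struct aiocb64 cb1, cb2;
  struct aiocb64 *cbs[2] = { &cb1, &cb2 };
  int fd = open ("/etc/hostname", O_RDONLY);  /* any readable file */

  if (fd < 0)
    return 1;

  memset (&cb1, 0, sizeof cb1);
  cb1.aio_fildes = fd;
  cb1.aio_buf = buf1;
  cb1.aio_nbytes = sizeof buf1;
  cb1.aio_offset = 0;
  cb1.aio_lio_opcode = LIO_READ;

  memset (&cb2, 0, sizeof cb2);
  cb2.aio_fildes = fd;
  cb2.aio_buf = buf2;
  cb2.aio_nbytes = sizeof buf2;
  cb2.aio_offset = 64;          /* second 64-byte slice of the file */
  cb2.aio_lio_opcode = LIO_READ;

  /* LIO_WAIT blocks until every request in the list has completed;
     the sigevent argument may be NULL since it is only consulted for
     LIO_NOWAIT notification.  */
  if (lio_listio64 (LIO_WAIT, cbs, 2, NULL) < 0)
    perror ("lio_listio64");
  else
    printf ("read %zd and %zd bytes\n",
            aio_return64 (&cb1), aio_return64 (&cb2));

  close (fd);
  return 0;
}
#endif /* LIO_LISTIO64_EXAMPLE */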