/* Enqueue and list of read or write requests.
   Copyright (C) 1997,1998,1999,2000,2001,2002,2003
	Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
22 #include <kaio_misc.h>
25 #include <lio_listio.c>
35 #define LIO_OPCODE_BASE 0
38 /* We need this special structure to handle asynchronous I/O. */
42 struct sigevent sigev
;
43 struct waitlist list
[0];
47 lio_listio (mode
, list
, nent
, sig
)
49 struct aiocb
*const list
[];
53 struct sigevent defsigev
;
54 struct requestlist
*requests
[nent
];
56 volatile int total
= 0;
57 int result
= 0, op
= 0;
58 kctx_t kctx
= KCTX_NONE
;
60 /* Check arguments. */
61 if (mode
!= LIO_WAIT
&& mode
!= LIO_NOWAIT
)
69 defsigev
.sigev_notify
= SIGEV_NONE
;
73 /* Request the mutex. */
74 pthread_mutex_lock (&__aio_requests_mutex
);
76 if (mode
== LIO_WAIT
&& ! __have_no_kernel_aio
&& nent
> 0)
79 INTERNAL_SYSCALL_DECL (err
);
83 res
= INTERNAL_SYSCALL (io_setup
, err
, 2, nent
, &kctx
);
84 while (INTERNAL_SYSCALL_ERROR_P (res
, err
)
85 && INTERNAL_SYSCALL_ERRNO (res
, err
) == EINTR
);
86 if (INTERNAL_SYSCALL_ERROR_P (res
, err
))
89 if (INTERNAL_SYSCALL_ERRNO (res
, err
) == ENOSYS
)
90 __have_no_kernel_aio
= 1;
93 else if (mode
== LIO_NOWAIT
)
96 if (sig
->sigev_notify
!= SIGEV_NONE
)
97 op
= LIO_KTHREAD
| LIO_KTHREAD_REQUIRED
;
99 op
|= LIO_OPCODE_BASE
;
101 /* Now we can enqueue all requests. Since we already acquired the
102 mutex the enqueue function need not do this. */
103 for (cnt
= 0; cnt
< nent
; ++cnt
)
104 if (list
[cnt
] != NULL
&& list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
106 list
[cnt
]->aio_sigevent
.sigev_notify
= SIGEV_NONE
;
108 = __aio_enqueue_request_ctx ((aiocb_union
*) list
[cnt
],
109 list
[cnt
]->aio_lio_opcode
| op
,
112 if (requests
[cnt
] != NULL
)
113 /* Successfully enqueued. */
116 /* Signal that we've seen an error. `errno' and the error code
117 of the aiocb will tell more. */
121 requests
[cnt
] = NULL
;
125 /* We don't have anything to do except signalling if we work
128 if (kctx
!= KCTX_NONE
)
130 INTERNAL_SYSCALL_DECL (err
);
131 INTERNAL_SYSCALL (io_destroy
, err
, 1, kctx
);
134 /* Release the mutex. We do this before raising a signal since the
135 signal handler might do a `siglongjmp' and then the mutex is
137 pthread_mutex_unlock (&__aio_requests_mutex
);
139 if (mode
== LIO_NOWAIT
)
141 #ifdef BROKEN_THREAD_SIGNALS
142 __aio_notify_only (sig
,
143 sig
->sigev_notify
== SIGEV_SIGNAL
? getpid () : 0);
145 __aio_notify_only (sig
);
151 else if (mode
== LIO_WAIT
)
153 pthread_cond_t cond
= PTHREAD_COND_INITIALIZER
;
154 struct waitlist waitlist
[nent
];
156 volatile int ktotal
= 0;
159 for (cnt
= 0; cnt
< nent
; ++cnt
)
161 assert (requests
[cnt
] == NULL
|| list
[cnt
] != NULL
);
163 if (requests
[cnt
] != NULL
&& list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
165 if (requests
[cnt
]->kioctx
!= KCTX_NONE
)
167 assert (requests
[cnt
]->kioctx
== kctx
);
168 waitlist
[cnt
].counterp
= &ktotal
;
173 waitlist
[cnt
].counterp
= &total
;
176 waitlist
[cnt
].cond
= &cond
;
177 waitlist
[cnt
].next
= requests
[cnt
]->waiting
;
178 waitlist
[cnt
].sigevp
= NULL
;
179 #ifdef BROKEN_THREAD_SIGNALS
180 waitlist
[cnt
].caller_pid
= 0; /* Not needed. */
182 requests
[cnt
]->waiting
= &waitlist
[cnt
];
186 /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
187 points we must be careful. We added entries to the waiting lists
188 which we must remove. So defer cancelation for now. */
189 pthread_setcancelstate (PTHREAD_CANCEL_DISABLE
, &oldstate
);
192 __aio_wait_for_events (kctx
, NULL
);
194 pthread_cond_wait (&cond
, &__aio_requests_mutex
);
196 /* Now it's time to restore the cancelation state. */
197 pthread_setcancelstate (oldstate
, NULL
);
199 if (kctx
!= KCTX_NONE
)
201 INTERNAL_SYSCALL_DECL (err
);
202 INTERNAL_SYSCALL (io_destroy
, err
, 1, kctx
);
205 /* Release the conditional variable. */
206 if (pthread_cond_destroy (&cond
) != 0)
207 /* This must never happen. */
210 else if (sig
->sigev_notify
!= SIGEV_NONE
)
212 struct async_waitlist
*waitlist
;
214 waitlist
= (struct async_waitlist
*)
215 malloc (sizeof (struct async_waitlist
)
216 + (nent
* sizeof (struct waitlist
)));
218 if (waitlist
== NULL
)
220 __set_errno (EAGAIN
);
225 #ifdef BROKEN_THREAD_SIGNALS
226 pid_t caller_pid
= sig
->sigev_notify
== SIGEV_SIGNAL
? getpid () : 0;
230 for (cnt
= 0; cnt
< nent
; ++cnt
)
232 assert (requests
[cnt
] == NULL
|| list
[cnt
] != NULL
);
234 if (requests
[cnt
] != NULL
235 && list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
237 waitlist
->list
[cnt
].cond
= NULL
;
238 waitlist
->list
[cnt
].next
= requests
[cnt
]->waiting
;
239 waitlist
->list
[cnt
].counterp
= &waitlist
->counter
;
240 waitlist
->list
[cnt
].sigevp
= &waitlist
->sigev
;
241 #ifdef BROKEN_THREAD_SIGNALS
242 waitlist
->list
[cnt
].caller_pid
= caller_pid
;
244 requests
[cnt
]->waiting
= &waitlist
->list
[cnt
];
249 waitlist
->counter
= total
;
250 waitlist
->sigev
= *sig
;
254 /* Release the mutex. */
255 pthread_mutex_unlock (&__aio_requests_mutex
);