1 /* Enqueue and list of read or write requests.
2 Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006
3 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.  */
22 #include <kaio_misc.h>
25 #include <lio_listio.c>
35 #define LIO_OPCODE_BASE 0
38 #include <shlib-compat.h>
41 /* We need this special structure to handle asynchronous I/O. */
45 struct sigevent sigev
;
46 struct waitlist list
[0];
50 /* The code in glibc 2.1 to glibc 2.4 issued only one event when all
51 requests submitted with lio_listio finished. The existing practice
52 is to issue events for the individual requests as well. This is
53 what the new code does. */
54 #if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
55 # define LIO_MODE(mode) ((mode) & 127)
56 # define NO_INDIVIDUAL_EVENT_P(mode) ((mode) & 128)
58 # define LIO_MODE(mode) mode
59 # define NO_INDIVIDUAL_EVENT_P(mode) 0
64 lio_listio_internal (int mode
, struct aiocb
*const list
[], int nent
,
67 struct sigevent defsigev
;
68 struct requestlist
*requests
[nent
];
70 volatile int total
= 0;
71 int result
= 0, op
= 0;
72 kctx_t kctx
= KCTX_NONE
;
76 defsigev
.sigev_notify
= SIGEV_NONE
;
80 /* Request the mutex. */
81 pthread_mutex_lock (&__aio_requests_mutex
);
83 if (LIO_MODE (mode
) == LIO_WAIT
&& ! __have_no_kernel_aio
&& nent
> 0)
86 INTERNAL_SYSCALL_DECL (err
);
90 res
= INTERNAL_SYSCALL (io_setup
, err
, 2, nent
, &kctx
);
91 while (INTERNAL_SYSCALL_ERROR_P (res
, err
)
92 && INTERNAL_SYSCALL_ERRNO (res
, err
) == EINTR
);
93 if (INTERNAL_SYSCALL_ERROR_P (res
, err
))
96 if (INTERNAL_SYSCALL_ERRNO (res
, err
) == ENOSYS
)
97 __have_no_kernel_aio
= 1;
100 else if (LIO_MODE (mode
) == LIO_NOWAIT
)
103 if (sig
->sigev_notify
!= SIGEV_NONE
)
104 op
= LIO_KTHREAD
| LIO_KTHREAD_REQUIRED
;
106 op
|= LIO_OPCODE_BASE
;
108 /* Now we can enqueue all requests. Since we already acquired the
109 mutex the enqueue function need not do this. */
110 for (cnt
= 0; cnt
< nent
; ++cnt
)
111 if (list
[cnt
] != NULL
&& list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
113 if (NO_INDIVIDUAL_EVENT_P (mode
))
114 list
[cnt
]->aio_sigevent
.sigev_notify
= SIGEV_NONE
;
116 = __aio_enqueue_request_ctx ((aiocb_union
*) list
[cnt
],
117 list
[cnt
]->aio_lio_opcode
| op
,
120 if (requests
[cnt
] != NULL
)
121 /* Successfully enqueued. */
124 /* Signal that we've seen an error. `errno' and the error code
125 of the aiocb will tell more. */
129 requests
[cnt
] = NULL
;
133 /* We don't have anything to do except signalling if we work
136 if (kctx
!= KCTX_NONE
)
138 INTERNAL_SYSCALL_DECL (err
);
139 INTERNAL_SYSCALL (io_destroy
, err
, 1, kctx
);
142 /* Release the mutex. We do this before raising a signal since the
143 signal handler might do a `siglongjmp' and then the mutex is
145 pthread_mutex_unlock (&__aio_requests_mutex
);
147 if (LIO_MODE (mode
) == LIO_NOWAIT
)
149 #ifdef BROKEN_THREAD_SIGNALS
150 __aio_notify_only (sig
,
151 sig
->sigev_notify
== SIGEV_SIGNAL
? getpid () : 0);
153 __aio_notify_only (sig
);
159 else if (LIO_MODE (mode
) == LIO_WAIT
)
161 #ifndef DONT_NEED_AIO_MISC_COND
162 pthread_cond_t cond
= PTHREAD_COND_INITIALIZER
;
165 struct waitlist waitlist
[nent
];
166 volatile int ktotal
= 0;
169 for (cnt
= 0; cnt
< nent
; ++cnt
)
171 assert (requests
[cnt
] == NULL
|| list
[cnt
] != NULL
);
173 if (requests
[cnt
] != NULL
&& list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
175 if (requests
[cnt
]->kioctx
!= KCTX_NONE
)
177 assert (requests
[cnt
]->kioctx
== kctx
);
178 waitlist
[cnt
].counterp
= &ktotal
;
183 waitlist
[cnt
].counterp
= &total
;
186 #ifndef DONT_NEED_AIO_MISC_COND
187 waitlist
[cnt
].cond
= &cond
;
189 waitlist
[cnt
].result
= &result
;
190 waitlist
[cnt
].next
= requests
[cnt
]->waiting
;
191 waitlist
[cnt
].sigevp
= NULL
;
192 #ifdef BROKEN_THREAD_SIGNALS
193 waitlist
[cnt
].caller_pid
= 0; /* Not needed. */
195 requests
[cnt
]->waiting
= &waitlist
[cnt
];
200 __aio_wait_for_events (kctx
, NULL
);
201 #ifdef DONT_NEED_AIO_MISC_COND
202 AIO_MISC_WAIT (result
, total
, NULL
, 0);
204 /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancellation
205 points we must be careful. We added entries to the waiting lists
206 which we must remove. So defer cancellation for now. */
207 pthread_setcancelstate (PTHREAD_CANCEL_DISABLE
, &oldstate
);
210 pthread_cond_wait (&cond
, &__aio_requests_mutex
);
212 /* Now it's time to restore the cancellation state. */
213 pthread_setcancelstate (oldstate
, NULL
);
215 /* Release the conditional variable. */
216 if (pthread_cond_destroy (&cond
) != 0)
217 /* This must never happen. */
221 if (kctx
!= KCTX_NONE
)
223 INTERNAL_SYSCALL_DECL (err
);
224 INTERNAL_SYSCALL (io_destroy
, err
, 1, kctx
);
227 /* If any of the I/O requests failed, return -1 and set errno. */
230 __set_errno (result
== EINTR
? EINTR
: EIO
);
234 else if (sig
->sigev_notify
!= SIGEV_NONE
)
236 struct async_waitlist
*waitlist
;
238 waitlist
= (struct async_waitlist
*)
239 malloc (sizeof (struct async_waitlist
)
240 + (nent
* sizeof (struct waitlist
)));
242 if (waitlist
== NULL
)
244 __set_errno (EAGAIN
);
249 #ifdef BROKEN_THREAD_SIGNALS
250 pid_t caller_pid
= sig
->sigev_notify
== SIGEV_SIGNAL
? getpid () : 0;
254 for (cnt
= 0; cnt
< nent
; ++cnt
)
256 assert (requests
[cnt
] == NULL
|| list
[cnt
] != NULL
);
258 if (requests
[cnt
] != NULL
259 && list
[cnt
]->aio_lio_opcode
!= LIO_NOP
)
261 #ifndef DONT_NEED_AIO_MISC_COND
262 waitlist
->list
[cnt
].cond
= NULL
;
264 waitlist
->list
[cnt
].result
= NULL
;
265 waitlist
->list
[cnt
].next
= requests
[cnt
]->waiting
;
266 waitlist
->list
[cnt
].counterp
= &waitlist
->counter
;
267 waitlist
->list
[cnt
].sigevp
= &waitlist
->sigev
;
268 #ifdef BROKEN_THREAD_SIGNALS
269 waitlist
->list
[cnt
].caller_pid
= caller_pid
;
271 requests
[cnt
]->waiting
= &waitlist
->list
[cnt
];
276 waitlist
->counter
= total
;
277 waitlist
->sigev
= *sig
;
281 /* Release the mutex. */
282 pthread_mutex_unlock (&__aio_requests_mutex
);
288 #if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
290 attribute_compat_text_section
291 __lio_listio_21 (int mode
, struct aiocb
*const list
[], int nent
,
292 struct sigevent
*sig
)
294 /* Check arguments. */
295 if (mode
!= LIO_WAIT
&& mode
!= LIO_NOWAIT
)
297 __set_errno (EINVAL
);
301 return lio_listio_internal (mode
| LIO_NO_INDIVIDUAL_EVENT
, list
, nent
, sig
);
303 compat_symbol (librt
, __lio_listio_21
, lio_listio
, GLIBC_2_1
);
/* Current (GLIBC_2_4) entry point: events are issued for the individual
   requests as well as for the whole list.  */
int
__lio_listio_item_notify (int mode, struct aiocb *const list[], int nent,
			  struct sigevent *sig)
{
  /* Check arguments.  */
  if (mode != LIO_WAIT && mode != LIO_NOWAIT)
    {
      __set_errno (EINVAL);
      return -1;
    }

  return lio_listio_internal (mode, list, nent, sig);
}
versioned_symbol (librt, __lio_listio_item_notify, lio_listio, GLIBC_2_4);