/*-
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER: This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/posix4.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/sema.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/mount.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/uma.h>

#include "opt_vfs_aio.h"
/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow. (XXX will be removed soon.)
 */
static u_long jobrefid;

/*
 * Counter for aio_fsync.
 */
static uint64_t jobseqno;
#define JOBST_NULL		0
#define JOBST_JOBQSOCK		1
#define JOBST_JOBQGLOBAL	2
#define JOBST_JOBRUNNING	3
#define JOBST_JOBFINISHED	4
#define JOBST_JOBQBUF		5
#define JOBST_JOBQSYNC		6
#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
FEATURE(aio, "Asynchronous I/O");

static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int unloadable = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
	"Allow unload of aio (not recommended)");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");
typedef struct oaiocb {
	int	aio_fildes;		/* File descriptor */
	off_t	aio_offset;		/* File offset for I/O */
	volatile void *aio_buf;		/* I/O buffer in process space */
	size_t	aio_nbytes;		/* Number of bytes for I/O */
	struct	osigevent aio_sigevent;	/* Signal to deliver */
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private	_aiocb_private;
} oaiocb_t;
/*
 * Below is a key of locks used to protect each member of struct aiocblist,
 * aioliojob and kaioinfo, and any backends.
 *
 * * - need not be protected
 * a - locked by kaioinfo lock
 * b - locked by backend lock; the backend lock can be null in some cases,
 *     for example, BIO belongs to this type, and in that case the proc lock
 *     is used.
 * c - locked by aio_job_mtx, the lock for the generic file I/O backend.
 */

/*
 * Currently there are only two backends: BIO and generic file I/O.
 * Socket I/O is served by generic file I/O; this is not a good idea, since
 * disk file I/O and any other type without the O_NONBLOCK flag can block
 * daemon threads.  If there is no thread left to serve socket I/O, the
 * socket I/O will be delayed too long or starved.  We should create threads
 * dedicated to sockets to do non-blocking I/O, and the same goes for pipes
 * and fifos; for these I/O systems we really need a non-blocking interface.
 * Fiddling with the O_NONBLOCK flag in the file structure is not safe
 * because there is a race between userland and the aio daemons.
 */
struct aiocblist {
	TAILQ_ENTRY(aiocblist) list;	/* (b) internal list for backend */
	TAILQ_ENTRY(aiocblist) plist;	/* (a) list of jobs for each backend */
	TAILQ_ENTRY(aiocblist) allist;	/* (a) list of all jobs in proc */
	int	jobflags;		/* (a) job flags */
	int	jobstate;		/* (b) job state */
	int	inputcharge;		/* (*) input blocks */
	int	outputcharge;		/* (*) output blocks */
	struct	buf *bp;		/* (*) private to BIO backend */
	struct	proc *userproc;		/* (*) user process */
	struct	ucred *cred;		/* (*) active credential when created */
	struct	file *fd_file;		/* (*) pointer to file structure */
	struct	aioliojob *lio;		/* (*) optional lio job */
	struct	aiocb *uuaiocb;		/* (*) pointer in userspace of aiocb */
	struct	knlist klist;		/* (a) list of knotes */
	struct	aiocb uaiocb;		/* (*) kernel I/O control block */
	ksiginfo_t ksi;			/* (a) realtime signal info */
	struct	task biotask;		/* (*) private to BIO backend */
	uint64_t seqno;			/* (*) job number */
	int	pending;		/* (a) number of pending I/O, aio_fsync only */
};

/* jobflags */
#define AIOCBLIST_DONE		0x01
#define AIOCBLIST_BUFDONE	0x02
#define AIOCBLIST_RUNDOWN	0x04
#define AIOCBLIST_CHECKSYNC	0x08
#define AIOP_FREE	0x1		/* proc on free queue */

struct aiothreadlist {
	int	aiothreadflags;		/* (c) AIO proc flags */
	TAILQ_ENTRY(aiothreadlist) list;	/* (c) list of processes */
	struct	thread *aiothread;	/* (*) the AIO thread */
};
/*
 * data-structure for lio signal management
 */
struct aioliojob {
	int	lioj_flags;		/* (a) listio flags */
	int	lioj_count;		/* (a) listio reference count */
	int	lioj_finished_count;	/* (a) count of finished listio jobs */
	struct	sigevent lioj_signal;	/* (a) signal on all I/O done */
	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
	struct	knlist klist;		/* (a) list of knotes */
	ksiginfo_t lioj_ksi;		/* (a) Realtime signal info */
};

#define LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
#define LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
/*
 * per process aio data structure
 */
struct kaioinfo {
	struct	mtx kaio_mtx;		/* the lock to protect this struct */
	int	kaio_flags;		/* (a) per process kaio flags */
	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
	int	kaio_active_count;	/* (c) number of currently used AIOs */
	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
	int	kaio_count;		/* (a) size of AIO queue */
	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
	int	kaio_buffer_count;	/* (a) number of physio buffers */
	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in the process */
	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
	TAILQ_HEAD(,aioliojob) kaio_liojoblist;	/* (a) list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* (a) queue for aios waiting on sockets */
	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
	struct	task kaio_task;		/* (*) task to kick aio threads */
};

#define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
#define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
#define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
#define AIO_MTX(ki)		(&(ki)->kaio_mtx)

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
static TAILQ_HEAD(,aiothreadlist) aio_freeproc;		/* (c) Idle daemons */
static struct sema aio_newproc_sem;
static struct mtx aio_job_mtx;
static struct mtx aio_sock_mtx;
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* (c) Async job list */
static struct unrhdr *aiod_unr;
void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(int *);
int	aio_aqueue(struct thread *td, struct aiocb *job,
		struct aioliojob *lio, int type, int osigev);
static void	aio_physwakeup(struct buf *bp);
static void	aio_proc_rundown(void *arg, struct proc *p);
static void	aio_proc_rundown_exec(void *arg, struct proc *p,
		struct image_params *imgp);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	biohelper(void *, int);
static void	aio_daemon(void *param);
static void	aio_swake_cb(struct socket *, struct sockbuf *);
static int	aio_unload(void);
static void	aio_bio_done_notify(struct proc *userp,
		struct aiocblist *aiocbe, int type);
#define DONE_BUF	1
#define DONE_QUEUE	2
static int	do_lio_listio(struct thread *td, struct lio_listio_args *uap,
		int oldsigev);
static int	aio_kick(struct proc *userp);
static void	aio_kick_nowait(struct proc *userp);
static void	aio_kick_helper(void *context, int pending);
static int	filt_aioattach(struct knote *kn);
static void	filt_aiodetach(struct knote *kn);
static int	filt_aio(struct knote *kn, long hint);
static int	filt_lioattach(struct knote *kn);
static void	filt_liodetach(struct knote *kn);
static int	filt_lio(struct knote *kn, long hint);
/*
 * Zones for:
 *	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/* kqueue filters for aio */
static struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
static struct filterops lio_filtops =
	{ 0, filt_lioattach, filt_liodetach, filt_lio };

static eventhandler_tag exit_tag, exec_tag;

TASKQUEUE_DEFINE_THREAD(aiod_bio);
/*
 * Main operations function for use as a kernel module.
 */
static int
aio_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		aio_onceonly();
		break;
	case MOD_UNLOAD:
		error = aio_unload();
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t aio_mod = {
	"aio",
	&aio_modload,
	NULL
};

SYSCALL_MODULE_HELPER(aio_cancel);
SYSCALL_MODULE_HELPER(aio_error);
SYSCALL_MODULE_HELPER(aio_fsync);
SYSCALL_MODULE_HELPER(aio_read);
SYSCALL_MODULE_HELPER(aio_return);
SYSCALL_MODULE_HELPER(aio_suspend);
SYSCALL_MODULE_HELPER(aio_waitcomplete);
SYSCALL_MODULE_HELPER(aio_write);
SYSCALL_MODULE_HELPER(lio_listio);
SYSCALL_MODULE_HELPER(oaio_read);
SYSCALL_MODULE_HELPER(oaio_write);
SYSCALL_MODULE_HELPER(olio_listio);

DECLARE_MODULE(aio, aio_mod,
	SI_SUB_VFS, SI_ORDER_ANY);
MODULE_VERSION(aio, 1);
/*
 * Startup initialization
 */
static void
aio_onceonly(void)
{

	/* XXX: should probably just use so->callback */
	aio_swake = &aio_swake_cb;
	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
	    NULL, EVENTHANDLER_PRI_ANY);
	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
	TAILQ_INIT(&aio_freeproc);
	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
	mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF);
	TAILQ_INIT(&aio_jobs);
	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX * sizeof(intptr_t), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	async_io_version = _POSIX_VERSION;
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
}
/*
 * Callback for unload of AIO when used as a module.
 */
static int
aio_unload(void)
{
	int error;

	/*
	 * XXX: no unloads by default, it's too dangerous.
	 * perhaps we could do it if locked out callers and then
	 * did an aio_proc_rundown() on each process.
	 *
	 * jhb: aio_proc_rundown() needs to run on curproc though,
	 * so I don't think that would fly.
	 */
	if (!unloadable)
		return (EOPNOTSUPP);

	error = kqueue_del_filteropts(EVFILT_AIO);
	if (error)
		return (error);
	error = kqueue_del_filteropts(EVFILT_LIO);
	if (error)
		return (error);
	async_io_version = 0;
	aio_swake = NULL;
	taskqueue_free(taskqueue_aiod_bio);
	delete_unrhdr(aiod_unr);
	uma_zdestroy(kaio_zone);
	uma_zdestroy(aiop_zone);
	uma_zdestroy(aiocb_zone);
	uma_zdestroy(aiol_zone);
	uma_zdestroy(aiolio_zone);
	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
	EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
	mtx_destroy(&aio_job_mtx);
	mtx_destroy(&aio_sock_mtx);
	sema_destroy(&aio_newproc_sem);
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
	return (0);
}
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	ki = uma_zalloc(kaio_zone, M_WAITOK);
	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF);
	ki->kaio_maxactive_count = max_aio_per_proc;
	ki->kaio_active_count = 0;
	ki->kaio_qallowed_count = max_aio_queue_per_proc;
	ki->kaio_ballowed_count = max_buf_aio;
	ki->kaio_buffer_count = 0;
	TAILQ_INIT(&ki->kaio_all);
	TAILQ_INIT(&ki->kaio_done);
	TAILQ_INIT(&ki->kaio_jobqueue);
	TAILQ_INIT(&ki->kaio_bufqueue);
	TAILQ_INIT(&ki->kaio_liojoblist);
	TAILQ_INIT(&ki->kaio_sockqueue);
	TAILQ_INIT(&ki->kaio_syncqueue);
	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
	if (p->p_aioinfo == NULL) {
		p->p_aioinfo = ki;
	} else {
		/* Another thread set it up first; discard ours. */
		mtx_destroy(&ki->kaio_mtx);
		uma_zfree(kaio_zone, ki);
	}

	while (num_aio_procs < MIN(target_aio_procs, max_aio_procs))
		aio_newproc(NULL);
}

static int
aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
{
	int ret;

	ksi->ksi_code = SI_ASYNCIO;
	ksi->ksi_flags |= KSI_EXT | KSI_INS;
	ret = psignal_event(p, sigev, ksi);
	return (ret);
}
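/*
 * Example (userland, not kernel code): aio_sendsig() above is what ultimately
 * delivers the completion signal requested through aio_sigevent.  A minimal
 * sketch of the submitting side; "fd" and "buf" are assumed to be an already
 * opened descriptor and a buffer, and a SIGUSR1 handler is assumed installed:
 *
 *	struct aiocb acb;
 *
 *	memset(&acb, 0, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *	acb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
 *	acb.aio_sigevent.sigev_signo = SIGUSR1;
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *	if (aio_read(&acb) == -1)
 *		err(1, "aio_read");
 */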
/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct proc *p;

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;

	AIO_LOCK_ASSERT(ki, MA_OWNED);
	MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);

	atomic_subtract_int(&num_queue_count, 1);

	ki->kaio_count--;
	MPASS(ki->kaio_count >= 0);

	TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);

	lj->lioj_count--;
	lj->lioj_finished_count--;

	if (lj->lioj_count == 0) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		/* lio is going away, we need to destroy any knotes */
		knlist_delete(&lj->klist, curthread, 1);
		sigqueue_take(&lj->lioj_ksi);
		uma_zfree(aiolio_zone, lj);
	}

	/* aiocbe is going away, we need to destroy any knotes */
	knlist_delete(&aiocbe->klist, curthread, 1);
	sigqueue_take(&aiocbe->ksi);

	MPASS(aiocbe->bp == NULL);
	aiocbe->jobstate = JOBST_NULL;

	/*
	 * The thread argument here is used to find the owning process
	 * and is also passed to fo_close() which may pass it to various
	 * places such as devsw close() routines.  Because of that, we
	 * need a thread pointer from the process owning the job that is
	 * persistent and won't disappear out from under us or move to
	 * another process.
	 *
	 * Currently, all the callers of this function call it to remove
	 * an aiocblist from the current process' job list either via a
	 * syscall or due to the current process calling exit() or
	 * execve().  Thus, we know that p == curproc.  We also know that
	 * curthread can't exit since we are curthread.
	 *
	 * Therefore, we use curthread as the thread to pass to
	 * knlist_delete().  This does mean that it is possible for the
	 * thread pointer at close time to differ from the thread pointer
	 * at open time, but this is already true of file descriptors in
	 * a multithreaded process.
	 */
	fdrop(aiocbe->fd_file, curthread);
	crfree(aiocbe->cred);
	uma_zfree(aiocb_zone, aiocbe);
	return (0);
}
static void
aio_proc_rundown_exec(void *arg, struct proc *p,
    struct image_params *imgp __unused)
{
	aio_proc_rundown(arg, p);
}
/*
 * Rundown the jobs for a given process.
 */
static void
aio_proc_rundown(void *arg, struct proc *p)
{
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct socket *so;
	int remove;

	KASSERT(curthread->td_proc == p,
	    ("%s: called on non-curproc", __func__));
	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	AIO_LOCK(ki);
	ki->kaio_flags |= KAIO_RUNDOWN;

restart:

	/*
	 * Try to cancel all pending requests. This code simulates
	 * aio_cancel on all pending I/O requests.
	 */
	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
		remove = 0;
		mtx_lock(&aio_job_mtx);
		if (cbe->jobstate == JOBST_JOBQGLOBAL) {
			TAILQ_REMOVE(&aio_jobs, cbe, list);
			remove = 1;
		} else if (cbe->jobstate == JOBST_JOBQSOCK) {
			fp = cbe->fd_file;
			MPASS(fp->f_type == DTYPE_SOCKET);
			so = fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
			remove = 1;
		} else if (cbe->jobstate == JOBST_JOBQSYNC) {
			TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
			remove = 1;
		}
		mtx_unlock(&aio_job_mtx);

		if (remove) {
			cbe->jobstate = JOBST_JOBFINISHED;
			cbe->uaiocb._aiocb_private.status = -1;
			cbe->uaiocb._aiocb_private.error = ECANCELED;
			TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
			aio_bio_done_notify(p, cbe, DONE_QUEUE);
		}
	}

	/* Wait for all running I/O to be finished */
	if (TAILQ_FIRST(&ki->kaio_bufqueue) ||
	    TAILQ_FIRST(&ki->kaio_jobqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
		goto restart;
	}

	/* Free all completed I/O requests. */
	while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
		aio_free_entry(cbe);

	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
		if (lj->lioj_count == 0) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			knlist_delete(&lj->klist, curthread, 1);
			sigqueue_take(&lj->lioj_ksi);
			uma_zfree(aiolio_zone, lj);
		} else {
			panic("LIO job not cleaned up: C:%d, FC:%d\n",
			    lj->lioj_count, lj->lioj_finished_count);
		}
	}
	AIO_UNLOCK(ki);
	taskqueue_drain(taskqueue_aiod_bio, &ki->kaio_task);
	mtx_destroy(&ki->kaio_mtx);
	uma_zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}
/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aiothreadlist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	mtx_assert(&aio_job_mtx, MA_OWNED);
	TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			/* Account for currently active jobs. */
			ki->kaio_active_count++;
			aiocbe->jobstate = JOBST_JOBRUNNING;
			break;
		}
	}
	return (aiocbe);
}
/*
 * Move all data to a permanent storage device.  This code
 * simulates the fsync syscall.
 */
static int
aio_fsync_vnode(struct thread *td, struct vnode *vp)
{
	struct mount *mp;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto drop;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_object != NULL) {
		VM_OBJECT_LOCK(vp->v_object);
		vm_object_page_clean(vp->v_object, 0, 0, 0);
		VM_OBJECT_UNLOCK(vp->v_object);
	}
	error = VOP_FSYNC(vp, MNT_WAIT, td);

	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
drop:
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 *
 * XXX I don't think it works well for sockets, pipes, and fifos.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct aiocb *cb;
	struct file *fp;
	struct socket *so;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	td = curthread;
	td_savedcred = td->td_ucred;
	td->td_ucred = aiocbe->cred;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (cb->aio_lio_opcode == LIO_SYNC) {
		error = 0;
		if (fp->f_vnode != NULL)
			error = aio_fsync_vnode(td, fp->f_vnode);
		cb->_aiocb_private.error = error;
		cb->_aiocb_private.status = 0;
		td->td_ucred = td_savedcred;
		return;
	}

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	inblock_st = td->td_ru.ru_inblock;
	oublock_st = td->td_ru.ru_oublock;
	/*
	 * aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		if (auio.uio_resid == 0)
			error = 0;
		else
			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	} else {
		if (fp->f_type == DTYPE_VNODE)
			bwillwrite();
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	}
	inblock_end = td->td_ru.ru_inblock;
	oublock_end = td->td_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
			int sigpipe = 1;

			if (fp->f_type == DTYPE_SOCKET) {
				so = fp->f_data;
				if (so->so_options & SO_NOSIGPIPE)
					sigpipe = 0;
			}
			if (sigpipe) {
				PROC_LOCK(aiocbe->userproc);
				psignal(aiocbe->userproc, SIGPIPE);
				PROC_UNLOCK(aiocbe->userproc);
			}
		}
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
	td->td_ucred = td_savedcred;
}
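/*
 * Example (userland): the generic path above is what services a plain
 * aio_read(2)/aio_write(2) on ordinary files.  The canonical completion
 * pattern on the submitting side polls aio_error(3) and then reaps the
 * result with aio_return(3); a sketch, assuming "acb" was queued as in
 * the earlier example:
 *
 *	while (aio_error(&acb) == EINPROGRESS)
 *		usleep(1000);
 *	ssize_t n = aio_return(&acb);
 *	if (n == -1)
 *		err(1, "aio_return");
 */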
static void
aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
{
	struct aioliojob *lj;
	struct kaioinfo *ki;
	struct aiocblist *scb, *scbn;
	int lj_done;

	ki = userp->p_aioinfo;
	AIO_LOCK_ASSERT(ki, MA_OWNED);

	lj = aiocbe->lio;
	lj_done = 0;
	if (lj) {
		lj->lioj_finished_count++;
		if (lj->lioj_count == lj->lioj_finished_count)
			lj_done = 1;
	}
	if (type == DONE_QUEUE) {
		aiocbe->jobflags |= AIOCBLIST_DONE;
	} else {
		aiocbe->jobflags |= AIOCBLIST_BUFDONE;
	}
	TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBFINISHED;

	if (ki->kaio_flags & KAIO_RUNDOWN)
		goto notification_done;

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
		aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);

	KNOTE_LOCKED(&aiocbe->klist, 1);

	if (lj_done) {
		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
			KNOTE_LOCKED(&lj->klist, 1);
		}
		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
		    == LIOJ_SIGNAL
		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
		}
	}

notification_done:
	if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
		TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
			if (aiocbe->fd_file == scb->fd_file &&
			    aiocbe->seqno < scb->seqno) {
				if (--scb->pending == 0) {
					mtx_lock(&aio_job_mtx);
					scb->jobstate = JOBST_JOBQGLOBAL;
					TAILQ_REMOVE(&ki->kaio_syncqueue, scb,
					    list);
					TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
					aio_kick_nowait(userp);
					mtx_unlock(&aio_job_mtx);
				}
			}
		}
	}
	if (ki->kaio_flags & KAIO_WAKEUP) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(&userp->p_aioinfo);
	}
}
/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 */
static void
aio_daemon(void *_id)
{
	struct aiocblist *aiocbe;
	struct aiothreadlist *aiop;
	struct kaioinfo *ki;
	struct proc *curcp, *mycp, *userp;
	struct vmspace *myvm, *tmpvm;
	struct thread *td = curthread;
	int id = (intptr_t)_id;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = td->td_proc;
	myvm = mycp->p_vmspace;

	KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = uma_zalloc(aiop_zone, M_WAITOK);
	aiop->aiothread = td;
	aiop->aiothreadflags = 0;

	/* The daemon resides in its own pgrp. */

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	sema_post(&aio_newproc_sem);

	mtx_lock(&aio_job_mtx);
	for (;;) {
		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aiothreadflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			aiop->aiothreadflags &= ~AIOP_FREE;
		}

		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			mtx_unlock(&aio_job_mtx);
			userp = aiocbe->userproc;

			/*
			 * Connect to process address space for user program.
			 */
			if (userp != curcp) {
				/*
				 * Save the current address space that we are
				 * connected to.
				 */
				tmpvm = mycp->p_vmspace;

				/*
				 * Point to the new user address space, and
				 * refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);

				/* Activate the new mapping. */
				pmap_activate(FIRST_THREAD_IN_PROC(mycp));

				/*
				 * If the old address space wasn't the daemon's
				 * own address space, then we need to remove the
				 * daemon's reference from the other process
				 * that it was acting on behalf of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}
				curcp = userp;
			}

			ki = userp->p_aioinfo;

			/* Do the I/O function. */
			aio_process(aiocbe);

			mtx_lock(&aio_job_mtx);
			/* Decrement the active job count. */
			ki->kaio_active_count--;
			mtx_unlock(&aio_job_mtx);

			AIO_LOCK(ki);
			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
			AIO_UNLOCK(ki);

			mtx_lock(&aio_job_mtx);
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {

			mtx_unlock(&aio_job_mtx);

			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(FIRST_THREAD_IN_PROC(mycp));

			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}

			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			curcp = mycp;

			mtx_lock(&aio_job_mtx);
			/*
			 * We have to restart to avoid race, we only sleep if
			 * no job can be selected, that should be
			 * curcp == mycp.
			 */
			continue;
		}

		mtx_assert(&aio_job_mtx, MA_OWNED);

		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aiothreadflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
		    aiod_lifetime)) {
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aiothreadflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					num_aio_procs--;
					mtx_unlock(&aio_job_mtx);
					uma_zfree(aiop_zone, aiop);
					free_unr(aiod_unr, id);
					if (mycp->p_vmspace->vm_refcnt <= 1) {
						printf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
					}
					kproc_exit(0);
				}
			}
		}
	}
	mtx_unlock(&aio_job_mtx);
	panic("shouldn't be here\n");
}
/*
 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(int *start)
{
	int error;
	struct proc *p;
	int id;

	id = alloc_unr(aiod_unr);
	error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
	    RFNOWAIT, 0, "aiod%d", id);
	if (error == 0) {
		/*
		 * Wait until daemon is started.
		 */
		sema_wait(&aio_newproc_sem);
		mtx_lock(&aio_job_mtx);
		num_aio_procs++;
		if (start != NULL)
			(*start)--;
		mtx_unlock(&aio_job_mtx);
	} else {
		free_unr(aiod_unr, id);
	}
	return (error);
}
/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aioliojob *lj;
	int error;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = fp->f_vnode;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (vp->v_bufobj.bo_bsize == 0)
		return (-1);

	if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
		return (-1);

	if (cb->aio_nbytes > vp->v_rdev->si_iosize_max)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	/* Create and build a buffer header for a transfer. */
	bp = (struct buf *)getpbuf(NULL);

	AIO_LOCK(ki);
	ki->kaio_buffer_count++;
	lj = aiocbe->lio;
	AIO_UNLOCK(ki);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	error = 0;

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(uintptr_t)cb->aio_buf;
	bp->b_offset = cb->aio_offset;
	bp->b_iooffset = cb->aio_offset;
	bp->b_blkno = btodb(cb->aio_offset);
	bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;

	/*
	 * Bring buffer into kernel space.
	 */
	if (vmapbuf(bp) < 0) {
		error = EFAULT;
		goto doerror;
	}

	AIO_LOCK(ki);
	aiocbe->bp = bp;
	bp->b_caller1 = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	AIO_UNLOCK(ki);

	atomic_add_int(&num_queue_count, 1);
	atomic_add_int(&num_buf_aio, 1);

	TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);

	/* Perform transfer. */
	dev_strategy(vp->v_rdev, bp);
	return (0);

doerror:
	AIO_LOCK(ki);
	ki->kaio_buffer_count--;
	aiocbe->bp = NULL;
	AIO_UNLOCK(ki);
	relpbuf(bp, NULL);
	return (error);
}
/*
 * Wake up aio requests that may be serviceable now.
 */
static void
aio_swake_cb(struct socket *so, struct sockbuf *sb)
{
	struct aiocblist *cb, *cbn;
	int opcode;

	if (sb == &so->so_snd)
		opcode = LIO_WRITE;
	else
		opcode = LIO_READ;

	sb->sb_flags &= ~SB_AIO;
	mtx_lock(&aio_job_mtx);
	TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			if (cb->jobstate != JOBST_JOBQSOCK)
				panic("invalid queue value");
			/*
			 * We don't have actual sockets backend yet,
			 * so we simply move the requests to the generic
			 * file I/O backend.
			 */
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			aio_kick_nowait(cb->userproc);
		}
	}
	mtx_unlock(&aio_job_mtx);
}
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
int
aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
	int type, int oldsigev)
{
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct aiocblist *aiocbe, *cb;
	struct kaioinfo *ki;
	struct kevent kev;
	struct sockbuf *sb;
	int opcode;
	int error;
	int fd, kqfd;
	int jid;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	ki = p->p_aioinfo;

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	if (num_queue_count >= max_queue_count ||
	    ki->kaio_count >= ki->kaio_qallowed_count) {
		suword(&job->_aiocb_private.error, EAGAIN);
		return (EAGAIN);
	}

	aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	knlist_init(&aiocbe->klist, AIO_MTX(ki), NULL, NULL, NULL);

	if (oldsigev) {
		bzero(&aiocbe->uaiocb, sizeof(struct aiocb));
		error = copyin(job, &aiocbe->uaiocb, sizeof(struct oaiocb));
		bcopy(&aiocbe->uaiocb.__spare__, &aiocbe->uaiocb.aio_sigevent,
		    sizeof(struct osigevent));
	} else {
		error = copyin(job, &aiocbe->uaiocb, sizeof(struct aiocb));
	}
	if (error) {
		suword(&job->_aiocb_private.error, error);
		uma_zfree(aiocb_zone, aiocbe);
		return (error);
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
		suword(&job->_aiocb_private.error, EINVAL);
		uma_zfree(aiocb_zone, aiocbe);
		return (EINVAL);
	}

	if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	     aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		uma_zfree(aiocb_zone, aiocbe);
		return (EINVAL);
	}

	ksiginfo_init(&aiocbe->ksi);

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Fetch the file object for the specified file descriptor. */
	fd = aiocbe->uaiocb.aio_fildes;
	switch (opcode) {
	case LIO_WRITE:
		error = fget_write(td, fd, &fp);
		break;
	case LIO_READ:
		error = fget_read(td, fd, &fp);
		break;
	default:
		error = fget(td, fd, &fp);
	}
	if (error) {
		uma_zfree(aiocb_zone, aiocbe);
		suword(&job->_aiocb_private.error, error);
		return (error);
	}

	if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
		error = EINVAL;
		goto aqueue_fail;
	}

	if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}

	aiocbe->fd_file = fp;

	mtx_lock(&aio_job_mtx);
	jid = jobrefid++;
	aiocbe->seqno = jobseqno++;
	mtx_unlock(&aio_job_mtx);
	error = suword(&job->_aiocb_private.kernelinfo, jid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;

	if (opcode == LIO_NOP) {
		fdrop(fp, td);
		uma_zfree(aiocb_zone, aiocbe);
		return (0);
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE) &&
	    (opcode != LIO_SYNC)) {
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
		goto no_kqueue;
	kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
	error = kqfd_register(kqfd, &kev, td, 1);
aqueue_fail:
	if (error) {
		fdrop(fp, td);
		uma_zfree(aiocb_zone, aiocbe);
		suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->cred = crhold(td->td_ucred);
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;

	if (opcode == LIO_SYNC)
		goto queueit;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 *
		 * Note if opcode is neither LIO_WRITE nor LIO_READ we lock
		 * and unlock the snd sockbuf for no reason.
		 */
		so = fp->f_data;
		sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			sb->sb_flags |= SB_AIO;

			mtx_lock(&aio_job_mtx);
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			mtx_unlock(&aio_job_mtx);

			AIO_LOCK(ki);
			TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
			aiocbe->jobstate = JOBST_JOBQSOCK;
			AIO_UNLOCK(ki);
			atomic_add_int(&num_queue_count, 1);
			error = 0;
			goto done;
		}
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	error = 0;
	atomic_add_int(&num_queue_count, 1);

queueit:
	AIO_LOCK(ki);
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
	if (opcode == LIO_SYNC) {
		TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
			if (cb->fd_file == aiocbe->fd_file &&
			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
			    cb->seqno < aiocbe->seqno) {
				cb->jobflags |= AIOCBLIST_CHECKSYNC;
				aiocbe->pending++;
			}
		}
		TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
			if (cb->fd_file == aiocbe->fd_file &&
			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
			    cb->seqno < aiocbe->seqno) {
				cb->jobflags |= AIOCBLIST_CHECKSYNC;
				aiocbe->pending++;
			}
		}
		if (aiocbe->pending != 0) {
			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
			aiocbe->jobstate = JOBST_JOBQSYNC;
			AIO_UNLOCK(ki);
			goto done;
		}
	}
	mtx_lock(&aio_job_mtx);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;
	aio_kick_nowait(p);
	mtx_unlock(&aio_job_mtx);
	AIO_UNLOCK(ki);
done:
	return (error);
}
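/*
 * Example (userland): the SIGEV_KEVENT branch above lets a request post its
 * completion to a kqueue instead of delivering a signal.  A sketch, assuming
 * "fd" and "buf" are already set up:
 *
 *	struct aiocb acb;
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	memset(&acb, 0, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	acb.aio_sigevent.sigev_notify_kqueue = kq;
 *	acb.aio_sigevent.sigev_value.sival_ptr = &acb;
 *	if (aio_read(&acb) == -1)
 *		err(1, "aio_read");
 *	// Wait for the EVFILT_AIO event; ev.ident is the aiocb pointer
 *	// registered above and ev.udata carries sival_ptr.
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		(void)aio_return((struct aiocb *)ev.ident);
 */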
static void
aio_kick_nowait(struct proc *userp)
{
	struct kaioinfo *ki = userp->p_aioinfo;
	struct aiothreadlist *aiop;

	mtx_assert(&aio_job_mtx, MA_OWNED);
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		aiop->aiothreadflags &= ~AIOP_FREE;
		wakeup(aiop->aiothread);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		taskqueue_enqueue(taskqueue_aiod_bio, &ki->kaio_task);
	}
}
static int
aio_kick(struct proc *userp)
{
	struct kaioinfo *ki = userp->p_aioinfo;
	struct aiothreadlist *aiop;
	int error, ret = 0;

	mtx_assert(&aio_job_mtx, MA_OWNED);
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		aiop->aiothreadflags &= ~AIOP_FREE;
		wakeup(aiop->aiothread);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		mtx_unlock(&aio_job_mtx);
		error = aio_newproc(&num_aio_resv_start);
		mtx_lock(&aio_job_mtx);
		if (error)
			num_aio_resv_start--;
	} else {
		ret = -1;
	}
	return (ret);
}
, int pending
)
1632 struct proc
*userp
= context
;
1634 mtx_lock(&aio_job_mtx
);
1635 while (--pending
>= 0) {
1636 if (aio_kick(userp
))
1639 mtx_unlock(&aio_job_mtx
);
/*
 * Support the aio_return system call.  As a side effect, kernel resources are
 * released.
 */
int
aio_return(struct thread *td, struct aio_return_args *uap)
{
	struct proc *p = td->td_proc;
	struct aiocblist *cb;
	struct aiocb *uaiocb;
	struct kaioinfo *ki;
	int status, error;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EINVAL);
	uaiocb = uap->aiocbp;
	AIO_LOCK(ki);
	TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
		if (cb->uuaiocb == uaiocb)
			break;
	}
	if (cb != NULL) {
		MPASS(cb->jobstate == JOBST_JOBFINISHED);
		status = cb->uaiocb._aiocb_private.status;
		error = cb->uaiocb._aiocb_private.error;
		td->td_retval[0] = status;
		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
			td->td_ru.ru_oublock += cb->outputcharge;
			cb->outputcharge = 0;
		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
			td->td_ru.ru_inblock += cb->inputcharge;
			cb->inputcharge = 0;
		}
		aio_free_entry(cb);
		AIO_UNLOCK(ki);
		suword(&uaiocb->_aiocb_private.error, error);
		suword(&uaiocb->_aiocb_private.status, status);
		error = 0;
	} else {
		error = EINVAL;
		AIO_UNLOCK(ki);
	}
	return (error);
}
/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
aio_suspend(struct thread *td, struct aio_suspend_args *uap)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb, *cbfirst;
	struct aiocb **ujoblist;
	int njoblist;
	int error;
	int timo;
	int i;

	if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
		return (EINVAL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return (error);

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EAGAIN);

	njoblist = 0;
	ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		njoblist++;
	}

	if (njoblist == 0) {
		uma_zfree(aiol_zone, ujoblist);
		return (0);
	}

	AIO_LOCK(ki);
	for (;;) {
		cbfirst = NULL;
		error = 0;
		TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
			for (i = 0; i < njoblist; i++) {
				if (cb->uuaiocb == ujoblist[i]) {
					if (cbfirst == NULL)
						cbfirst = cb;
					if (cb->jobstate == JOBST_JOBFINISHED)
						goto RETURN;
				}
			}
		}
		/* All tasks were finished. */
		if (cbfirst == NULL)
			break;

		ki->kaio_flags |= KAIO_WAKEUP;
		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
		    "aiospn", timo);
		if (error == ERESTART)
			error = EINTR;
		if (error)
			break;
	}
RETURN:
	AIO_UNLOCK(ki);
	uma_zfree(aiol_zone, ujoblist);
	return (error);
}
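/*
 * Example (userland): aio_suspend(3) blocks until at least one of the listed
 * requests completes or the timeout expires.  A sketch, assuming "acb" is an
 * outstanding request from the earlier examples:
 *
 *	const struct aiocb *list[1] = { &acb };
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	if (aio_suspend(list, 1, &ts) == -1) {
 *		if (errno == EAGAIN)
 *			printf("still in progress after 2s\n");
 *	} else
 *		(void)aio_return(&acb);
 */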
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
aio_cancel(struct thread *td, struct aio_cancel_args *uap)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct socket *so;
	struct vnode *vp;
	int error;
	int remove;
	int cancelled = 0;
	int notcancelled = 0;

	/* Lookup file object. */
	error = fget(td, uap->fd, &fp);
	if (error)
		return (error);

	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;

	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		if (vn_isdisk(vp, &error)) {
			fdrop(fp, td);
			td->td_retval[0] = AIO_NOTCANCELED;
			return (0);
		}
	}

	AIO_LOCK(ki);
	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		     (uap->aiocbp == cbe->uuaiocb))) {
			remove = 0;

			mtx_lock(&aio_job_mtx);
			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				remove = 1;
			} else if (cbe->jobstate == JOBST_JOBQSOCK) {
				MPASS(fp->f_type == DTYPE_SOCKET);
				so = fp->f_data;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				remove = 1;
			} else if (cbe->jobstate == JOBST_JOBQSYNC) {
				TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
				remove = 1;
			}
			mtx_unlock(&aio_job_mtx);

			if (remove) {
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				aio_bio_done_notify(p, cbe, DONE_QUEUE);
				cancelled++;
			} else {
				notcancelled++;
			}
			if (uap->aiocbp != NULL)
				break;
		}
	}
	AIO_UNLOCK(ki);

done:
	fdrop(fp, td);

	if (uap->aiocbp != NULL) {
		if (cancelled) {
			td->td_retval[0] = AIO_CANCELED;
			return (0);
		}
	}

	if (notcancelled) {
		td->td_retval[0] = AIO_NOTCANCELED;
		return (0);
	}

	if (cancelled) {
		td->td_retval[0] = AIO_CANCELED;
		return (0);
	}

	td->td_retval[0] = AIO_ALLDONE;

	return (0);
}
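/*
 * Example (userland): aio_cancel(2) returns AIO_CANCELED, AIO_NOTCANCELED or
 * AIO_ALLDONE as set in td_retval above.  A sketch cancelling every
 * outstanding request on a descriptor "fd":
 *
 *	switch (aio_cancel(fd, NULL)) {
 *	case AIO_CANCELED:
 *		printf("all queued requests cancelled\n");
 *		break;
 *	case AIO_NOTCANCELED:
 *		printf("some requests already in progress\n");
 *		break;
 *	case AIO_ALLDONE:
 *		printf("nothing left to cancel\n");
 *		break;
 *	}
 */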
/*
 * aio_error is implemented in the kernel level for compatibility purposes
 * only.  For a user mode async implementation, it would be best to do it in
 * a userland subroutine.
 */
int
aio_error(struct thread *td, struct aio_error_args *uap)
{
	struct proc *p = td->td_proc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	int status;

	ki = p->p_aioinfo;
	if (ki == NULL) {
		td->td_retval[0] = EINVAL;
		return (0);
	}

	AIO_LOCK(ki);
	TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
		if (cb->uuaiocb == uap->aiocbp) {
			if (cb->jobstate == JOBST_JOBFINISHED)
				td->td_retval[0] =
				    cb->uaiocb._aiocb_private.error;
			else
				td->td_retval[0] = EINPROGRESS;
			AIO_UNLOCK(ki);
			return (0);
		}
	}
	AIO_UNLOCK(ki);

	/*
	 * Hack for failure of aio_aqueue.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1) {
		td->td_retval[0] = fuword(&uap->aiocbp->_aiocb_private.error);
		return (0);
	}

	td->td_retval[0] = EINVAL;
	return (0);
}
/* syscall - asynchronous read from a file (REALTIME) */
int
oaio_read(struct thread *td, struct oaio_read_args *uap)
{

	return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 1);
}

int
aio_read(struct thread *td, struct aio_read_args *uap)
{

	return aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, 0);
}

/* syscall - asynchronous write to a file (REALTIME) */
int
oaio_write(struct thread *td, struct oaio_write_args *uap)
{

	return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 1);
}

int
aio_write(struct thread *td, struct aio_write_args *uap)
{

	return aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, 0);
}

/* syscall - list directed I/O (REALTIME) */
int
olio_listio(struct thread *td, struct olio_listio_args *uap)
{

	return do_lio_listio(td, (struct lio_listio_args *)uap, 1);
}

/* syscall - list directed I/O (REALTIME) */
int
lio_listio(struct thread *td, struct lio_listio_args *uap)
{

	return do_lio_listio(td, uap, 0);
}
static int
do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev)
{
	struct proc *p = td->td_proc;
	struct aiocb *iocb, * const *cbptr;
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct kevent kev;
	int nent;
	int error;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return (EINVAL);

	nent = uap->nent;
	if (nent < 0 || nent > AIO_LISTIO_MAX)
		return (EINVAL);

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	ki = p->p_aioinfo;

	lj = uma_zalloc(aiolio_zone, M_WAITOK);
	lj->lioj_flags = 0;
	lj->lioj_count = 0;
	lj->lioj_finished_count = 0;
	knlist_init(&lj->klist, AIO_MTX(ki), NULL, NULL, NULL);
	ksiginfo_init(&lj->lioj_ksi);

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		bzero(&lj->lioj_signal, sizeof(lj->lioj_signal));
		error = copyin(uap->sig, &lj->lioj_signal,
		    oldsigev ? sizeof(struct osigevent) :
		    sizeof(struct sigevent));
		if (error) {
			uma_zfree(aiolio_zone, lj);
			return (error);
		}

		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
			/* Assume only new style KEVENT */
			kev.filter = EVFILT_LIO;
			kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
			kev.ident = (uintptr_t)uap->acb_list; /* something unique */
			kev.data = (intptr_t)lj;
			/* pass user defined sigval data */
			kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
			error = kqfd_register(
			    lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
			if (error) {
				uma_zfree(aiolio_zone, lj);
				return (error);
			}
		} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
			;
		} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
			if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
				uma_zfree(aiolio_zone, lj);
				return (EINVAL);
			}
			lj->lioj_flags |= LIOJ_SIGNAL;
		} else {
			uma_zfree(aiolio_zone, lj);
			return (EINVAL);
		}
	}

	AIO_LOCK(ki);
	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
	/*
	 * Add extra aiocb count to avoid the lio to be freed
	 * by other threads doing aio_waitcomplete or aio_return,
	 * and prevent event from being sent until we have queued
	 * all tasks.
	 */
	lj->lioj_count = 1;
	AIO_UNLOCK(ki);

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = aio_aqueue(td, iocb, lj, LIO_NOP, oldsigev);
			if (error != 0)
				nerror++;
		}
	}

	error = 0;
	AIO_LOCK(ki);
	if (uap->mode == LIO_WAIT) {
		while (lj->lioj_count - 1 != lj->lioj_finished_count) {
			ki->kaio_flags |= KAIO_WAKEUP;
			error = msleep(&p->p_aioinfo, AIO_MTX(ki),
			    PRIBIO | PCATCH, "aiospn", 0);
			if (error == ERESTART)
				error = EINTR;
			if (error)
				break;
		}
	} else {
		if (lj->lioj_count - 1 == lj->lioj_finished_count) {
			if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
				lj->lioj_flags |= LIOJ_KEVENT_POSTED;
				KNOTE_LOCKED(&lj->klist, 1);
			}
			if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
			    == LIOJ_SIGNAL
			    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
			    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
				aio_sendsig(p, &lj->lioj_signal,
				    &lj->lioj_ksi);
				lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
			}
		}
	}
	lj->lioj_count--;
	if (lj->lioj_count == 0) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		knlist_delete(&lj->klist, curthread, 1);
		sigqueue_take(&lj->lioj_ksi);
		AIO_UNLOCK(ki);
		uma_zfree(aiolio_zone, lj);
	} else
		AIO_UNLOCK(ki);

	if (nerror)
		return (EIO);
	return (error);
}
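/*
 * Example (userland): lio_listio(2) submits a whole vector in one call; with
 * LIO_WAIT it behaves like the in-kernel wait loop above.  A sketch issuing
 * one read and one write on a descriptor "fd" and waiting for both:
 *
 *	struct aiocb rd, wr;
 *	struct aiocb *list[2] = { &rd, &wr };
 *
 *	memset(&rd, 0, sizeof(rd));
 *	rd.aio_fildes = fd;
 *	rd.aio_buf = rbuf;
 *	rd.aio_nbytes = sizeof(rbuf);
 *	rd.aio_lio_opcode = LIO_READ;
 *	memset(&wr, 0, sizeof(wr));
 *	wr.aio_fildes = fd;
 *	wr.aio_buf = wbuf;
 *	wr.aio_nbytes = sizeof(wbuf);
 *	wr.aio_offset = sizeof(rbuf);
 *	wr.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
 *		err(1, "lio_listio");
 */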
/*
 * Called from interrupt thread for physio, we should return as fast
 * as possible, so we schedule a biohelper task.
 */
static void
aio_physwakeup(struct buf *bp)
{
	struct aiocblist *aiocbe;

	aiocbe = (struct aiocblist *)bp->b_caller1;
	taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
}
/*
 * Task routine to perform heavy tasks, process wakeup, and signals.
 */
static void
biohelper(void *context, int pending)
{
	struct aiocblist *aiocbe = context;
	struct proc *userp;
	struct kaioinfo *ki;
	struct buf *bp;
	int nblks;

	bp = aiocbe->bp;
	userp = aiocbe->userproc;
	ki = userp->p_aioinfo;
	AIO_LOCK(ki);
	aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
	aiocbe->uaiocb._aiocb_private.error = 0;
	if (bp->b_ioflags & BIO_ERROR)
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
	nblks = btodb(aiocbe->uaiocb.aio_nbytes);
	if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
		aiocbe->outputcharge += nblks;
	else
		aiocbe->inputcharge += nblks;
	aiocbe->bp = NULL;
	TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
	ki->kaio_buffer_count--;
	aio_bio_done_notify(userp, aiocbe, DONE_BUF);
	AIO_UNLOCK(ki);

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	relpbuf(bp, NULL);
	atomic_subtract_int(&num_buf_aio, 1);
}
/* syscall - wait for the next completion of an aio request */
int
aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	struct aiocb *uuaiocb;
	int error, status, timo;

	suword(uap->aiocbp, (long)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);
	ki = p->p_aioinfo;

	error = 0;
	cb = NULL;
	AIO_LOCK(ki);
	while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
		ki->kaio_flags |= KAIO_WAKEUP;
		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
		    "aiowc", timo);
		if (timo && error == ERESTART)
			error = EINTR;
		if (error)
			break;
	}

	if (cb != NULL) {
		MPASS(cb->jobstate == JOBST_JOBFINISHED);
		uuaiocb = cb->uuaiocb;
		status = cb->uaiocb._aiocb_private.status;
		error = cb->uaiocb._aiocb_private.error;
		td->td_retval[0] = status;
		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
			td->td_ru.ru_oublock += cb->outputcharge;
			cb->outputcharge = 0;
		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
			td->td_ru.ru_inblock += cb->inputcharge;
			cb->inputcharge = 0;
		}
		aio_free_entry(cb);
		AIO_UNLOCK(ki);
		suword(uap->aiocbp, (long)uuaiocb);
		suword(&uuaiocb->_aiocb_private.error, error);
		suword(&uuaiocb->_aiocb_private.status, status);
	} else
		AIO_UNLOCK(ki);

	return (error);
}
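/*
 * Example (userland): aio_waitcomplete(2) is the FreeBSD-specific "reap
 * whatever finishes next" interface; it hands back the aiocb pointer of the
 * completed request.  A sketch with a 1-second timeout:
 *
 *	struct aiocb *done;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	ssize_t n;
 *
 *	n = aio_waitcomplete(&done, &ts);
 *	if (n == -1)
 *		warn("aio_waitcomplete");	// timed out or failed
 *	else
 *		printf("request %p returned %zd bytes\n", (void *)done, n);
 */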
int
aio_fsync(struct thread *td, struct aio_fsync_args *uap)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;

	if (uap->op != O_SYNC) /* XXX lack of O_DSYNC */
		return (EINVAL);
	ki = p->p_aioinfo;
	if (ki == NULL)
		aio_init_aioinfo(p);
	return aio_aqueue(td, uap->aiocbp, NULL, LIO_SYNC, 0);
}
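/*
 * Example (userland): aio_fsync(2) queues a sync barrier behind any
 * already-queued writes on the same descriptor (see the LIO_SYNC handling in
 * aio_aqueue()).  A sketch for a descriptor "fd":
 *
 *	struct aiocb sy;
 *
 *	memset(&sy, 0, sizeof(sy));
 *	sy.aio_fildes = fd;
 *	if (aio_fsync(O_SYNC, &sy) == -1)
 *		err(1, "aio_fsync");
 *	while (aio_error(&sy) == EINPROGRESS)
 *		usleep(1000);
 *	(void)aio_return(&sy);
 */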
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_ptr.p_aio = aiocbe;
	kn->kn_flags &= ~EV_FLAG1;

	knlist_add(&aiocbe->klist, kn, 0);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = kn->kn_ptr.p_aio;

	if (!knlist_empty(&aiocbe->klist))
		knlist_remove(&aiocbe->klist, kn, 0);
}

/* kqueue filter function */
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = kn->kn_ptr.p_aio;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

/* kqueue attach function */
static int
filt_lioattach(struct knote *kn)
{
	struct aioliojob *lj = (struct aioliojob *)kn->kn_sdata;

	/*
	 * The aioliojob pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_ptr.p_lio = lj;
	kn->kn_flags &= ~EV_FLAG1;

	knlist_add(&lj->klist, kn, 0);

	return (0);
}

/* kqueue detach function */
static void
filt_liodetach(struct knote *kn)
{
	struct aioliojob *lj = kn->kn_ptr.p_lio;

	if (!knlist_empty(&lj->klist))
		knlist_remove(&lj->klist, kn, 0);
}

/* kqueue filter function */
static int
filt_lio(struct knote *kn, long hint)
{
	struct aioliojob *lj = kn->kn_ptr.p_lio;

	return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
}