/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/kern/vfs_aio.c,v 1.42 2007/07/20 17:21:52 dillon Exp $
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
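
/*
 * Illustrative userland sketch (not part of this file; assumes a kernel
 * built with "options VFS_AIO"):  the syscalls implemented below are
 * normally driven from a program roughly like the following.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *	#include <string.h>
 *
 *	static char buf[4096];
 *
 *	int
 *	demo(const char *path)
 *	{
 *		struct aiocb cb;
 *
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = open(path, O_RDONLY);
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = sizeof(buf);
 *		cb.aio_offset = 0;
 *		if (aio_read(&cb) < 0)			(queued via aio_aqueue())
 *			return (-1);
 *		while (aio_error(&cb) == EINPROGRESS)	(or aio_suspend(),
 *			;				 aio_waitcomplete())
 *		return ((int)aio_return(&cb));		(reaps the kernel job)
 *	}
 */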

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/event.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/sysref2.h>
#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/limits.h>

#include "opt_vfs_aio.h"

#ifdef VFS_AIO

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");

#define AIOP_FREE	0x1			/* proc on free queue */
#define AIOP_SCHED	0x2			/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;		/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};

#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct aiocb *job, int type);
static void	aio_physwakeup(struct bio *bio);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc, struct trapframe *frame);
static void	process_signal(void *aioj);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

/*
 * Zones for:
 *	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/*
 * Startup initialization
 */
static void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof(intptr_t), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}

/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, 0, "jobwai", 0);
	}
	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		num_buf_aio--;
		ki->kaio_buffer_count--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	/* XXX lwp knote wants a thread, but only cares about the process */
	knote_remove(FIRST_LWP_IN_PROC(p)->lwp_thread, &aiocbe->klist);

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		crit_enter();
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	callout_stop(&aiocbe->timeout);
	fdrop(aiocbe->fd_file);
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	return 0;
}

/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, 0, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		if (fp != NULL) {
			so = (struct socket *)fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.ssb_flags &= ~SSB_AIO;
				so->so_rcv.ssb_flags &= ~SSB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, 0, "aioprn", 0);
	}

restart4:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart4;
	}

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
			kprintf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}

/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	crit_enter();
	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			crit_exit();
			return aiocbe;
		}
	}
	crit_exit();

	return NULL;
}

/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct thread *mytd;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	mytd = curthread;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = mytd;

	inblock_st = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_st = mytd->td_lwp->lwp_ru.ru_oublock;
	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
	}
	inblock_end = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_end = mytd->td_lwp->lwp_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			ksignal(aiocbe->userproc, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
}

/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 *
 * The MP lock is held on entry.
 */
static void
aio_daemon(void *uproc, struct trapframe *frame)
{
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *mycp, *userp;
	struct vmspace *curvm;
	struct lwp *mylwp;
	struct ucred *cr;

	mylwp = curthread->td_lwp;
	mycp = mylwp->lwp_proc;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;

	crit_enter();

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	crit_exit();

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	fdfree(mycp);
	cr = cratom(&mycp->p_ucred);
	cr->cr_uid = 0;
	uireplace(&cr->cr_uidinfo, uifind(0));
	cr->cr_ngroups = 1;
	cr->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);
	curvm = NULL;

	for (;;) {
		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			crit_enter();
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			crit_exit();
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (curvm != userp->p_vmspace) {
				pmap_setlwpvm(mylwp, userp->p_vmspace);
				if (curvm)
					sysref_put(&curvm->vm_sysref);
				curvm = userp->p_vmspace;
				sysref_get(&curvm->vm_sysref);
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			crit_enter();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					ksignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			crit_exit();

			aiocbe->jobstate = JOBST_JOBFINISHED;

			crit_enter();
			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
			crit_exit();
			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				ksignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curvm) {
			/* swap our original address space back in */
			pmap_setlwpvm(mylwp, mycp->p_vmspace);
			sysref_put(&curvm->vm_sysref);
			curvm = NULL;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		crit_enter();
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;
		crit_exit();

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
		    0, "aiordy", aiod_lifetime)) {
			crit_enter();
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					crit_exit();
					zfree(aiop_zone, aiop);
					num_aio_procs--;

					if (mycp->p_vmspace->vm_sysref.refcnt <= 1) {
						kprintf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_sysref.refcnt);
					}
					exit1(0);
				}
			}
			crit_exit();
		}
	}
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct lwp *lp, *nlp;
	struct proc *np;

	lp = curthread->td_lwp;
	error = fork1(lp, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	nlp = ONLY_LWP_IN_PROC(np);
	cpu_set_fork_handler(nlp, aio_daemon, curproc);
	start_forked_proc(lp, np);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, 0, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}

/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If its not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = getpbuf(NULL);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_bio1.bio_caller_info1.ptr = p;
	error = 0;

	bp->b_cmd = (cb->aio_lio_opcode == LIO_WRITE) ?
	    BUF_CMD_WRITE : BUF_CMD_READ;
	bp->b_bio1.bio_done = aio_physwakeup;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bio1.bio_offset = cb->aio_offset;

	/* Bring buffer into kernel space. */
	if (vmapbuf(bp, __DEVOLATILE(char *, cb->aio_buf), cb->aio_nbytes) < 0) {
		error = EFAULT;
		goto doerror;
	}

	crit_enter();

	aiocbe->bp = bp;
	bp->b_bio1.bio_caller_info2.ptr = aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	bp->b_error = 0;

	crit_exit();

	/*
	 * Perform the transfer.  vn_strategy must be used even though we
	 * know we have a device in order to deal with requests which exceed
	 * device DMA limitations.
	 */
	vn_strategy(vp, &bp->b_bio1);

	crit_enter();

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}
	}
	crit_exit();
	KNOTE(&aiocbe->klist, 0);
	return (0);

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return (error);
}

/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	struct buf *bp;
	int error;

	bp = iocb->bp;

	error = biowait_timeout(&bp->b_bio1, "physstr", aiod_timeout);
	if (error == EWOULDBLOCK)
		return EINPROGRESS;

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = NULL;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}

#endif /* VFS_AIO */
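
/*
 * Illustrative only:  the physio fast path above applies to aio against
 * raw disk devices, e.g. (device name hypothetical):
 *
 *	cb.aio_fildes = open("/dev/da0", O_RDONLY);	(VCHR disk device)
 *	cb.aio_nbytes = 512 * 8;	(multiple of si_bsize_phys)
 *	aio_read(&cb);			(may go via aio_qphysio())
 *
 * Regular files, pipes, and sockets fall back to the aiod threads.
 */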

/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct signalsockbuf *ssb)
{
#ifdef VFS_AIO
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (ssb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.ssb_flags &= ~SSB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.ssb_flags &= ~SSB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
			wakecount++;
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}

#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
static int
_aio_aqueue(struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = curproc;
	struct file *fp;
	unsigned int fd;
	struct socket *so;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct kevent kev;
	struct kqueue *kq;
	struct file *kq_fp;
	int fflags;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc (aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_init(&aiocbe->timeout);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return EINVAL;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/*
	 * Range check file descriptor.
	 */
	fflags = (opcode == LIO_WRITE) ? FWRITE : FREAD;
	fd = aiocbe->uaiocb.aio_fildes;
	fp = holdfp(p->p_fd, fd, fflags);
	if (fp == NULL) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	aiocbe->fd_file = fp;

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	kq_fp = holdfp(p->p_fd, (int)kev.ident, -1);
	if (kq_fp == NULL || kq_fp->f_type != DTYPE_KQUEUE) {
		if (kq_fp)
			fdrop(kq_fp);
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	/* XXX lwp kqueue_register takes a thread, but only uses its proc */
	error = kqueue_register(kq, &kev, FIRST_LWP_IN_PROC(p)->lwp_thread);
	fdrop(kq_fp);
aqueue_fail:
	if (error) {
		fdrop(fp);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when ssb_notify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		crit_enter();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.ssb_flags |= SSB_AIO;
			else
				so->so_snd.ssb_flags |= SSB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			crit_exit();
			error = 0;
			goto done;
		}
		crit_exit();
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	crit_enter();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	crit_exit();
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick-up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
	crit_enter();
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	crit_exit();
done:
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct aiocb *job, int type)
{
	struct proc *p = curproc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(job, NULL, type);
}
#endif /* VFS_AIO */

/*
 * Support the aio_return system call, as a side-effect, kernel resources are
 * released.
 */
int
sys_aio_return(struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock += cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	crit_enter();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			crit_exit();
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			aio_free_entry(cb);
			return 0;
		}
	}
	crit_exit();

	return (EINVAL);
#endif /* VFS_AIO */
}

/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
sys_aio_suspend(struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if ((u_int)uap->nent > AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		crit_enter();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					crit_exit();
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiospn", timo);
		crit_exit();

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}
#endif /* VFS_AIO */
}
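
/*
 * Illustrative only:  a typical userland caller blocks on a batch of
 * requests with aio_suspend() instead of polling aio_error(), e.g.:
 *
 *	const struct aiocb *list[2] = { &cb0, &cb1 };
 *	struct timespec ts = { 5, 0 };		(give up after 5 seconds)
 *
 *	if (aio_suspend(list, 2, &ts) == 0) {
 *		at least one of cb0/cb1 is finished; find it with
 *		aio_error() and reap it with aio_return().
 *	}
 */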

/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
sys_aio_cancel(struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct socket *so;
	struct proc *po;
	struct vnode *vp;
	int error;
	int cancelled = 0;
	int notcancelled = 0;

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (vn_isdisk(vp, &error)) {
			uap->sysmsg_result = AIO_NOTCANCELED;
			error = 0;
			goto done2;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		crit_enter();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb) ) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}
		crit_exit();

		if ((cancelled) && (uap->aiocbp)) {
			uap->sysmsg_result = AIO_CANCELED;
			error = 0;
			goto done2;
		}
	}
	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done2;

	crit_enter();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {
			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}
	crit_exit();

	if (notcancelled) {
		uap->sysmsg_result = AIO_NOTCANCELED;
		error = 0;
		goto done2;
	}
	if (cancelled) {
		uap->sysmsg_result = AIO_CANCELED;
		error = 0;
		goto done2;
	}
	uap->sysmsg_result = AIO_ALLDONE;
	error = 0;
done2:
	fdrop(fp);
	return error;
#endif /* VFS_AIO */
}

/*
 * aio_error is implemented in the kernel level for compatibility purposes only.
 * For a user mode async implementation, it would be best to do it in a userland
 * subroutine.
 */
int
sys_aio_error(struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	crit_enter();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}
	crit_exit();

	return EINVAL;
#endif /* VFS_AIO */
}

/*
 * syscall - asynchronous read from a file (REALTIME)
 */
int
sys_aio_read(struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int error;

	get_mplock();
	error = aio_aqueue(uap->aiocbp, LIO_READ);
	rel_mplock();
	return (error);
#endif /* VFS_AIO */
}

/*
 * syscall - asynchronous write to a file (REALTIME)
 */
int
sys_aio_write(struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int error;

	get_mplock();
	error = aio_aqueue(uap->aiocbp, LIO_WRITE);
	rel_mplock();
	return (error);
#endif /* VFS_AIO */
}

/*
 * syscall - XXX undocumented
 */
int
sys_lio_listio(struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (lj == NULL)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else {
		lj->lioj_flags &= ~LIOJ_SIGNAL;
	}

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer in
				 * user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword(&cbptr[i]);
				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
				    == 0))
					continue;

				/*
				 * Fetch the associated command from user space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							lp->lwp_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							lp->lwp_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				crit_enter();
				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
				crit_exit();
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued) {
				error = runningcode;
				goto done;
			}

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PCATCH, "aiospn", 0);

			if (error == EINTR) {
				goto done;
			} else if (error == EWOULDBLOCK) {
				error = EAGAIN;
				goto done;
			}
		}
	}

	error = runningcode;
done:
	return error;
#endif /* VFS_AIO */
}
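
/*
 * Illustrative only:  lio_listio() submits a batch in one call; with
 * LIO_WAIT the call itself blocks until every listed job is done, e.g.:
 *
 *	struct aiocb *batch[2] = { &rd, &wr };
 *
 *	rd.aio_lio_opcode = LIO_READ;
 *	wr.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, batch, 2, NULL) < 0)
 *		err(1, "lio_listio");	(EIO here means some job failed)
 */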

#ifdef VFS_AIO
/*
 * This is a weird hack so that we can post a signal.  It is safe to do so from
 * a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		ksignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		ksignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	aiocbe = bio->bio_caller_info2.ptr;

	if (aiocbe) {
		p = bio->bio_caller_info1.ptr;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					callout_reset(&aiocbe->timeout, 0,
					    process_signal, aiocbe);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
			callout_reset(&aiocbe->timeout, 0,
			    process_signal, aiocbe);
		}
	}
	biodone_sync(bio);
}
#endif /* VFS_AIO */

/*
 * syscall - wait for the next completion of an aio request
 */
int
sys_aio_waitcomplete(struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		crit_enter();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
			crit_exit();
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiowc", timo);
		crit_exit();

		if (error == ERESTART) {
			return EINTR;
		}
		if (error == EINTR) {
			return EINTR;
		}
		if (error == EWOULDBLOCK) {
			return EAGAIN;
		}
	}
#endif /* VFS_AIO */
}

#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{
	return (ENXIO);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, NULL, NULL };

#else
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */
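
/*
 * Illustrative only:  userland arms the filter above by requesting
 * SIGEV_KEVENT notification before queueing, then collects completions
 * with kevent().  The EV_FLAG1 restriction means the knote itself is
 * always registered from _aio_aqueue(), never directly by the user:
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *
 *	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	cb.aio_sigevent.sigev_notify_kqueue = kq;
 *	cb.aio_sigevent.sigev_value.sigval_ptr = &cb;
 *	aio_read(&cb);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);  (ev.udata == &cb when done)
 */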