/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/kern/vfs_aio.c,v 1.42 2007/07/20 17:21:52 dillon Exp $
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
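/*
 * Illustrative only (not part of this file): a userland consumer of this
 * facility fills in a struct aiocb, submits it with aio_read() or
 * aio_write(), polls with aio_error(), and collects the result with
 * aio_return().  A minimal sketch, assuming an already-open descriptor fd:
 *
 *	struct aiocb acb;
 *	char buf[512];
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_offset = 0;
 *	if (aio_read(&acb) == 0) {
 *		while (aio_error(&acb) == EINPROGRESS)
 *			;			(or block in aio_suspend())
 *		ssize_t n = aio_return(&acb);	(bytes transferred, or -1)
 *	}
 */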
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/event.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/sysref2.h>
#include <sys/thread2.h>

#include <machine/limits.h>
#include "opt_vfs_aio.h"
#ifdef VFS_AIO

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * completion.
 */
static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");
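/*
 * All of the knobs above are exported under the vfs.aio sysctl node, so the
 * limits can be inspected or tuned from userland, e.g. (illustrative only):
 *
 *	sysctl vfs.aio.max_aio_procs
 *	sysctl vfs.aio.max_aio_queue=2048
 *
 * The CTLFLAG_RD entries (num_aio_procs, num_queue_count, num_buf_aio) are
 * counters that reflect current load and cannot be set.
 */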
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int	aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct	proc *aioproc;			/* The AIO thread */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct aiocb *job, int type);
static void	aio_physwakeup(struct bio *bio);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc, struct trapframe *frame);
static void	process_signal(void *aioj);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

/*
 * Zones for:
 *	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
/*
 * Startup initialization
 */
static void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
}
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}
/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, 0, "jobwai", 0);
	}

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	/* XXX lwp knote wants a thread, but only cares about the process */
	knote_remove(FIRST_LWP_IN_PROC(p)->lwp_thread, &aiocbe->klist);

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	callout_stop(&aiocbe->timeout);
	fdrop(aiocbe->fd_file);
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	return 0;
}
/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, 0, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		if (fp != NULL) {
			so = (struct socket *)fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.ssb_flags &= ~SSB_AIO;
				so->so_rcv.ssb_flags &= ~SSB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, 0, "aioprn", 0);
	}

restart3:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			goto restart3;
		}
	}

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
			kprintf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}
/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			return aiocbe;
		}
	}
	return NULL;
}
/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct thread *mytd;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	mytd = curthread;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = mytd;

	inblock_st = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_st = mytd->td_lwp->lwp_ru.ru_oublock;
	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
	}
	inblock_end = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_end = mytd->td_lwp->lwp_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			ksignal(aiocbe->userproc, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
}
/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 *
 * The MP lock is held on entry.
 */
static void
aio_daemon(void *uproc, struct trapframe *frame)
{
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *mycp, *userp;
	struct vmspace *curvm;
	struct lwp *mylwp;
	struct ucred *cr;

	mylwp = curthread->td_lwp;
	mycp = mylwp->lwp_proc;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	cr = cratom(&mycp->p_ucred);
	uireplace(&cr->cr_uidinfo, uifind(0));
	cr->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);
	curvm = NULL;

	for (;;) {
		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (curvm != userp->p_vmspace) {
				pmap_setlwpvm(mylwp, userp->p_vmspace);
				if (curvm)
					sysref_put(&curvm->vm_sysref);
				curvm = userp->p_vmspace;
				sysref_get(&curvm->vm_sysref);
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					ksignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}

			aiocbe->jobstate = JOBST_JOBFINISHED;

			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				ksignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curvm) {
			/* swap our original address space back in */
			pmap_setlwpvm(mylwp, mycp->p_vmspace);
			sysref_put(&curvm->vm_sysref);
			curvm = NULL;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
		    0, "aiordy", aiod_lifetime)) {
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					zfree(aiop_zone, aiop);
					num_aio_procs--;
					if (mycp->p_vmspace->vm_sysref.refcnt <= 1) {
						kprintf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_sysref.refcnt);
					}
					exit1(0);
				}
			}
		}
	}
}
/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct lwp *lp, *nlp;
	struct proc *np;

	lp = curthread->td_lwp;
	error = fork1(lp, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	nlp = ONLY_LWP_IN_PROC(np);
	cpu_set_fork_handler(nlp, aio_daemon, curproc);
	start_forked_proc(lp, np);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, 0, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}
/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = getpbuf(NULL);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_bio1.bio_caller_info1.ptr = p;

	bp->b_cmd = (cb->aio_lio_opcode == LIO_WRITE) ?
		    BUF_CMD_WRITE : BUF_CMD_READ;
	bp->b_bio1.bio_done = aio_physwakeup;
	bp->b_bio1.bio_offset = cb->aio_offset;

	/* Bring buffer into kernel space. */
	if (vmapbuf(bp, __DEVOLATILE(char *, cb->aio_buf), cb->aio_nbytes) < 0) {
		error = EFAULT;
		goto doerror;
	}

	aiocbe->bp = bp;
	bp->b_bio1.bio_caller_info2.ptr = aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;

	/*
	 * Perform the transfer.  vn_strategy must be used even though we
	 * know we have a device in order to deal with requests which exceed
	 * device DMA limitations.
	 */
	vn_strategy(vp, &bp->b_bio1);

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}
	}

	KNOTE(&aiocbe->klist, 0);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}
/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	struct buf *bp;
	int error;

	bp = iocb->bp;

	while (bp->b_cmd != BUF_CMD_DONE) {
		if (tsleep(bp, 0, "physstr", aiod_timeout)) {
			if (bp->b_cmd != BUF_CMD_DONE) {
				return EINPROGRESS;
			} else {
				break;
			}
		}
	}

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = NULL;

	error = 0;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}
#endif /* VFS_AIO */
/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct signalsockbuf *ssb)
{
#ifdef VFS_AIO
	struct aiocblist *cb, *cbn;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (ssb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.ssb_flags &= ~SSB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.ssb_flags &= ~SSB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			ki = cb->userproc->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}
#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
static int
_aio_aqueue(struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = curproc;
	struct filedesc *fdp;
	struct file *fp;
	struct file *kq_fp;
	unsigned int fd;
	struct socket *so;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct kevent kev;
	struct kqueue *kq;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc (aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_init(&aiocbe->timeout);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return EINVAL;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_files[fd].fp;
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}
	fhold(fp);

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_files[kev.ident].fp) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	/* XXX lwp kqueue_register takes a thread, but only uses its proc */
	error = kqueue_register(kq, &kev, FIRST_LWP_IN_PROC(p)->lwp_thread);
aqueue_fail:
	if (error) {
		fdrop(fp);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when ssb_notify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.ssb_flags |= SSB_AIO;
			else
				so->so_snd.ssb_flags |= SSB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			error = 0;
			goto done;
		}
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;
	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick-up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
done:
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct aiocb *job, int type)
{
	struct proc *p = curproc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(job, NULL, type);
}
#endif /* VFS_AIO */
/*
 * Support the aio_return system call; as a side effect, kernel resources are
 * released.
 */
int
sys_aio_return(struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock += cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	return (EINVAL);
#endif /* VFS_AIO */
}
/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
sys_aio_suspend(struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent > AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiospn", timo);

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}
	/* NOTREACHED */
#endif /* VFS_AIO */
}
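/*
 * Illustrative only: from userland, aio_suspend() is typically used to block
 * on a set of previously submitted control blocks, e.g. (a sketch, assuming
 * acb0 and acb1 were passed earlier to aio_read()/aio_write()):
 *
 *	const struct aiocb *acbs[2] = { &acb0, &acb1 };
 *	struct timespec ts = { 5, 0 };		(give up after five seconds)
 *
 *	if (aio_suspend(acbs, 2, &ts) == 0) {
 *		(at least one request finished; poll each with aio_error())
 *	}
 *
 * A NULL timeout sleeps until a completion arrives (the tsleep above with
 * timo == 0).
 */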
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
sys_aio_cancel(struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct vnode *vp;
	int error;
	int cancelled = 0;
	int notcancelled = 0;

	fdp = p->p_fd;
	if ((u_int)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_files[uap->fd].fp) == NULL)
		return (EBADF);

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;
		if (vn_isdisk(vp, &error)) {
			uap->sysmsg_result = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb) ) {
				ki = cbe->userproc->p_aioinfo;

				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(cbe->userproc);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}

		if ((cancelled) && (uap->aiocbp)) {
			uap->sysmsg_result = AIO_CANCELED;
			return 0;
		}
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {
			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}

done:
	if (notcancelled) {
		uap->sysmsg_result = AIO_NOTCANCELED;
		return 0;
	}
	if (cancelled) {
		uap->sysmsg_result = AIO_CANCELED;
		return 0;
	}
	uap->sysmsg_result = AIO_ALLDONE;

	return 0;
#endif /* VFS_AIO */
}
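/*
 * Illustrative only: the userland side of the syscall above.  A sketch,
 * assuming fd has outstanding requests queued with aio_read()/aio_write():
 *
 *	int r = aio_cancel(fd, NULL);
 *	if (r == AIO_NOTCANCELED) {
 *		(some requests were already in progress; reap them with
 *		 aio_error()/aio_return() before closing fd)
 *	}
 *
 * Passing a specific struct aiocb pointer instead of NULL cancels only that
 * request; the result codes correspond to the sysmsg_result values set above.
 */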
/*
 * aio_error is implemented at the kernel level for compatibility purposes
 * only.  For a user mode async implementation, it would be best to do it in
 * a userland subroutine.
 */
int
sys_aio_error(struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

#if (0)
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
	return 0;
#endif
	return EINVAL;
#endif /* VFS_AIO */
}
/* syscall - asynchronous read from a file (REALTIME) */
int
sys_aio_read(struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_READ);
#endif /* VFS_AIO */
}

/* syscall - asynchronous write to a file (REALTIME) */
int
sys_aio_write(struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_WRITE);
#endif /* VFS_AIO */
}
/* syscall - XXX undocumented */
int
sys_lio_listio(struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (lj == NULL)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else {
		lj->lioj_flags &= ~LIOJ_SIGNAL;
	}

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer in
				 * user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword(&cbptr[i]);
				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
				    == 0))
					continue;

				/*
				 * Fetch the associated command from user space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							lp->lwp_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							lp->lwp_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
#endif /* VFS_AIO */
}
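/*
 * Illustrative only: a userland lio_listio() call submits a whole vector of
 * requests through _aio_aqueue() above.  A minimal sketch, assuming two
 * prepared control blocks:
 *
 *	struct aiocb *list[2] = { &rd_acb, &wr_acb };
 *
 *	rd_acb.aio_lio_opcode = LIO_READ;
 *	wr_acb.aio_lio_opcode = LIO_WRITE;
 *	if (lio_listio(LIO_WAIT, list, 2, NULL) != 0)
 *		err(1, "lio_listio");
 *
 * With LIO_NOWAIT a struct sigevent may be supplied instead, which is what
 * the lioj_signal handling above implements.
 */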
#ifdef VFS_AIO
/*
 * This is a weird hack so that we can post a signal.  It is safe to do so from
 * a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		ksignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		ksignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}
/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	aiocbe = bio->bio_caller_info2.ptr;
	if (aiocbe) {
		p = bio->bio_caller_info1.ptr;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					callout_reset(&aiocbe->timeout, 0,
					    process_signal, aiocbe);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
			callout_reset(&aiocbe->timeout, 0,
			    process_signal, aiocbe);
		}
	}
	bp->b_cmd = BUF_CMD_DONE;
	wakeup(bp);
}
#endif /* VFS_AIO */
/* syscall - wait for the next completion of an aio request */
int
sys_aio_waitcomplete(struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			return cb->uaiocb._aiocb_private.error;
		}

		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			return cb->uaiocb._aiocb_private.error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiowc", timo);

		if (error == ERESTART)
			return EINTR;
		else if (error < 0)
			return error;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
#endif /* VFS_AIO */
}
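/*
 * Illustrative only: aio_waitcomplete() is the non-POSIX "reap whichever
 * request finishes next" interface.  A sketch of the userland side:
 *
 *	struct aiocb *done;
 *
 *	ssize_t n = aio_waitcomplete(&done, NULL);	(NULL = wait forever)
 *	if (n >= 0) {
 *		("done" now points at the caller's aiocb for the finished job)
 *	}
 *
 * The kernel side above writes the user aiocb pointer back through
 * uap->aiocbp (the suword() call) and returns the transfer count via
 * sysmsg_result.
 */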
#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{
	return (ENXIO);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, NULL, NULL };

#else
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */
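/*
 * Illustrative only: the filter above is reached when a request asks for
 * kqueue notification.  A userland sketch, assuming kq is an existing
 * kqueue descriptor and fd/buf are set up as in the earlier example:
 *
 *	struct aiocb acb;
 *	struct kevent ev;
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = sizeof(buf);
 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *	acb.aio_sigevent.sigev_notify_kqueue = kq;
 *	acb.aio_sigevent.sigev_value.sigval_ptr = &acb;
 *	aio_read(&acb);
 *
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 *	(ev.ident is the user aiocb pointer, ev.udata the sigev_value set
 *	 above; the job can then be reaped with aio_return())
 *
 * _aio_aqueue() registers the knote itself (EV_FLAG1), which is why
 * filt_aioattach() rejects registrations coming directly from userland.
 */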