/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/kern/vfs_aio.c,v 1.42 2007/07/20 17:21:52 dillon Exp $
 */
/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
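/*
 * Rough userland usage sketch (illustration only, not part of the kernel
 * build): the syscalls implemented below are normally driven from an
 * application roughly like this.  Hypothetical fragment, error handling
 * omitted:
 *
 *	struct aiocb cb;
 *	char buf[512];
 *
 *	bzero(&cb, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	aio_read(&cb);
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;			// poll; aio_suspend() also works
 *	nread = aio_return(&cb);	// reap status, free kernel resources
 */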
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/event.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/sysref2.h>
#include <sys/thread2.h>

#include <machine/limits.h>
#include "opt_vfs_aio.h"
#ifdef VFS_AIO

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;
#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6
#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
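/*
 * Descriptive note (added): aiod_timeout (default 10*hz) bounds the
 * tsleep()s used when the kernel must wait synchronously on aio activity
 * (rundown, physio completion, daemon startup); aiod_lifetime (default
 * 30*hz) is how long an idle aio daemon sleeps before it is allowed to
 * exit.  Both are runtime-tunable via the sysctls declared below.
 */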
SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");
static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");
/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");
static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");
static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");
static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");
/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;
static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");
static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");
static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");
static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");
static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
};
/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};
#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
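/*
 * Descriptive note (added): KAIO_RUNDOWN is set while aio_proc_rundown()
 * drains a process' outstanding requests at exit; KAIO_WAKEUP asks the
 * completion paths (aio_daemon, aio_physwakeup, aio_free_entry) to
 * wakeup() the owning process.
 */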
static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */
static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct aiocb *job, int type);
static void	aio_physwakeup(struct bio *bio);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc, struct trapframe *frame);
static void	process_signal(void *aioj);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
/*
 * Zones for:
 *	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
/*
 * Startup initialization
 */
static void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
}
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}
/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, 0, "jobwai", 0);
	}

	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	/* XXX lwp knote wants a thread, but only cares about the process */
	knote_remove(FIRST_LWP_IN_PROC(p)->lwp_thread, &aiocbe->klist);

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		vunmapbuf(aiocbe->bp);
		relpbuf(aiocbe->bp, NULL);
	}

	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	callout_stop(&aiocbe->timeout);
	fdrop(aiocbe->fd_file);
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	return 0;
}
/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, 0, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		so = (struct socket *)fp->f_data;
		TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
		if (TAILQ_EMPTY(&so->so_aiojobq)) {
			so->so_snd.ssb_flags &= ~SSB_AIO;
			so->so_rcv.ssb_flags &= ~SSB_AIO;
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, 0, "aioprn", 0);
	}

restart4:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			goto restart4;
		}
	}

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
			kprintf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}
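/*
 * Descriptive note (added): aio_selectjob() below provides rough per-process
 * fairness: a job is skipped while its owning process already has
 * kaio_maxactive_count requests in flight, so one process cannot
 * monopolize all of the aio daemons.
 */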
/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			return aiocbe;
		}
	}
	return NULL;
}
/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct thread *mytd = curthread;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = mytd;

	inblock_st = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_st = mytd->td_lwp->lwp_ru.ru_oublock;

	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
	}

	inblock_end = mytd->td_lwp->lwp_ru.ru_inblock;
	oublock_end = mytd->td_lwp->lwp_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			ksignal(aiocbe->userproc, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
}
/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 *
 * The MP lock is held on entry.
 */
static void
aio_daemon(void *uproc, struct trapframe *frame)
{
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *mycp, *userp;
	struct vmspace *curvm;
	struct lwp *mylwp;
	struct ucred *cr;

	mylwp = curthread->td_lwp;
	mycp = mylwp->lwp_proc;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	cr = cratom(&mycp->p_ucred);
	uireplace(&cr->cr_uidinfo, uifind(0));
	cr->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);
	curvm = NULL;

	for (;;) {
		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (curvm != userp->p_vmspace) {
				pmap_setlwpvm(mylwp, userp->p_vmspace);
				if (curvm)
					sysref_put(&curvm->vm_sysref);
				curvm = userp->p_vmspace;
				sysref_get(&curvm->vm_sysref);
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					ksignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}

			aiocbe->jobstate = JOBST_JOBFINISHED;

			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				ksignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curvm) {
			/* swap our original address space back in */
			pmap_setlwpvm(mylwp, mycp->p_vmspace);
			sysref_put(&curvm->vm_sysref);
			curvm = NULL;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
		    0, "aiordy", aiod_lifetime)) {
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					zfree(aiop_zone, aiop);
					num_aio_procs--;
					if (mycp->p_vmspace->vm_sysref.refcnt <= 1) {
						kprintf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_sysref.refcnt);
					}
					exit1(0);
				}
			}
		}
	}
}
/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct lwp *lp, *nlp;
	struct proc *np;

	lp = &lwp0;
	error = fork1(lp, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	nlp = ONLY_LWP_IN_PROC(np);
	cpu_set_fork_handler(nlp, aio_daemon, curproc);
	start_forked_proc(lp, np);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, 0, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}
/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If its not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = getpbuf(NULL);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_bio1.bio_caller_info1.ptr = p;

	bp->b_cmd = (cb->aio_lio_opcode == LIO_WRITE) ?
		    BUF_CMD_WRITE : BUF_CMD_READ;
	bp->b_bio1.bio_done = aio_physwakeup;
	bp->b_bio1.bio_offset = cb->aio_offset;

	/* Bring buffer into kernel space. */
	if (vmapbuf(bp, __DEVOLATILE(char *, cb->aio_buf), cb->aio_nbytes) < 0) {
		error = EFAULT;
		goto doerror;
	}

	aiocbe->bp = bp;
	bp->b_bio1.bio_caller_info2.ptr = aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;

	/*
	 * Perform the transfer.  vn_strategy must be used even though we
	 * know we have a device in order to deal with requests which exceed
	 * device DMA limitations.
	 */
	vn_strategy(vp, &bp->b_bio1);

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
		}

		KNOTE(&aiocbe->klist, 0);
	}
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}
/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	struct buf *bp;
	int error;

	bp = iocb->bp;

	while (bp->b_cmd != BUF_CMD_DONE) {
		if (tsleep(bp, 0, "physstr", aiod_timeout)) {
			if (bp->b_cmd != BUF_CMD_DONE)
				return EINPROGRESS;
			else
				break;
		}
	}

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = NULL;

	error = 0;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}

#endif /* VFS_AIO */
/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct signalsockbuf *ssb)
{
#ifdef VFS_AIO
	struct aiocblist *cb, *cbn;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (ssb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.ssb_flags &= ~SSB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.ssb_flags &= ~SSB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			ki = cb->userproc->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
			wakecount++;
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}
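/*
 * Descriptive note (added): _aio_aqueue() below dispatches each request in
 * this order: socket descriptors that are not yet readable/writable are
 * parked on the socket's so_aiojobq (aio_swake() requeues them later);
 * eligible raw-disk VCHR I/O goes through the aio_qphysio() fast path;
 * everything else is placed on the global aio_jobs queue for the aiod
 * kernel threads.
 */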
#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
static int
_aio_aqueue(struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = curproc;
	struct filedesc *fdp;
	struct file *fp;
	struct file *kq_fp;
	struct kqueue *kq;
	struct kevent kev;
	struct socket *so;
	int fd;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc(aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_init(&aiocbe->timeout);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return EINVAL;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_files[fd].fp;
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}
	fhold(fp);

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_files[kev.ident].fp) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	/* XXX lwp kqueue_register takes a thread, but only uses its proc */
	error = kqueue_register(kq, &kev, FIRST_LWP_IN_PROC(p)->lwp_thread);
aqueue_fail:
	if (error) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when ssb_notify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.ssb_flags |= SSB_AIO;
			else
				so->so_snd.ssb_flags |= SSB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			error = 0;
			goto done;
		}
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick-up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
done:
	return error;
}
/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct aiocb *job, int type)
{
	struct proc *p = curproc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(job, NULL, type);
}
#endif /* VFS_AIO */
/*
 * Support the aio_return system call, as a side-effect, kernel resources are
 * released.
 */
int
sys_aio_return(struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock += cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else {
				uap->sysmsg_result = EFAULT;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	return EINVAL;
#endif /* VFS_AIO */
}
/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
sys_aio_suspend(struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent > AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return EINVAL;

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return EINVAL;
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiospn", timo);

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}
#endif /* VFS_AIO */
}
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
sys_aio_cancel(struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct vnode *vp;
	int error;
	int cancelled = 0;
	int notcancelled = 0;

	fdp = p->p_fd;
	if ((u_int)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_files[uap->fd].fp) == NULL)
		return EBADF;

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;
		if (vn_isdisk(vp,&error)) {
			uap->sysmsg_result = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb) ) {
				ki = cbe->userproc->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(p);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status=-1;
				cbe->uaiocb._aiocb_private.error=ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}
		if ((cancelled) && (uap->aiocbp)) {
			uap->sysmsg_result = AIO_CANCELED;
			return 0;
		}
	}

	ki = p->p_aioinfo;
	if (ki) {
		for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, plist);

			if ((uap->fd == cbe->uaiocb.aio_fildes) &&
			    ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb))) {
				if (cbe->jobstate == JOBST_JOBQGLOBAL) {
					TAILQ_REMOVE(&aio_jobs, cbe, list);
					TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
					TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
					    plist);
					cancelled++;
					ki->kaio_queue_finished_count++;
					cbe->jobstate = JOBST_JOBFINISHED;
					cbe->uaiocb._aiocb_private.status = -1;
					cbe->uaiocb._aiocb_private.error = ECANCELED;
					/* XXX cancelled, knote? */
					if (cbe->uaiocb.aio_sigevent.sigev_notify ==
					    SIGEV_SIGNAL)
						ksignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				} else {
					notcancelled++;
				}
			}
		}
	}

	if (notcancelled) {
		uap->sysmsg_result = AIO_NOTCANCELED;
		return 0;
	}
	if (cancelled) {
		uap->sysmsg_result = AIO_CANCELED;
		return 0;
	}
	uap->sysmsg_result = AIO_ALLDONE;
	return 0;
#endif /* VFS_AIO */
}
/*
 * aio_error is implemented in the kernel level for compatibility purposes only.
 * For a user mode async implementation, it would be best to do it in a userland
 * subroutine.
 */
int
sys_aio_error(struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;
	int status;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			return 0;
		}
	}

	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
	return EINVAL;
#endif /* VFS_AIO */
}
/* syscall - asynchronous read from a file (REALTIME) */
int
sys_aio_read(struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_READ);
#endif /* VFS_AIO */
}
/* syscall - asynchronous write to a file (REALTIME) */
int
sys_aio_write(struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_WRITE);
#endif /* VFS_AIO */
}
/* syscall - XXX undocumented */
int
sys_lio_listio(struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (lj == NULL)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else {
		lj->lioj_flags &= ~LIOJ_SIGNAL;
	}

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);

	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer in
				 * user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword(&cbptr[i]);
				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
				    == 0))
					continue;

				/*
				 * Fetch the associated command from user space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref =
				    fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							lp->lwp_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							lp->lwp_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
#endif /* VFS_AIO */
}
#ifdef VFS_AIO
/*
 * This is a weird hack so that we can post a signal.  It is safe to do so from
 * a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		ksignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		ksignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}
/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	aiocbe = bio->bio_caller_info2.ptr;
	if (aiocbe) {
		p = bio->bio_caller_info1.ptr;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					callout_reset(&aiocbe->timeout, 0,
					    process_signal, aiocbe);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
			callout_reset(&aiocbe->timeout, 0,
			    process_signal, aiocbe);
		}
	}
	bp->b_cmd = BUF_CMD_DONE;
	wakeup(bp);
}
#endif /* VFS_AIO */
/* syscall - wait for the next completion of an aio request */
int
sys_aio_waitcomplete(struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return EINVAL;

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return EINVAL;
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				lp->lwp_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				lp->lwp_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			error = cb->uaiocb._aiocb_private.error;
			aio_free_entry(cb);
			return error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiowc", timo);

		if (error == ERESTART)
			return EINTR;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
#endif /* VFS_AIO */
}
#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{
	return (ENXIO);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, NULL, NULL };

#else
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */