/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/kern/vfs_aio.c,v 1.20 2006/02/17 19:18:06 dillon Exp $
 */
/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
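/*
 * Illustrative userland view of this facility -- a minimal synchronous-style
 * read built on the async calls implemented below.  This is a sketch only
 * (hence the #if 0): it is not part of the kernel build and assumes just the
 * standard POSIX <aio.h> interface.
 */
#if 0
#include <aio.h>
#include <err.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

static ssize_t
demo_aio_read(int fd, void *buf, size_t len)
{
	struct aiocb cb;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_offset = 0;

	if (aio_read(&cb) == -1)		/* enqueue; see aio_aqueue() */
		err(1, "aio_read");
	while (aio_error(&cb) == EINPROGRESS)	/* poll completion status */
		usleep(1000);
	return (aio_return(&cb));		/* reaps the kernel job entry */
}
#endif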
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/event.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/thread2.h>

#include <machine/limits.h>
#include "opt_vfs_aio.h"
#ifdef VFS_AIO

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;
#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6
#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");
/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;
static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");
/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
};
/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
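/*
 * Accounting note: kaio_queue_count/kaio_active_count track jobs serviced by
 * the aiod threads, while kaio_buffer_count tracks jobs pushed straight into
 * the buf/physio subsystem by aio_qphysio().  Both classes must drain before
 * a KAIO_RUNDOWN in aio_proc_rundown() can complete.
 */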
static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;		/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;	/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;	/* Pool of free jobs */
static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct aiocb *job, int type);
static void	aio_physwakeup(struct bio *bio);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);
static void	process_signal(void *aioj);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
/*
 * Zones for:
 * 	kaio	Per process async io info
 * 	aiop	async io thread data
 * 	aiocb	async io jobs
 * 	aiol	list io job pointer - internal to aio_suspend XXX
 * 	aiolio	list io jobs
 */
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
/*
 * Startup initialization
 */
static void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}
/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, 0, "jobwai", 0);
	}
	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	knote_remove(p->p_thread, &aiocbe->klist);

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		crit_enter();
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED)
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	callout_stop(&aiocbe->timeout);
	fdrop(aiocbe->fd_file, curthread);
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	return 0;
}
#endif /* VFS_AIO */
/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
#ifndef VFS_AIO
	return;
#else
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, 0, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	crit_enter();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		if (fp != NULL) {
			so = (struct socket *)fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.sb_flags &= ~SB_AIO;
				so->so_rcv.sb_flags &= ~SB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}
	crit_exit();

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
	    aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

	crit_enter();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, 0, "aioprn", 0);
	}
	crit_exit();

restart4:
	crit_enter();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			crit_exit();
			goto restart4;
		}
	}
	crit_exit();

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
		    0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
#ifdef DIAGNOSTIC
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
#endif
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
#endif /* VFS_AIO */
}
#ifdef VFS_AIO
/*
 * Select a job to run (called by an AIO daemon).
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	crit_enter();
	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
	    TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			crit_exit();
			return aiocbe;
		}
	}
	crit_exit();
	return NULL;
}
/*
 * The AIO processing activity.  This is the code that does the I/O request for
 * the non-physio version of the operations.  The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct thread *mytd;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	mytd = curthread;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = mytd;

	inblock_st = mytd->td_proc->p_stats->p_ru.ru_inblock;
	oublock_st = mytd->td_proc->p_stats->p_ru.ru_oublock;
	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mytd);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mytd);
	}
	inblock_end = mytd->td_proc->p_stats->p_ru.ru_inblock;
	oublock_end = mytd->td_proc->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			psignal(aiocbe->userproc, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
}
/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 *
 * The MP lock is held on entry.
 */
static void
aio_daemon(void *uproc)
{
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *curcp, *mycp, *userp;
	struct vmspace *myvm, *tmpvm;
	struct ucred *cr;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = curproc;
	myvm = mycp->p_vmspace;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop structure
	 * per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;

	crit_enter();

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	crit_exit();

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIOD's don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	fdfree(mycp);
	cr = cratom(&mycp->p_ucred);
	cr->cr_uid = 0;
	uireplace(&cr->cr_uidinfo, uifind(0));
	cr->cr_ngroups = 1;
	cr->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);

	for (;;) {
		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			crit_enter();
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			crit_exit();
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (userp != curcp) {
				/*
				 * Save the current address space that we are
				 * connected to.
				 */
				tmpvm = mycp->p_vmspace;

				/*
				 * Point to the new user address space, and
				 * refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;

				/* Activate the new mapping. */
				pmap_activate(mycp);

				/*
				 * If the old address space wasn't the daemons
				 * own address space, then we need to remove the
				 * daemon's reference from the other process
				 * that it was acting on behalf of.
				 */
				if (tmpvm != myvm)
					vmspace_free(tmpvm);
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			crit_enter();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					psignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			crit_exit();

			aiocbe->jobstate = JOBST_JOBFINISHED;

			crit_enter();
			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
			crit_exit();
			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				psignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {
			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(mycp);
#ifdef DIAGNOSTIC
			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}
#endif
			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		crit_enter();
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;
		crit_exit();

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
		    0, "aiordy", aiod_lifetime)) {
			crit_enter();
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					crit_exit();
					zfree(aiop_zone, aiop);
					num_aio_procs--;
#ifdef DIAGNOSTIC
					if (mycp->p_vmspace->vm_refcnt <= 1) {
						printf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
					}
#endif
					exit1(0);
				}
			}
			crit_exit();
		}
	}
}
/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct lwp *lp = curthread->td_lwp;
	struct proc *np;

	error = fork1(lp, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	cpu_set_fork_handler(np, aio_daemon, curproc);
	start_forked_proc(lp, np);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, 0, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}
/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.  This method doesn't use an aio helper thread, and
 * thus has very low overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = getpbuf(NULL);
	BUF_KERNPROC(bp);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_bio1.bio_caller_info1.ptr = p;
	error = 0;

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS | (cb->aio_lio_opcode == LIO_WRITE ?
	    B_WRITE : B_READ);
	bp->b_bio1.bio_done = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(uintptr_t)cb->aio_buf;
	bp->b_bio1.bio_blkno = btodb(cb->aio_offset);

	/* Bring buffer into kernel space. */
	if (vmapbuf(bp) < 0) {
		error = EFAULT;
		goto doerror;
	}

	crit_enter();

	bp->b_bio1.bio_caller_info2.ptr = aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;

	crit_exit();

	/* Perform transfer. */
	dev_dstrategy(vp->v_rdev, &bp->b_bio1);

	crit_enter();

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			num_buf_aio--;
		}
	}
	crit_exit();

	KNOTE(&aiocbe->klist, 0);

	return (0);

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	relpbuf(bp, NULL);
	return (error);
}
/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	struct buf *bp;
	int error;

	bp = iocb->bp;

	crit_enter();
	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep(bp, 0, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				crit_exit();
				return EINPROGRESS;
			} else
				break;
		}
	}
	crit_exit();

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = NULL;

	error = 0;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}
#endif /* VFS_AIO */
/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct sockbuf *sb)
{
#ifndef VFS_AIO
	return;
#else
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (sb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.sb_flags &= ~SB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.sb_flags &= ~SB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}
#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
 */
static int
_aio_aqueue(struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = curproc;
	struct filedesc *fdp;
	struct file *fp;
	u_int fd;
	struct socket *so;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct kevent kev;
	struct kqueue *kq;
	struct file *kq_fp;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc(aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_init(&aiocbe->timeout);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return EINVAL;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_files[fd].fp;
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}
	fhold(fp);

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp, p->p_thread);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_files[kev.ident].fp) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	error = kqueue_register(kq, &kev, p->p_thread);
aqueue_fail:
	if (error) {
		fdrop(fp, p->p_thread);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the requested
		 * operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		crit_enter();
		if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
		    LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			crit_exit();
			error = 0;
			goto done;
		}
		crit_exit();
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	crit_enter();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	crit_exit();
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota, then
	 * start one.  Otherwise, depend on the subsequent I/O completions to
	 * pick-up this job.  If we don't successfully create the new process
	 * (thread) due to resource issues, we return an error for now (EAGAIN),
	 * which is likely not the correct thing to do.
	 */
	crit_enter();
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	crit_exit();
done:
	return error;
}
/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct aiocb *job, int type)
{
	struct proc *p = curproc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(job, NULL, type);
}
#endif /* VFS_AIO */
/*
 * Support the aio_return system call, as a side-effect, kernel resources are
 * released.
 */
int
aio_return(struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else
				uap->sysmsg_result = EFAULT;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}

	crit_enter();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
		    == jobref) {
			crit_exit();
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else
				uap->sysmsg_result = EFAULT;
			aio_free_entry(cb);
			return 0;
		}
	}
	crit_exit();

	return (EINVAL);
#endif /* VFS_AIO */
}
/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
aio_suspend(struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent > AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		crit_enter();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
		    TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					crit_exit();
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiospn", timo);
		crit_exit();

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}
#endif /* VFS_AIO */
}
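/*
 * Illustrative userland companion to the syscall above: block until one of
 * several outstanding requests finishes.  Sketch only, not part of the
 * kernel build; assumes the standard <aio.h> prototypes.
 */
#if 0
#include <aio.h>
#include <time.h>

/* Wait up to one second for either of two queued control blocks. */
static int
demo_wait_two(const struct aiocb *a, const struct aiocb *b)
{
	const struct aiocb *list[2];
	struct timespec ts;

	list[0] = a;
	list[1] = b;
	ts.tv_sec = 1;
	ts.tv_nsec = 0;
	return (aio_suspend(list, 2, &ts));	/* 0, or -1 with errno set */
}
#endif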
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
aio_cancel(struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct proc *po;
	int error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	fdp = p->p_fd;
	if ((u_int)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_files[uap->fd].fp) == NULL)
		return (EBADF);

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (vn_isdisk(vp, &error)) {
			uap->sysmsg_result = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		crit_enter();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb)) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}
		crit_exit();

		if ((cancelled) && (uap->aiocbp)) {
			uap->sysmsg_result = AIO_CANCELED;
			return 0;
		}
	}
	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;
	crit_enter();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {
			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}
	crit_exit();
done:
	if (notcancelled) {
		uap->sysmsg_result = AIO_NOTCANCELED;
		return 0;
	}
	if (cancelled) {
		uap->sysmsg_result = AIO_CANCELED;
		return 0;
	}
	uap->sysmsg_result = AIO_ALLDONE;
	return 0;
#endif /* VFS_AIO */
}
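/*
 * Cancellation semantics worth noting from the code above: only jobs still
 * sitting on the socket queue or the process job queue are cancelled;
 * anything already handed to a disk via the physio path reports
 * AIO_NOTCANCELED and has to be waited out.
 */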
/*
 * aio_error is implemented in the kernel level for compatibility purposes only.
 * For a user mode async implementation, it would be best to do it in a userland
 * subroutine.
 */
int
aio_error(struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	crit_enter();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}
	crit_exit();

	crit_enter();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}
	crit_exit();

#if (0)
	/*
	 * Hack for lio.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
#endif
	return EINVAL;
#endif /* VFS_AIO */
}
/* syscall - asynchronous read from a file (REALTIME) */
int
aio_read(struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_READ);
#endif /* VFS_AIO */
}
/* syscall - asynchronous write to a file (REALTIME) */
int
aio_write(struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_WRITE);
#endif /* VFS_AIO */
}
/* syscall - XXX undocumented */
int
lio_listio(struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (!lj)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else
		lj->lioj_flags &= ~LIOJ_SIGNAL;

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer in
				 * user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword(&cbptr[i]);
				if (((intptr_t)iocb == -1) || ((intptr_t)iocb
				    == 0))
					continue;

				/*
				 * Fetch the associated command from user space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref = fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							p->p_stats->p_ru.ru_oublock
							    +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							p->p_stats->p_ru.ru_inblock
							    += cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				crit_enter();
				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
				crit_exit();
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
#endif /* VFS_AIO */
}
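/*
 * Illustrative userland use of lio_listio(): submit a read and a write as one
 * batch and wait for both.  Sketch only, not part of the kernel build;
 * assumes the standard <aio.h> interface.
 */
#if 0
#include <aio.h>
#include <string.h>

static int
demo_lio(int fd, char *inbuf, char *outbuf, size_t len)
{
	struct aiocb rd, wr;
	struct aiocb *list[2];

	memset(&rd, 0, sizeof(rd));
	rd.aio_fildes = fd;
	rd.aio_buf = inbuf;
	rd.aio_nbytes = len;
	rd.aio_lio_opcode = LIO_READ;

	memset(&wr, 0, sizeof(wr));
	wr.aio_fildes = fd;
	wr.aio_buf = outbuf;
	wr.aio_nbytes = len;
	wr.aio_offset = len;
	wr.aio_lio_opcode = LIO_WRITE;

	list[0] = &rd;
	list[1] = &wr;
	/* LIO_WAIT blocks until every listed job is done. */
	return (lio_listio(LIO_WAIT, list, 2, NULL));
}
#endif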
#ifdef VFS_AIO
/*
 * This is a weird hack so that we can post a signal.  It is safe to do so from
 * a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}
/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	aiocbe = bio->bio_caller_info2.ptr;

	if (aiocbe) {
		p = bio->bio_caller_info1.ptr;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					callout_reset(&aiocbe->timeout, 0,
					    process_signal, aiocbe);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
			callout_reset(&aiocbe->timeout, 0,
			    process_signal, aiocbe);
		}
	}
	bp->b_flags |= B_DONE;
	wakeup(bp);
}
#endif /* VFS_AIO */
/* syscall - wait for the next completion of an aio request */
int
aio_waitcomplete(struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		crit_enter();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
			crit_exit();
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiowc", timo);
		crit_exit();

		if (error == ERESTART)
			return EINTR;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
#endif /* VFS_AIO */
}
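/*
 * Illustrative userland loop over the (FreeBSD/DragonFly-specific)
 * aio_waitcomplete() syscall above: reap whichever request finishes next
 * instead of polling individual control blocks.  Sketch only, not part of
 * the kernel build.
 */
#if 0
#include <aio.h>
#include <err.h>

static void
demo_reap_all(int pending)
{
	struct aiocb *done;
	ssize_t n;

	while (pending > 0) {
		n = aio_waitcomplete(&done, NULL);	/* NULL: wait forever */
		if (n == -1)
			err(1, "aio_waitcomplete");
		pending--;	/* 'done' now points at the finished aiocb */
	}
}
#endif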
#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, NULL, NULL };

#else
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */
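/*
 * Illustrative userland pairing for the filter above: request kevent-based
 * completion notification via SIGEV_KEVENT, then collect it with kevent().
 * Sketch only, not part of the kernel build; assumes <aio.h> and
 * <sys/event.h>.
 */
#if 0
#include <sys/event.h>
#include <aio.h>
#include <string.h>

static int
demo_kq_aio(int kq, int fd, void *buf, size_t len)
{
	struct aiocb cb;
	struct kevent ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = len;
	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	cb.aio_sigevent.sigev_notify_kqueue = kq;
	cb.aio_sigevent.sigev_value.sigval_ptr = &cb;

	if (aio_read(&cb) == -1)
		return (-1);
	/* EVFILT_AIO fires once; ev.udata carries sigev_value back. */
	return (kevent(kq, NULL, 0, &ev, 1, NULL));
}
#endif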