/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};
static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);

static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};

static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static void     knote_init(void);
static struct knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
        { 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t        knote_zone;
static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
                knote_enqueue(kn);                                      \
} while(0)
#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
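
/*
 * Illustrative note (not in the original source): KN_HASH folds the
 * second byte of the identifier into the low bits before masking, e.g.
 *
 *      KN_HASH(0x1234, 63) == ((0x1234 ^ 0x12) & 63) == 38
 */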
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
};
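
/*
 * Illustrative note (not in the original source): system filters are
 * negative constants in <sys/event.h> (EVFILT_READ == -1, EVFILT_WRITE
 * == -2, and so on), which is why kqueue_register() indexes this table
 * with ~kev->filter: EVFILT_READ maps to sysfilt_ops[0], EVFILT_WRITE
 * to sysfilt_ops[1], etc.
 */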
static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        get_mplock();
        if (kn->kn_filter != EVFILT_READ) {
                rel_mplock();
                return (EOPNOTSUPP);
        }

        kn->kn_fop = &kqread_filtops;
        SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
        rel_mplock();
        return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        lwkt_gettoken(&proc_token);
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                lwkt_reltoken(&proc_token);
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                lwkt_reltoken(&proc_token);
                return (EACCES);
        }

        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        /* XXX lock the proc here while adding to the list? */
        SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&proc_token);

        return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        /* XXX locking?  this might modify another process. */
        p = kn->kn_ptr.p_proc;
        SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}
static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                }
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}
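
/*
 * Userland sketch (illustrative, not part of this file): the
 * fork-tracking path above is exercised by a registration such as
 *
 *      struct kevent kev;
 *      EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *          NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Each child forked by 'pid' then delivers a NOTE_CHILD event whose
 * data field carries the parent pid, via the EV_FLAG1 registration
 * performed in filt_procattach().
 */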
static void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        kn->kn_data++;
        KNOTE_ACTIVATE(kn);

        if ((kn->kn_flags & EV_ONESHOT) == 0) {
                tv.tv_sec = kn->kn_sdata / 1000;
                tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                tticks = tvtohz_high(&tv);
                calloutp = (struct callout *)kn->kn_hook;
                callout_reset(calloutp, tticks, filt_timerexpire, kn);
        }
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax)
                return (ENOMEM);
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        MALLOC(calloutp, struct callout *, sizeof(*calloutp),
            M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_stop(calloutp);
        FREE(calloutp, M_KQUEUE);
        kq_ncallouts--;
}
static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
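
/*
 * Userland sketch (illustrative, not part of this file): a periodic
 * timer arrives in filt_timerattach() with the period in milliseconds
 * in kn_sdata, e.g. a 500ms recurring event:
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Because EV_CLEAR is set automatically, kn_data counts expirations
 * between scans and is zeroed when the event is read out.
 */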
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_sel.si_note);
}
/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct knote *kn;
        struct klist *list;
        int hv;

        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                kn->kn_fop->f_detach(kn);
                if (kn->kn_fop->f_isfd) {
                        list = &kn->kn_fp->f_klist;
                        SLIST_REMOVE(list, kn, knote, kn_link);
                        fdrop(kn->kn_fp);
                        kn->kn_fp = NULL;
                } else {
                        hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
                        list = &kq->kq_knhash[hv];
                        SLIST_REMOVE(list, kn, knote, kn_link);
                }
                TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
                if (kn->kn_status & KN_QUEUED)
                        knote_dequeue(kn);
                knote_free(kn);
        }

        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
}
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        }
        return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }
        return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec ats;
        struct timespec *tsp;
        int i, n, total, lres, error, nerrors = 0;
        struct knote marker;
        struct kevent kev[KQ_NEVENTS];

        tsp = tsp_in;
        *res = 0;

        for (;;) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        return (error);
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res != lres)
                                        nerrors++;
                        }
                }
        }
        if (nerrors)
                return (0);

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                if (tsp->tv_sec || tsp->tv_nsec) {
                        nanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        crit_enter();
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        crit_exit();
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;
                        crit_enter();
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                        crit_exit();
                }

                /*
                 * Process all received events
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += i;
                        if (error)
                                break;
                }

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to loop again.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        crit_enter();
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                        crit_exit();
                }
        }
        crit_enter();
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
        crit_exit();

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;
        return (error);
}
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);
        return (error);
}
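
/*
 * Userland sketch (illustrative, not part of this file): the syscalls
 * above are reached through the libc wrappers declared in <sys/event.h>.
 * A minimal consumer watching a descriptor for readability:
 *
 *      struct kevent change, event;
 *      int kq = kqueue();
 *
 *      EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      kevent(kq, &change, 1, NULL, 0, NULL);    register the event
 *      kevent(kq, NULL, 0, &event, 1, NULL);     wait for one event
 */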
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        struct klist *list;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                kprintf("unknown filter: %d\n", kev->filter);
                return (EINVAL);
        }

        if (fops->f_isfd) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL)
                        return (EBADF);

                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                break;
                        }
                }
        } else {
                if (kq->kq_knhashmask) {
                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter)
                                        break;
                        }
                }
        }

        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        knote_attach(kn);
                        if ((error = fops->f_attach(kn)) != 0) {
                                knote_drop(kn);
                                goto done;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                crit_enter();
                if (kn->kn_fop->f_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
                crit_exit();
        } else if (kev->flags & EV_DELETE) {
                kn->kn_fop->f_detach(kn);
                knote_drop(kn);
                goto done;
        }

        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                crit_enter();
                kn->kn_status |= KN_DISABLED;
                crit_exit();
        }

        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                crit_enter();
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0))
                        knote_enqueue(kn);
                crit_exit();
        }

done:
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                nanouptime(&ats);
                timespecsub(&atx, &ats);
                if (atx.tv_sec < 0) {
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }

        /* don't restart after signals... */
        if (error == ERESTART)
                error = EINTR;
        else if (error == EWOULDBLOCK)
                error = 0;

        return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;

        /*
         * Collect events.
         */
        crit_enter();
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other thread's marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                if (kn->kn_status & KN_DISABLED) {
                        kn->kn_status &= ~KN_QUEUED;
                        kq->kq_count--;
                        continue;
                }
                if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                    kn->kn_fop->f_event(kn, 0) == 0) {
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        kq->kq_count--;
                        continue;
                }
                *kevp++ = kn->kn_kevent;
                ++total;
                --count;

                /*
                 * Post-event action on the note
                 */
                if (kn->kn_flags & EV_ONESHOT) {
                        kn->kn_status &= ~KN_QUEUED;
                        kq->kq_count--;
                        crit_exit();
                        kn->kn_fop->f_detach(kn);
                        knote_drop(kn);
                        crit_enter();
                } else if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        kq->kq_count--;
                } else {
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
        crit_exit();
        return (total);
}
/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        return (error);
}
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        get_mplock();

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(kq->kq_sigio);
        rel_mplock();

        kfree(kq, M_KQUEUE);
        return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        if (kq->kq_state & KQ_SEL) {
                kq->kq_state &= ~KQ_SEL;
                selwakeup(&kq->kq_sel);
        }
        KNOTE(&kq->kq_sel.si_note, 0);
}
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
        struct knote *kn;

        SLIST_FOREACH(kn, list, kn_selnext)
                if (kn->kn_fop->f_event(kn, hint))
                        KNOTE_ACTIVATE(kn);
}
/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct klist *list)
{
        struct knote *kn;

        while ((kn = SLIST_FIRST(list)) != NULL) {
                kn->kn_fop->f_detach(kn);
                knote_drop(kn);
        }
}
/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct knote *kn;

restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kn->kn_fop->f_detach(kn);
                        knote_drop(kn);
                        goto restart;
                }
        }
}
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_isfd) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
        kn->kn_status = 0;
}
/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_isfd)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_isfd)
                fdrop(kn->kn_fp);
        knote_free(kn);
}
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        crit_enter();
        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        kq->kq_count++;
        crit_exit();

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        crit_enter();

        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
        crit_exit();
}
static void
knote_init(void)
{
        knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
        return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
        zfree(knote_zone, kn);
}