/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
/*
 * Global token for kqueue subsystem
 */
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
	    CTLFLAG_RW, &kq_token.t_collisions, 0,
	    "Collision counter of kq_token");
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};
static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};
static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static struct filterops file_filtops =
	{ FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
static int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int	kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of kqueue scan loops");
#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
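
/*
 * Example: KN_HASH(0x1234, 0x3f) == ((0x1234 ^ 0x12) & 0x3f) == 0x26.
 * Folding the high byte in with the xor spreads dense identifier
 * ranges (e.g. consecutive fds) across the KN_HASHSIZE buckets.
 */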
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};
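
/*
 * Filter numbers are small negative integers, so ~filter converts
 * them to a 0-based index into this table: EVFILT_READ (-1) maps to
 * sysfilt_ops[0], EVFILT_WRITE (-2) to sysfilt_ops[1], and so on
 * (see kqueue_register()).
 */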
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}
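
/*
 * Userland view of the proc filter, a minimal illustrative sketch
 * (not part of this file; child_pid is a hypothetical pid obtained
 * from fork()):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * If the child is already a zombie, the zpfind()/immediate path
 * above fires the exit note right away.
 */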
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  take proc_token here? */
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be gone
	 * later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct lwkt_token *tok;
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tok = lwkt_token_pool_lookup(kn->kn_kq);
	lwkt_gettoken(tok);
	if ((kn->kn_status & KN_DELETING) == 0) {
		kn->kn_data++;
		KNOTE_ACTIVATE(kn);

		if ((kn->kn_flags & EV_ONESHOT) == 0) {
			tv.tv_sec = kn->kn_sdata / 1000;
			tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
			tticks = tvtohz_high(&tv);
			calloutp = (struct callout *)kn->kn_hook;
			callout_reset(calloutp, tticks, filt_timerexpire, kn);
		}
	}
	lwkt_reltoken(tok);
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax) {
		kn->kn_hook = NULL;
		return (ENOMEM);
	}
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	kq_ncallouts--;
}
static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
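
/*
 * Illustrative userland sketch (not part of this file): kn_sdata is
 * the period in milliseconds, so a 500ms periodic timer is armed
 * with:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * EV_CLEAR is forced by filt_timerattach(), so kn_data reports the
 * number of expirations since the last scan and then resets.
 */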
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return (1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED) {
		kn->kn_status &= ~KN_PROCESSING;
		return (1);
	} else {
		kn->kn_status &= ~KN_PROCESSING;
		return (0);
	}
}
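
/*
 * Typical use of the two helpers above when walking a list, a sketch
 * of the pattern used by kqueue_terminate() and others in this file:
 *
 *	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
 *		if (knote_acquire(kn) == 0)
 *			continue;	(kn may be stale, rescan)
 *		...operate on kn...
 *		if (knote_release(kn))
 *			continue;	(kn was destroyed or detached)
 *	}
 */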
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}
/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct lwkt_token *tok;
	struct knote *kn;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
	lwkt_reltoken(tok);
}
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
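
/*
 * Illustrative userland usage (not part of this file; sock_fd is a
 * hypothetical descriptor):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register only)
 *
 * close(kq) eventually releases the kqueue via kqueue_close() below.
 */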
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	}
	return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	tsp = tsp_in;
	*res = 0;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					goto done;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;

			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
done:
	lwkt_reltoken(tok);
	return (error);
}
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
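
/*
 * Illustrative sketch of the timeout path (not part of this file):
 * a caller blocking for at most one second passes a relative
 * timespec, which kern_kevent() converts to an absolute target time
 * before sleeping:
 *
 *	struct timespec ts = { 1, 0 };
 *	struct kevent ev;
 *	int n = kevent(kq, NULL, 0, &ev, 1, &ts);
 *
 * A return of 0 means the timeout expired with no events pending.
 */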
/*
 * Caller must be holding the kq token
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct lwkt_token *tok;
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL) {
			lwkt_reltoken(tok);
			return (EBADF);
		}
		lwkt_getpooltoken(&fp->f_klist);
again1:
		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again1;
				break;
			}
		}
		lwkt_relpooltoken(&fp->f_klist);
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			lwkt_getpooltoken(list);
again2:
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter) {
					if (knote_acquire(kn) == 0)
						goto again2;
					break;
				}
			}
			lwkt_relpooltoken(list);
		}
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	lwkt_reltoken(tok);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
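
/*
 * The flag handling above maps directly onto the userland API; an
 * illustrative sequence (not part of this file):
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
 *		(knote kept, KN_DISABLED set, events suppressed)
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *		(KN_DISABLED cleared, re-queued if still active)
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
 *		(knote_detach_and_drop() destroys the knote)
 */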
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 *
 * Caller must be holding the kq token.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 *
 * Caller must be holding the kq token
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 *
		 * WARNING!  We must set KN_PROCESSING to avoid races
		 *	     against deletion or another thread's
		 *	     processing.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			*kevp++ = kn->kn_kevent;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	return (total);
}
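
/*
 * Rough picture of kq_knpend during a scan (an illustration of the
 * logic above): local_marker tracks our position while the caller's
 * marker bounds the scan, so continuous-mode events recycled to the
 * tail land past the caller's marker and are not rescanned:
 *
 *	head -> [local_marker] kn1 kn2 ... [marker] (recycled knotes)
 */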
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct lwkt_token *tok;
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(tok);
	return (error);
}
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	} else {
		ret = kn->kn_fop->f_attach(kn);
	}
	return (ret);
}
/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}
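
/*
 * A filter that is safe without the mplock advertises that in its
 * f_flags so the three wrappers above skip get_mplock().  Hypothetical
 * example (the names are illustrative, not from this file):
 *
 *	static struct filterops myfiltops =
 *		{ FILTEROP_ISFD | FILTEROP_MPSAFE,
 *		  filt_myattach, filt_mydetach, filt_myevent };
 */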
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kntmp == kn)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}
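
/*
 * Producer-side sketch (illustrative; sc_kqinfo is a hypothetical
 * softc field): a driver that just made data available posts the
 * event with
 *
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 *
 * which ends up in knote() above.  Passing a non-zero hint makes
 * knote() wait out any in-progress processing instead of merely
 * flagging KN_REPROCESS, so the hint is not lost.
 */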
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}
/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
	struct knote *kn;

	lwkt_gettoken(&kq_token);
	while ((kn = SLIST_FIRST(list)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(&kq_token);
}
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kntmp == kn)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
	lwkt_relpooltoken(list);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
	lwkt_relpooltoken(list);
}
/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}
/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}
static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}
static void
knote_free(struct knote *kn)
{
	kfree(kn, M_KQUEUE);
}