/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
};

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};
static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];
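/*
 * System filters are selected by negative filter numbers: EVFILT_READ
 * is -1, EVFILT_WRITE is -2, and so on.  kqueue_register() maps a
 * filter onto this table with ~kev->filter, and since ~filter is
 * (-filter - 1), e.g. ~(-1) == 0, the filters -1..-EVFILT_SYSCOUNT
 * land on indices 0..EVFILT_SYSCOUNT-1.
 */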
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return(0);
}
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}
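/*
 * Since kqueue_kqfilter() accepts only EVFILT_READ, a kqueue descriptor
 * can itself be monitored by another kqueue.  A userland sketch:
 *
 *	#include <sys/event.h>
 *
 *	int inner = kqueue();
 *	int outer = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(outer, &kev, 1, NULL, 0, NULL);
 *
 * A wait on 'outer' then wakes up when 'inner' has pending events;
 * filt_kqueue() below reports the pending event count in kn_data.
 */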
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
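/*
 * Userland sketch of the EVFILT_PROC paths above: NOTE_EXIT reports the
 * exit status (p_xstat) in data, and NOTE_TRACK makes filt_proc()
 * re-register against forked children via the EV_FLAG1/NOTE_CHILD path.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	pid_t pid = fork();	// parent continues below
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *	// kev.fflags may contain NOTE_EXIT (kev.data == exit status),
 *	// NOTE_FORK, or NOTE_CHILD (kev.data == parent pid).
 */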
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open knote_acquire(), since we can't sleep in callout,
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
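/*
 * Userland sketch of the timer filter above: data is in milliseconds
 * (see filt_timerreset()), EV_CLEAR is forced on, and kn_data counts
 * expirations since the last delivery.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// 500ms periodic timer
 *
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	// kev.data == number of
 *						// expirations accumulated
 */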
static int
filt_userattach(struct knote *kn)
{
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;
	return (0);
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
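/*
 * Userland sketch of the EVFILT_USER paths above: the event is armed
 * with EV_ADD and fired with NOTE_TRIGGER, while NOTE_FFAND/NOTE_FFOR/
 * NOTE_FFCOPY edit the saved fflags as handled in EVENT_REGISTER.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	// later, possibly from another thread:
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// wakes the waiter
 */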
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct lwkt_token *tok;
	struct knote *kn;

	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_reltoken(tok);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int lres;
	int limit = kq_checkloop;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return (error);
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return (error);
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return (0);

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				}
				timeout = atx.tv_sec > 24 * 60 * 60 ?
				    24 * 60 * 60 * hz : tstohz_high(&atx);
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				error = tsleep(kq, PCATCH, "kqread", timeout);

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}
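/*
 * Userland sketch of the error-posting path above: with EV_RECEIPT each
 * change produces an EV_ERROR entry in the eventlist (data == 0 on
 * success) instead of draining pending events.
 *
 *	struct kevent chg, res;
 *	int n;
 *
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *	n = kevent(kq, &chg, 1, &res, 1, NULL);
 *	// n == 1, res.flags has EV_ERROR set, res.data holds the
 *	// registration error code (0 if the EV_ADD succeeded).
 */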
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	struct thread *td = curthread;
	int error = 0;
	struct knote_cache_list *cache_list;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for insuring that
		 * the identifier can be attached to it.
		 */
		return (EINVAL);
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
	}

	lwkt_getpooltoken(kq);

	/*
	 * Make sure that only one thread can register event on this kqueue,
	 * so that we would not suffer any race, even if the registration
	 * blocked, i.e. kq token was released, and the kqueue was shared
	 * between threads (this should be rare though).
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

	if (fp != NULL) {
		list = &fp->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
				    kn_link);
				cache_list->knote_cache_cnt--;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filter which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

done:
	if (td != NULL) { /* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
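/*
 * Userland sketch of the registration cases above: a second EV_ADD for
 * the same (ident, filter) pair updates sfflags/sdata/udata in place
 * rather than creating a duplicate knote, while EV_DELETE on an
 * unregistered ident fails with ENOENT.
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, new_udata);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// modifies existing knote
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// detaches and drops it
 */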
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				if (kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH) {
					kn->kn_status |= KN_DISABLED;
				}
				kn->kn_status &= ~(KN_QUEUED |
						   KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}
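/*
 * Userland sketch of the delivery modes handled above: EV_CLEAR zeroes
 * data/fflags after delivery, EV_DISPATCH additionally disables the
 * knote until re-enabled, and EV_ONESHOT deletes it after the first
 * delivery.
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// ... after an event is returned the knote is KN_DISABLED;
 *	// re-arm it with:
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */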
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct lwkt_token *tok;
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(tok);
	return (error);
}
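/*
 * Userland sketch of the two ioctls handled above, which arm the SIGIO
 * delivery performed by pgsigio() in knote_enqueue():
 *
 *	int on = 1;
 *	int owner = getpid();
 *
 *	ioctl(kq, FIOSETOWN, &owner);	// direct SIGIO at this process
 *	ioctl(kq, FIOASYNC, &on);	// sets KQ_ASYNC
 */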
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
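/*
 * Userland sketch: fstat() on a kqueue descriptor reflects the fields
 * filled in above, i.e. a fifo-like mode and the pending event count:
 *
 *	struct stat st;
 *
 *	fstat(kq, &st);
 *	// S_ISFIFO(st.st_mode) is true and st.st_size is the number
 *	// of events currently queued (kq_count).
 */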
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		if (kq->kq_sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
		kq->kq_sleep_cnt = 0;
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn, marker;

	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(list);
	if (SLIST_EMPTY(list)) {
		lwkt_relpooltoken(list);
		return;
	}

	SLIST_INSERT_HEAD(list, &marker, kn_next);
	while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
		kq = kn->kn_kq;

		if (kn->kn_filter == EVFILT_MARKER) {
			/* Skip marker */
			SLIST_REMOVE(list, &marker, knote, kn_next);
			if (SLIST_NEXT(kn, kn_next) == NULL)
				goto done;
			SLIST_INSERT_AFTER(kn, &marker, kn_next);
			continue;
		}

		lwkt_getpooltoken(kq);

		if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
			/*
			 * Don't move the marker; check the knote after
			 * the marker again.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				/*
				 * Move the marker w/ the kq token, so that
				 * this knote will not be ripped behind our
				 * back.
				 */
				SLIST_REMOVE(list, &marker, knote, kn_next);
				if (SLIST_NEXT(kn, kn_next) != NULL)
					SLIST_INSERT_AFTER(kn, &marker,
					    kn_next);
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);

			/*
			 * Don't move the marker; check this knote again,
			 * hopefully it is still after the marker.  Or it
			 * was deleted and we would check the next knote.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		/*
		 * Become the reprocessing master ourselves.
		 */
		KASSERT((kn->kn_status & KN_DELETING) == 0,
		    ("acquire a deleting knote %#x", kn->kn_status));
		kn->kn_status |= KN_PROCESSING;

		/* Move the marker */
		SLIST_REMOVE(list, &marker, knote, kn_next);
		if (SLIST_NEXT(kn, kn_next) != NULL)
			SLIST_INSERT_AFTER(kn, &marker, kn_next);

		/*
		 * If hint is non-zero running the event is mandatory
		 * so do it whether reprocessing is set or not.
		 */
		if (filter_event(kn, hint))
			KNOTE_ACTIVATE(kn);

		knote_release(kn);
		lwkt_relpooltoken(kq);
	}
	SLIST_REMOVE(list, &marker, knote, kn_next);
done:
	lwkt_relpooltoken(list);
}
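/*
 * In-kernel sketch: event sources invoke this routine through the
 * KNOTE() macro on their klist when state changes, just as
 * kqueue_wakeup() does on kq_kqinfo.ki_note above.  For a hypothetical
 * driver softc 'sc' carrying a struct kqinfo:
 *
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 *
 * A non-zero hint is passed through to the filter, as with the
 * NOTE_EXIT/NOTE_FORK hints delivered to filt_proc().
 */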
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Move the knotes on the src kqinfo's klist over to the dst kqinfo,
 * re-targeting them to the given filter ops and hook.
 */
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn, marker;

	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(&src->ki_note);
	if (SLIST_EMPTY(&src->ki_note)) {
		lwkt_relpooltoken(&src->ki_note);
		return;
	}
	lwkt_getpooltoken(&dst->ki_note);

again:
	SLIST_INSERT_HEAD(&src->ki_note, &marker, kn_next);
	while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
		kq = kn->kn_kq;

		if (kn->kn_filter == EVFILT_MARKER) {
			/* Skip marker */
			SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
			SLIST_INSERT_AFTER(kn, &marker, kn_next);
			continue;
		}

		lwkt_getpooltoken(kq);

		if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
			/*
			 * Don't move the marker; check the knote after
			 * the marker again.
			 */
			lwkt_relpooltoken(kq);
			continue;
		}

		/* Move marker */
		SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
		SLIST_INSERT_AFTER(kn, &marker, kn_next);

		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);

	/* Keep draining, until nothing left */
	if (!SLIST_EMPTY(&src->ki_note))
		goto again;

	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		return;
	}
	kfree(kn, M_KQUEUE);
}