/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/signalvar.h>
#include <sys/filio.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#define EVENT_REGISTER  1
#define EVENT_PROCESS   2

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};

#define KNOTE_CACHE_MAX         8

struct knote_cache_list {
        struct klist            knote_cache;
        int                     knote_cache_cnt;
};
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);
static void     kqueue_wakeup(struct kqueue *kq);
static int      filter_attach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};
static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_detach_and_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static struct   knote *knote_alloc(void);
static void     knote_free(struct knote *kn);
static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);
static int      filt_userattach(struct knote *kn);
static void     filt_userdetach(struct knote *kn);
static int      filt_user(struct knote *kn, long hint);
static void     filt_usertouch(struct knote *kn, struct kevent *kev,
                    u_long type);
static struct filterops file_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
        { FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static int      kq_ncallouts = 0;
static int      kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int      kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)          \
                knote_enqueue(kn);                                      \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
        &user_filtops,                  /* EVFILT_USER */
};

static struct knote_cache_list  knote_cache_lists[MAXCPU];
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_WAITING | KN_REPROCESS;
                tsleep(kn, 0, "kqepts", hz);
                /* knote may be stale now */
                return (0);
        }
        kn->kn_status |= KN_PROCESSING;
        return (1);
}
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
        int ret;

        while (kn->kn_status & KN_REPROCESS) {
                kn->kn_status &= ~KN_REPROCESS;
                if (kn->kn_status & KN_WAITING) {
                        kn->kn_status &= ~KN_WAITING;
                        wakeup(kn);
                }
                if (kn->kn_status & KN_DELETING) {
                        knote_detach_and_drop(kn);
                        return (1);
                }
                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        }
        if (kn->kn_status & KN_DETACHED)
                ret = 1;
        else
                ret = 0;
        kn->kn_status &= ~KN_PROCESSING;
        /* kn should not be accessed anymore */
        return (ret);
}
static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL)
                return (ESRCH);
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                PRELE(p);
                return (EACCES);
        }

        lwkt_gettoken(&p->p_token);
        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);

        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
        struct kevent kev;
        u_int event;
        int error;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;

                if ((kn->kn_status & KN_DETACHED) == 0) {
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                }
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}
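/*
 * Illustrative userland usage (not part of the original file): a hedged
 * sketch of how a process filter with fork tracking might be registered
 * against the code above.  "pid" and "kqfd" are hypothetical variables;
 * EV_SET(), kevent(), NOTE_EXIT, NOTE_FORK and NOTE_TRACK are the standard
 * kqueue userland interface.
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *          NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *      if (kevent(kqfd, &kev, 1, NULL, 0, NULL) == -1)
 *              err(1, "kevent");
 */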
static void
filt_timerreset(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);
        calloutp = (struct callout *)kn->kn_hook;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);
}
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * anything.
 */
static void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct kqueue *kq = kn->kn_kq;

        lwkt_getpooltoken(kq);

        /*
         * Open knote_acquire(), since we can't sleep in callout,
         * however, we do need to record this expiration.
         */
        kn->kn_data++;
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_REPROCESS;
                if ((kn->kn_status & KN_DELETING) == 0 &&
                    (kn->kn_flags & EV_ONESHOT) == 0)
                        filt_timerreset(kn);
                lwkt_relpooltoken(kq);
                return;
        }
        KASSERT((kn->kn_status & KN_DELETING) == 0,
            ("acquire a deleting knote %#x", kn->kn_status));
        kn->kn_status |= KN_PROCESSING;

        KNOTE_ACTIVATE(kn);
        if ((kn->kn_flags & EV_ONESHOT) == 0)
                filt_timerreset(kn);

        knote_release(kn);

        lwkt_relpooltoken(kq);
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        int prev_ncallouts;

        prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
        if (prev_ncallouts >= kq_calloutmax) {
                atomic_subtract_int(&kq_ncallouts, 1);
                kn->kn_hook = NULL;
                return (ENOMEM);
        }

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init_mp(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        filt_timerreset(kn);

        return (0);
}
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_terminate(calloutp);
        kfree(calloutp, M_KQUEUE);
        atomic_subtract_int(&kq_ncallouts, 1);
}
static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
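/*
 * Illustrative userland usage (not part of the original file): registering
 * a periodic 500 ms timer on an existing kqueue descriptor.  "kqfd" and the
 * ident value 1 are hypothetical; the data field is interpreted as
 * milliseconds by filt_timerattach()/filt_timerreset() above.
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *      if (kevent(kqfd, &kev, 1, NULL, 0, NULL) == -1)
 *              err(1, "kevent");
 */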
static int
filt_userattach(struct knote *kn)
{
        if (kn->kn_fflags & NOTE_TRIGGER)
                kn->kn_ptr.hookid = 1;
        else
                kn->kn_ptr.hookid = 0;
        return (0);
}

static void
filt_userdetach(struct knote *kn)
{
        /* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
        return (kn->kn_ptr.hookid);
}
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
        u_int ffctrl;

        switch (type) {
        case EVENT_REGISTER:
                if (kev->fflags & NOTE_TRIGGER)
                        kn->kn_ptr.hookid = 1;

                ffctrl = kev->fflags & NOTE_FFCTRLMASK;
                kev->fflags &= NOTE_FFLAGSMASK;
                switch (ffctrl) {
                case NOTE_FFNOP:
                        break;

                case NOTE_FFAND:
                        kn->kn_sfflags &= kev->fflags;
                        break;

                case NOTE_FFOR:
                        kn->kn_sfflags |= kev->fflags;
                        break;

                case NOTE_FFCOPY:
                        kn->kn_sfflags = kev->fflags;
                        break;

                default:
                        /* XXX Return error? */
                        break;
                }
                kn->kn_sdata = kev->data;

                /*
                 * This is not the correct use of EV_CLEAR in an event
                 * modification, it should have been passed as a NOTE instead.
                 * But we need to maintain compatibility with Apple & FreeBSD.
                 *
                 * Note however that EV_CLEAR can still be used when doing
                 * the initial registration of the event and works as expected
                 * (clears the event on reception).
                 */
                if (kev->flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                }
                break;

        case EVENT_PROCESS:
                *kev = kn->kn_kevent;
                kev->fflags = kn->kn_sfflags;
                kev->data = kn->kn_sdata;
                if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        /* kn_data, kn_fflags handled by parent */
                }
                break;

        default:
                panic("filt_usertouch() - invalid type (%ld)", type);
                break;
        }
}
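/*
 * Illustrative userland usage (not part of the original file): one thread
 * registers an EVFILT_USER event and another later fires it with
 * NOTE_TRIGGER, which filt_usertouch() above records in kn_ptr.hookid.
 * "kqfd" and the ident 42 are hypothetical; error handling is omitted.
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *
 *      EV_SET(&kev, 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *      kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */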
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_count = 0;
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}
/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct knote *kn;

        lwkt_getpooltoken(kq);
        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        lwkt_relpooltoken(kq);

        if (kq->kq_knhash) {
                hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
}
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        }
        return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }
        return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp, ats;
        int i, n, total, error, nerrors = 0;
        int lres;
        int limit = kq_checkloop;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;
        struct lwkt_token *tok;

        if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
                atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

        tsp = tsp_in;
        *res = 0;

        for (;;) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        return (error);
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error || (kevp->flags & EV_RECEIPT)) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res < 0) {
                                        return (error);
                                } else if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
        }
        if (nerrors)
                return (0);

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                if (tsp->tv_sec || tsp->tv_nsec) {
                        getnanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;
        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        lwkt_reltoken(tok);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        int timeout;

                        if (tsp == NULL) {
                                timeout = 0;
                        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                                error = EWOULDBLOCK;
                                break;
                        } else {
                                struct timespec atx = *tsp;

                                getnanouptime(&ats);
                                timespecsub(&atx, &ats);
                                if (atx.tv_sec < 0) {
                                        error = EWOULDBLOCK;
                                        break;
                                }
                                timeout = atx.tv_sec > 24 * 60 * 60 ?
                                    24 * 60 * 60 * hz : tstohz_high(&atx);
                        }

                        lwkt_gettoken(tok);
                        if (kq->kq_count == 0) {
                                kq->kq_sleep_cnt++;
                                if (__predict_false(kq->kq_sleep_cnt == 0)) {
                                        /*
                                         * Guard against possible wrapping.  And
                                         * set it to 2, so that kqueue_wakeup()
                                         * can wake everyone up.
                                         */
                                        kq->kq_sleep_cnt = 2;
                                }
                                error = tsleep(kq, PCATCH, "kqread", timeout);

                                /* don't restart after signals... */
                                if (error == ERESTART)
                                        error = EINTR;
                                if (error) {
                                        lwkt_reltoken(tok);
                                        break;
                                }

                                TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                                TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
                                    kn_tqe);
                        }
                        lwkt_reltoken(tok);
                }

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }
                if (limit && --limit == 0)
                        panic("kqueue: checkloop failed i=%d", i);

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        lwkt_gettoken(tok);
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                        lwkt_reltoken(tok);
                }
        }
        lwkt_gettoken(tok);
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
        lwkt_reltoken(tok);

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;
        return (error);
}
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }
        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
                            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);

        return (error);
}
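/*
 * Illustrative userland usage (not part of the original file): a minimal
 * event loop that registers read interest on a descriptor and waits for
 * events via the syscalls above.  "sock", "kqfd" and handle_read() are
 * hypothetical names; error handling is abbreviated.
 *
 *      struct kevent change, ev;
 *      int kqfd, n;
 *
 *      kqfd = kqueue();
 *      EV_SET(&change, sock, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *      kevent(kqfd, &change, 1, NULL, 0, NULL);
 *      for (;;) {
 *              n = kevent(kqfd, NULL, 0, &ev, 1, NULL);
 *              if (n > 0 && ev.filter == EVFILT_READ)
 *                      handle_read(ev.ident, ev.data);
 *      }
 */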
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct filedesc *fdp = kq->kq_fdp;
        struct klist *list = NULL;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        struct thread *td;
        int error = 0;
        struct knote_cache_list *cache_list;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                return (EINVAL);
        }

        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL)
                        return (EBADF);
        }

        cache_list = &knote_cache_lists[mycpuid];
        if (SLIST_EMPTY(&cache_list->knote_cache)) {
                struct knote *new_kn;

                new_kn = knote_alloc();
                SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
                cache_list->knote_cache_cnt++;
        }

        td = curthread;
        lwkt_getpooltoken(kq);

        /*
         * Make sure that only one thread can register event on this kqueue,
         * so that we would not suffer any race, even if the registration
         * blocked, i.e. kq token was released, and the kqueue was shared
         * between threads (this should be rare though).
         */
        while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
                kq->kq_state |= KQ_REGWAIT;
                tsleep(&kq->kq_regtd, 0, "kqreg", 0);
        }
        if (__predict_false(kq->kq_regtd != NULL)) {
                /* Recursive calling of kqueue_register() */
                td = NULL;
        } else {
                /* Owner of the kq_regtd, i.e. td != NULL */
                kq->kq_regtd = td;
        }

        if (fp != NULL) {
                list = &fp->f_klist;
        } else if (kq->kq_knhashmask) {
                list = &kq->kq_knhash[
                    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
        }
        if (list != NULL) {
                lwkt_getpooltoken(list);
again:
                SLIST_FOREACH(kn, list, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                if (knote_acquire(kn) == 0)
                                        goto again;
                                break;
                        }
                }
                lwkt_relpooltoken(list);
        }

        /*
         * NOTE: At this point if kn is non-NULL we will have acquired
         *       it and set KN_PROCESSING.
         */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = SLIST_FIRST(&cache_list->knote_cache);
                        if (kn == NULL) {
                                kn = knote_alloc();
                        } else {
                                SLIST_REMOVE_HEAD(&cache_list->knote_cache,
                                    kn_link);
                                cache_list->knote_cache_cnt--;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        /*
                         * KN_PROCESSING prevents the knote from getting
                         * ripped out from under us while we are trying
                         * to attach it, in case the attach blocks.
                         */
                        kn->kn_status = KN_PROCESSING;
                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                                knote_drop(kn);
                                goto done;
                        }

                        /*
                         * Interlock against close races which either tried
                         * to remove our knote while we were blocked or missed
                         * it entirely prior to our attachment.  We do not
                         * want to end up with a knote on a closed descriptor.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
                            checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filter which have already been triggered.
                         */
                        KKASSERT(kn->kn_status & KN_PROCESSING);
                        if (fops == &user_filtops) {
                                filt_usertouch(kn, kev, EVENT_REGISTER);
                        } else {
                                kn->kn_sfflags = kev->fflags;
                                kn->kn_sdata = kev->data;
                                kn->kn_kevent.udata = kev->udata;
                        }
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        } else if (kev->flags & EV_DELETE) {
                /*
                 * Delete the existing knote
                 */
                knote_detach_and_drop(kn);
                goto done;
        } else {
                /*
                 * Modify an existing event.
                 *
                 * The user may change some filter values after the
                 * initial EV_ADD, but doing so will not reset any
                 * filter which have already been triggered.
                 */
                KKASSERT(kn->kn_status & KN_PROCESSING);
                if (fops == &user_filtops) {
                        filt_usertouch(kn, kev, EVENT_REGISTER);
                } else {
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        }

        /*
         * Disablement does not deactivate a knote here.
         */
        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        /*
         * Re-enablement may have to immediately enqueue an active knote.
         */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
                }
        }

        /*
         * Handle any required reprocessing
         */
        knote_release(kn);
        /* kn may be invalid now */

done:
        if (td != NULL) { /* Owner of the kq_regtd */
                kq->kq_regtd = NULL;
                if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
                        kq->kq_state &= ~KQ_REGWAIT;
                        wakeup(&kq->kq_regtd);
                }
        }
        lwkt_relpooltoken(kq);
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        local_marker.kn_status = KN_PROCESSING;

        lwkt_getpooltoken(kq);

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other threads marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                /*
                 * We can't skip a knote undergoing processing, otherwise
                 * we risk not returning it when the user process expects
                 * it should be returned.  Sleep and retry.
                 */
                if (knote_acquire(kn) == 0)
                        continue;

                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
                 *           event from being KNOTE_ACTIVATE()d while
                 *           the queue state is in limbo, in case we
                 *           block.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;

                /*
                 * We have to deal with an extremely important race against
                 * file descriptor close()s here.  The file descriptor can
                 * disappear MPSAFE, and there is a small window of
                 * opportunity between that and the call to knote_fdclose().
                 *
                 * If we hit that window here while doselect or dopoll is
                 * trying to delete a spurious event they will not be able
                 * to match up the event against a knote and will go haywire.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
                        kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }

                if (kn->kn_status & KN_DISABLED) {
                        /*
                         * If disabled we ensure the event is not queued
                         * but leave its active bit set.  On re-enablement
                         * the event may be immediately triggered.
                         */
                        kn->kn_status &= ~KN_QUEUED;
                } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                           (kn->kn_status & KN_DELETING) == 0 &&
                           filter_event(kn, 0) == 0) {
                        /*
                         * If not running in one-shot mode and the event
                         * is no longer present we ensure it is removed
                         * from the queue and ignore it.
                         */
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        if (kn->kn_fop == &user_filtops)
                                filt_usertouch(kn, kevp, EVENT_PROCESS);
                        else
                                *kevp = kn->kn_kevent;
                        kevp++;
                        total++;
                        count--;

                        if (kn->kn_flags & EV_ONESHOT) {
                                kn->kn_status &= ~KN_QUEUED;
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        } else if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
                                if (kn->kn_flags & EV_CLEAR) {
                                        kn->kn_data = 0;
                                        kn->kn_fflags = 0;
                                }
                                if (kn->kn_flags & EV_DISPATCH) {
                                        kn->kn_status |= KN_DISABLED;
                                }
                                kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        } else {
                                TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                                kq->kq_count++;
                        }
                }

                /*
                 * Handle any post-processing states
                 */
                knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        lwkt_relpooltoken(kq);
        return (total);
}
/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;
        lwkt_getpooltoken(kq);
        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_relpooltoken(kq);
        return (error);
}
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(&kq->kq_sigio);

        kfree(kq, M_KQUEUE);
        return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_sleep_cnt) {
                u_int sleep_cnt = kq->kq_sleep_cnt;

                kq->kq_sleep_cnt = 0;
                if (sleep_cnt == 1)
                        wakeup_one(kq);
                else
                        wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_attach(kn);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        }
        return (ret);
}
/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
        kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        }
        knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        }
        return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
        struct kqueue *kq;
        struct knote *kn, marker;

        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;

        lwkt_getpooltoken(list);
        if (SLIST_EMPTY(list)) {
                lwkt_relpooltoken(list);
                return;
        }

        SLIST_INSERT_HEAD(list, &marker, kn_next);
        while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
                kq = kn->kn_kq;

                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Skip marker */
                        SLIST_REMOVE(list, &marker, knote, kn_next);
                        if (SLIST_NEXT(kn, kn_next) == NULL)
                                goto done;
                        SLIST_INSERT_AFTER(kn, &marker, kn_next);
                        continue;
                }

                lwkt_getpooltoken(kq);

                if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
                        /*
                         * Don't move the marker; check the knote after
                         * the marker again.
                         */
                        lwkt_relpooltoken(kq);
                        continue;
                }

                if (kn->kn_status & KN_PROCESSING) {
                        /*
                         * Someone else is processing the knote, ask the
                         * other thread to reprocess it and don't mess
                         * with it otherwise.
                         */
                        if (hint == 0) {
                                /*
                                 * Move the marker w/ the kq token, so that
                                 * this knote will not be ripped behind our
                                 * back.
                                 */
                                SLIST_REMOVE(list, &marker, knote, kn_next);
                                if (SLIST_NEXT(kn, kn_next) != NULL)
                                        SLIST_INSERT_AFTER(kn, &marker,
                                            kn_next);
                                kn->kn_status |= KN_REPROCESS;
                                lwkt_relpooltoken(kq);
                                continue;
                        }

                        /*
                         * If the hint is non-zero we have to wait or risk
                         * losing the state the caller is trying to update.
                         */
                        kn->kn_status |= KN_WAITING | KN_REPROCESS;
                        tsleep(kn, 0, "knotec", hz);

                        /*
                         * Don't move the marker; check this knote again,
                         * hopefully it is still after the marker.  Or it
                         * was deleted and we would check the next knote.
                         */
                        lwkt_relpooltoken(kq);
                        continue;
                }

                /*
                 * Become the reprocessing master ourselves.
                 */
                KASSERT((kn->kn_status & KN_DELETING) == 0,
                    ("acquire a deleting knote %#x", kn->kn_status));
                kn->kn_status |= KN_PROCESSING;

                /* Move the marker */
                SLIST_REMOVE(list, &marker, knote, kn_next);
                if (SLIST_NEXT(kn, kn_next) != NULL)
                        SLIST_INSERT_AFTER(kn, &marker, kn_next);

                /*
                 * If hint is non-zero running the event is mandatory
                 * so do it whether reprocessing is set or not.
                 */
                if (filter_event(kn, hint))
                        KNOTE_ACTIVATE(kn);

                knote_release(kn);
                lwkt_relpooltoken(kq);
        }
        SLIST_REMOVE(list, &marker, knote, kn_next);
done:
        lwkt_relpooltoken(list);
}
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
        lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_REMOVE(klist, kn, knote, kn_next);
        lwkt_relpooltoken(klist);
}
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
                    struct filterops *ops, void *hook)
{
        struct kqueue *kq;
        struct knote *kn, marker;

        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;

        lwkt_getpooltoken(&src->ki_note);
        if (SLIST_EMPTY(&src->ki_note)) {
                lwkt_relpooltoken(&src->ki_note);
                return;
        }
        lwkt_getpooltoken(&dst->ki_note);

again:
        SLIST_INSERT_HEAD(&src->ki_note, &marker, kn_next);
        while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
                kq = kn->kn_kq;

                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Skip marker */
                        SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
                        SLIST_INSERT_AFTER(kn, &marker, kn_next);
                        continue;
                }

                lwkt_getpooltoken(kq);

                if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
                        /*
                         * Don't move the marker; check the knote after
                         * the marker again.
                         */
                        lwkt_relpooltoken(kq);
                        continue;
                }

                /* Move the marker */
                SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
                SLIST_INSERT_AFTER(kn, &marker, kn_next);

                if (knote_acquire(kn)) {
                        knote_remove(&src->ki_note, kn);
                        kn->kn_fop = ops;
                        kn->kn_hook = hook;
                        knote_insert(&dst->ki_note, kn);
                        knote_release(kn);
                        /* kn may be invalid now */
                }
                lwkt_relpooltoken(kq);
        }
        SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);

        /* Keep draining, until nothing left */
        if (!SLIST_EMPTY(&src->ki_note))
                goto again;

        lwkt_relpooltoken(&dst->ki_note);
        lwkt_relpooltoken(&src->ki_note);
}
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(&fp->f_klist);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kq = kn->kn_kq;
                        lwkt_getpooltoken(kq);

                        /* temporary verification hack */
                        SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
                                if (kn == kntmp)
                                        break;
                        }
                        if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
                            kn->kn_id != fd || kn->kn_kq != kq) {
                                lwkt_relpooltoken(kq);
                                goto restart;
                        }
                        if (knote_acquire(kn))
                                knote_detach_and_drop(kn);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
        }
        lwkt_relpooltoken(&fp->f_klist);
}
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        lwkt_getpooltoken(list);
        SLIST_INSERT_HEAD(list, kn, kn_link);
        lwkt_relpooltoken(list);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        lwkt_getpooltoken(list);
        SLIST_REMOVE(list, kn, knote, kn_link);
        lwkt_relpooltoken(list);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                fdrop(kn->kn_fp);
                kn->kn_fp = NULL;
        }
        knote_free(kn);
}
/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        kq->kq_count++;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}
/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}
static struct knote *
knote_alloc(void)
{
        return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}
static void
knote_free(struct knote *kn)
{
        struct knote_cache_list *cache_list;

        cache_list = &knote_cache_lists[mycpuid];
        if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
                SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
                cache_list->knote_cache_cnt++;
                return;
        }
        kfree(kn, M_KQUEUE);
}