/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		8

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
} __cachealign;
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker, int closedcounter);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};
static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
		    struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
		    int us);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);
static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int		kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int		kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
	&fs_filtops,			/* EVFILT_FS */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];
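
/*
 * Illustrative sketch (not part of the original file): user-visible
 * filter numbers are small negative integers, so a kevent's filter is
 * mapped into sysfilt_ops[] by one's complement, e.g. EVFILT_READ (-1)
 * selects index 0.  The same range check and lookup appear in
 * floadkevfps() and kqueue_register() below:
 *
 *	struct filterops *fops;
 *
 *	if (kev->filter < 0 && kev->filter + EVFILT_SYSCOUNT >= 0)
 *		fops = sysfilt_ops[~kev->filter];
 */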
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return(0);
	}
	kn->kn_status |= KN_PROCESSING;
	return(1);
}
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return(1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return ret;
}
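
/*
 * Typical caller pattern (sketch): acquire the knote, operate on it,
 * then release it; restart the containing loop when acquisition fails,
 * since the knote may be stale.  kqueue_scan() and knote_fdclose()
 * below both follow this pattern:
 *
 *	if (knote_acquire(kn) == 0)
 *		goto restart;
 *	...
 *	knote_release(kn);
 */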
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			PHOLD(p);
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
			PRELE(p);
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
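
/*
 * Userland usage sketch (assumed typical kevent(2) usage, not part of
 * this file): watch a child for exit and fork, letting NOTE_TRACK
 * auto-attach knotes to new children via the EV_FLAG1 path above:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */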
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open knote_acquire(), since we can't sleep in callout,
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		kn->kn_hook = NULL;
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kn->kn_hook = NULL;
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}
static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
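
/*
 * Userland usage sketch (assumed typical kevent(2) usage, not part of
 * this file): a periodic timer; data is in milliseconds per the
 * comment above filt_timerattach(), EV_CLEAR is set automatically,
 * and kev.data reports the number of expirations since the last scan:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */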
static int
filt_userattach(struct knote *kn)
{
	u_int ffctrl;

	kn->kn_hook = NULL;
	if (kn->kn_sfflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;

	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
	kn->kn_sfflags &= NOTE_FFLAGSMASK;
	switch (ffctrl) {
	case NOTE_FFNOP:
		break;

	case NOTE_FFAND:
		kn->kn_fflags &= kn->kn_sfflags;
		break;

	case NOTE_FFOR:
		kn->kn_fflags |= kn->kn_sfflags;
		break;

	case NOTE_FFCOPY:
		kn->kn_fflags = kn->kn_sfflags;
		break;

	default:
		/* XXX Return error? */
		break;
	}
	/* We just happen to copy this value as well. Undocumented. */
	kn->kn_data = kn->kn_sdata;

	return 0;
}
static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_fflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_fflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_fflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		/* We just happen to copy this value as well. Undocumented. */
		kn->kn_data = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/*
			 * Clearing kn->kn_data is fine, since it gets set
			 * every time anyway. We just shouldn't clear
			 * kn->kn_fflags here, since that would limit the
			 * possible uses of this API. NOTE_FFAND or
			 * NOTE_FFCOPY should be used for explicitly clearing
			 * kn->kn_fflags.
			 */
			kn->kn_data = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_fflags;
		kev->data = kn->kn_data;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
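
/*
 * Userland usage sketch (assumed typical kevent(2) usage, not part of
 * this file): register a user event, then fire it (e.g. from another
 * thread) with NOTE_TRIGGER; filt_user() reports it via kn_ptr.hookid:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kqfd, &kev, 1, NULL, 0, NULL);
 */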
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{
	kn->kn_flags |= EV_CLEAR;
	knote_insert(&fs_klist, kn);

	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
	knote_remove(&fs_klist, kn);
}

static int
filt_fs(struct knote *kn, long hint)
{
	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_getpooltoken(kq);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_relpooltoken(kq);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
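
/*
 * Userland sketch (assumed typical usage, not part of this file): the
 * descriptor returned by kqueue(2) is an ordinary fd backed by
 * kqueueops above, so it can be close()d, fstat()ed, or itself
 * monitored with EVFILT_READ (see kqueue_kqfilter()):
 *
 *	int kqfd = kqueue();
 *	if (kqfd < 0)
 *		err(1, "kqueue");
 */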
/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}
/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in, int flags)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int gobbled;
	int lres;
	int limit = kq_checkloop;
	int closedcounter;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	closedcounter = kq->kq_fdp->fd_closedcounter;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return error;
		if (n == 0)
			break;
		for (i = 0; i < n; ++i)
			kev[i].flags &= ~EV_SYSFLAGS;
		for (i = 0; i < n; ++i) {
			gobbled = n - i;
			error = kqueue_register(kq, &kev[i], &gobbled);
			i += gobbled - 1;
			kevp = &kev[i];

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return error;
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return 0;

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can. Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	lwkt_gettoken(tok);
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			int timeout, ustimeout = 0;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats);
				if (atx.tv_sec < 0) {
					error = EWOULDBLOCK;
					break;
				} else {
					timeout = atx.tv_sec > 24 * 60 * 60 ?
					    24 * 60 * 60 * hz :
					    tstohz_high(&atx);
				}
				if (flags & KEVENT_TIMEOUT_PRECISE &&
				    timeout != 0) {
					if (atx.tv_sec == 0 &&
					    atx.tv_nsec < kq_sleep_threshold) {
						DELAY(atx.tv_nsec / 1000);
						error = EWOULDBLOCK;
						break;
					} else if (atx.tv_sec < 2000) {
						ustimeout = atx.tv_sec *
						    1000000 + atx.tv_nsec/1000;
					} else {
						ustimeout = 2000000000;
					}
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				if ((flags & KEVENT_TIMEOUT_PRECISE) &&
				    ustimeout != 0) {
					error = precise_sleep(kq, PCATCH,
					    "kqread", ustimeout);
				} else {
					error = tsleep(kq, PCATCH, "kqread",
					    timeout);
				}

				/* don't restart after signals... */
				if (error == ERESTART)
					error = EINTR;
				if (error) {
					lwkt_reltoken(tok);
					break;
				}

				TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
				TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
				    kn_tqe);
			}
			lwkt_reltoken(tok);
		}

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker, closedcounter);
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			lwkt_gettoken(tok);
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			lwkt_reltoken(tok);
		}
	}
	lwkt_gettoken(tok);
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	lwkt_reltoken(tok);

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return error;
}
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(td, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		dropfp(td, uap->fd, fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp, 0);

	dropfp(td, uap->fd, fp);

	return (error);
}
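
/*
 * Userland sketch (assumed typical usage, not part of this file): one
 * kevent(2) call can both submit a changelist and collect events; the
 * timeout is relative, and per the EWOULDBLOCK handling in
 * kern_kevent() an expired timeout simply reports zero events:
 *
 *	struct kevent chg, ev;
 *	struct timespec ts = { 1, 0 };		(one second)
 *	int n;
 *
 *	EV_SET(&chg, sockfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kqfd, &chg, 1, &ev, 1, &ts);
 */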
/*
 * Efficiently load multiple file pointers.  This significantly reduces
 * threaded overhead.  When doing simple polling we can depend on the
 * per-thread (fd,fp) cache.  With more descriptors, we batch.
 */
static void
floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
	    struct file **fp, int climit)
{
	struct filterops *fops;
	int tdcache;

	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
		tdcache = 1;
	} else {
		tdcache = 0;
		spin_lock_shared(&fdp->fd_spin);
	}

	while (climit) {
		*fp = NULL;
		if (kev->filter < 0 &&
		    kev->filter + EVFILT_SYSCOUNT >= 0) {
			fops = sysfilt_ops[~kev->filter];
			if (fops->f_flags & FILTEROP_ISFD) {
				if (tdcache) {
					*fp = holdfp(td, kev->ident, -1);
				} else {
					*fp = holdfp_fdp_locked(fdp,
								kev->ident, -1);
				}
			}
		}
		--climit;
		++fp;
		++kev;
	}
	if (tdcache == 0)
		spin_unlock_shared(&fdp->fd_spin);
}
/*
 * Register up to *countp kev's.  Always registers at least 1.
 *
 * The number registered is returned in *countp.
 *
 * If an error occurs or a kev is flagged EV_RECEIPT, it is
 * processed and included in *countp, and processing then
 * terminates.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp[KQ_NEVENTS];
	struct knote *kn = NULL;
	struct thread *td;
	int error;
	int count;
	int climit;
	int closedcounter;
	struct knote_cache_list *cache_list;

	td = curthread;
	climit = *countp;
	if (climit > KQ_NEVENTS)
		climit = KQ_NEVENTS;
	closedcounter = fdp->fd_closedcounter;
	floadkevfps(td, fdp, kev, fp, climit);

	lwkt_getpooltoken(kq);
	count = 0;
	error = 0;

	/*
	 * To avoid races, only one thread can register events on this
	 * kqueue at a time.
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

loop:
	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0) {
			error = EINVAL;
			++count;
			goto done;
		}
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		error = EINVAL;
		++count;
		goto done;
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (fp[count] == NULL) {
			error = EBADF;
			++count;
			goto done;
		}
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
	}

	if (fp[count] != NULL) {
		list = &fp[count]->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		++count;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			crit_enter();
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				crit_exit();
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
				    kn_link);
				cache_list->knote_cache_cnt--;
				crit_exit();
			}
			kn->kn_fp = fp[count];
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp[count] = NULL;	/* safety */

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				++count;
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(curthread, fdp, kev->ident,
					  kn->kn_fp, closedcounter)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter which has already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		error = 0;
		++count;
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filter which has already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

	/*
	 * Loop control.  We stop on errors (above), and also stop after
	 * processing EV_RECEIPT, so the caller can process it.
	 */
	++count;
	if (kev->flags & EV_RECEIPT) {
		error = 0;
		goto done;
	}
	++kev;
	if (count < climit) {
		if (fp[count-1])	/* drop unprocessed fp */
			fdrop(fp[count-1]);
		goto loop;
	}

	/*
	 * Cleanup
	 */
done:
	if (td != NULL) { /* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);

	/*
	 * Drop unprocessed file pointers
	 */
	*countp = count;
	if (count && fp[count-1])
		fdrop(fp[count-1]);
	while (count < climit) {
		if (fp[count])
			fdrop(fp[count]);
		++count;
	}
	return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker, int closedcounter)
{
	struct knote *kn, local_marker;
	thread_t td = curthread;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * We have to deal with an extremely important race against
		 * file descriptor close()s here.  The file descriptor can
		 * disappear MPSAFE, and there is a small window of
		 * opportunity between that and the call to knote_fdclose().
		 *
		 * If we hit that window here while doselect or dopoll is
		 * trying to delete a spurious event they will not be able
		 * to match up the event against a knote and will go haywire.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp, closedcounter)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else {
				if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
					if (kn->kn_flags & EV_CLEAR) {
						kn->kn_data = 0;
						kn->kn_fflags = 0;
					}
					if (kn->kn_flags & EV_DISPATCH) {
						kn->kn_status |= KN_DISABLED;
					}
					kn->kn_status &= ~(KN_QUEUED |
							   KN_ACTIVE);
				} else {
					TAILQ_INSERT_TAIL(&kq->kq_knpend, kn,
							  kn_tqe);
					kq->kq_count++;
				}
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	lwkt_getpooltoken(kq);
	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_relpooltoken(kq);
	return (error);
}
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		u_int sleep_cnt = kq->kq_sleep_cnt;

		kq->kq_sleep_cnt = 0;
		if (sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}
/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}
/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kn == kntmp)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}
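
/*
 * In-kernel sketch (assumed typical producer-side usage, not part of
 * this file): event sources deliver hints through the KNOTE() macro,
 * which expands to a call to knote() above, e.g. a driver signalling
 * readability:
 *
 *	KNOTE(&sc->sc_kqinfo.ki_note, 0);
 *
 * where sc_kqinfo is a hypothetical struct kqinfo embedded in the
 * driver softc.
 */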
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}
/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kn == kntmp)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}
/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}
/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}
/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}
static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		crit_enter();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		crit_exit();
		return;
	}
	kfree(kn, M_KQUEUE);
}
struct sleepinfo {
	void	*ident;
	int	timedout;
};

static void
precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
{
	struct sleepinfo *si;

	si = info->data;
	si->timedout = 1;
	wakeup(si->ident);
}

static int
precise_sleep(void *ident, int flags, const char *wmesg, int us)
{
	struct systimer info;
	struct sleepinfo si = {
		.ident = ident,
		.timedout = 0,
	};
	int r;

	tsleep_interlock(ident, flags);
	systimer_init_oneshot(&info, precise_sleep_intr, &si,
	    us == 0 ? 1 : us);
	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
	systimer_del(&info);
	if (si.timedout)
		r = EWOULDBLOCK;

	return r;
}