/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>
#include <sys/spinlock2.h>

#define EVENT_REGISTER	1
#define EVENT_PROCESS	2

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	const struct kevent_args *ka;
	struct kevent		*eventlist;
	const struct kevent	*changelist;
	int			pchanges;
};

#define KNOTE_CACHE_MAX		64

struct knote_cache_list {
	struct klist		knote_cache;
	int			knote_cache_cnt;
};

static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker, int closedcounter, int flags);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static int	filter_attach(struct knote *kn);
static int	filter_event(struct knote *kn, long hint);

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_detach_and_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static struct knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	precise_sleep_intr(systimer_t info, int in_ipi,
		    struct intrframe *frame);
static int	precise_sleep(void *ident, int flags, const char *wmesg,
		    int us);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);
static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fs(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ FILTEROP_MPSAFE, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
	{ FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
static struct filterops fs_filtops =
	{ FILTEROP_MPSAFE, filt_fsattach, filt_fsdetach, filt_fs };

static int	kq_ncallouts = 0;
static int	kq_calloutmax = 65536;
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int	kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
static int	kq_sleep_threshold = 20000;
SYSCTL_INT(_kern, OID_AUTO, kq_sleep_threshold, CTLFLAG_RW,
    &kq_sleep_threshold, 0, "Minimum sleep duration without busy-looping");

#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
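
/*
 * Illustrative note (not part of the original source): KN_HASH() folds
 * the upper bits of the identifier into the low bits before masking so
 * that nearby idents spread across buckets.  A worked example with a
 * hypothetical ident of 0x1234 and the default 64-bucket mask of 63:
 *
 *	KN_HASH(0x1234, 63) = ((0x1234 ^ (0x1234 >> 8)) & 63)
 *			    = ((0x1234 ^ 0x12) & 0x3f)
 *			    = (0x1226 & 0x3f)
 *			    = 0x26		(bucket 38)
 */
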
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
	&user_filtops,			/* EVFILT_USER */
	&fs_filtops,			/* EVFILT_FS */
};

static struct knote_cache_list	knote_cache_lists[MAXCPU];

/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static int
knote_acquire(struct knote *kn)
{
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_WAITING | KN_REPROCESS;
		tsleep(kn, 0, "kqepts", hz);
		/* knote may be stale now */
		return (0);
	}
	kn->kn_status |= KN_PROCESSING;
	return (1);
}
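
/*
 * Illustrative sketch (not part of the original source): a caller that
 * walks a list and fails to acquire a knote must restart its scan,
 * because the knote may have been freed while it slept.
 * kqueue_terminate() below follows exactly this pattern:
 *
 *	lwkt_getpooltoken(kq);
 *	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
 *		if (knote_acquire(kn))
 *			knote_detach_and_drop(kn);
 *		(on failure simply loop; TAILQ_FIRST() re-reads the head)
 *	}
 *	lwkt_relpooltoken(kq);
 */
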
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static int
knote_release(struct knote *kn)
{
	int ret;

	while (kn->kn_status & KN_REPROCESS) {
		kn->kn_status &= ~KN_REPROCESS;
		if (kn->kn_status & KN_WAITING) {
			kn->kn_status &= ~KN_WAITING;
			wakeup(kn);
		}
		if (kn->kn_status & KN_DELETING) {
			knote_detach_and_drop(kn);
			return (1);
			/* NOT REACHED */
		}
		if (filter_event(kn, 0))
			KNOTE_ACTIVATE(kn);
	}
	if (kn->kn_status & KN_DETACHED)
		ret = 1;
	else
		ret = 0;
	kn->kn_status &= ~KN_PROCESSING;
	/* kn should not be accessed anymore */
	return (ret);
}

static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EOPNOTSUPP);

	kn->kn_fop = &kqread_filtops;
	knote_insert(&kq->kq_kqinfo.ki_note, kn);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		PRELE(p);
		return (EACCES);
	}

	lwkt_gettoken(&p->p_token);
	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel.
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	knote_insert(&p->p_klist, kn);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process no longer exists.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	p = kn->kn_ptr.p_proc;
	knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * Mask off extra data.
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * If the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will soon be
	 * gone.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;

		if ((kn->kn_status & KN_DETACHED) == 0) {
			knote_remove(&p->p_klist, kn);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}

	/*
	 * Process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;
		int n;

		/*
		 * Register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		n = 1;
		error = kqueue_register(kn->kn_kq, &kev, &n, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
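
/*
 * Illustrative userland sketch (not part of the original source):
 * watching a child for exit and following forks via NOTE_TRACK.  The
 * child_pid variable is hypothetical.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *	       NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A later kevent() call may then return an event whose fflags contain
 * NOTE_CHILD and whose data holds the parent pid, exactly as built by
 * the EV_FLAG1 path in filt_procattach()/filt_proc() above.
 */
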
static void
filt_timerreset(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);
	calloutp = (struct callout *)kn->kn_hook;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
}

/*
 * The callout interlocks with callout_stop() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct kqueue *kq = kn->kn_kq;

	lwkt_getpooltoken(kq);

	/*
	 * Open-code knote_acquire(), since we can't sleep in a callout,
	 * however, we do need to record this expiration.
	 */
	kn->kn_data++;
	if (kn->kn_status & KN_PROCESSING) {
		kn->kn_status |= KN_REPROCESS;
		if ((kn->kn_status & KN_DELETING) == 0 &&
		    (kn->kn_flags & EV_ONESHOT) == 0)
			filt_timerreset(kn);
		lwkt_relpooltoken(kq);
		return;
	}
	KASSERT((kn->kn_status & KN_DELETING) == 0,
	    ("acquire a deleting knote %#x", kn->kn_status));
	kn->kn_status |= KN_PROCESSING;

	KNOTE_ACTIVATE(kn);
	if ((kn->kn_flags & EV_ONESHOT) == 0)
		filt_timerreset(kn);

	knote_release(kn);

	lwkt_relpooltoken(kq);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int prev_ncallouts;

	prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
	if (prev_ncallouts >= kq_calloutmax) {
		atomic_subtract_int(&kq_ncallouts, 1);
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init_mp(calloutp);
	kn->kn_hook = (caddr_t)calloutp;

	filt_timerreset(kn);
	return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_terminate(calloutp);
	kn->kn_hook = NULL;
	kfree(calloutp, M_KQUEUE);
	atomic_subtract_int(&kq_ncallouts, 1);
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
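
/*
 * Illustrative userland sketch (not part of the original source): a
 * periodic 500ms timer.  EV_CLEAR is forced by filt_timerattach(), so
 * kn_data accumulates expirations and is reset on each delivery.
 *
 *	struct kevent kev, out;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &out, 1, NULL);
 *
 * On return, out.data holds the number of expirations since the event
 * was last read.
 */
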
/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
	u_int ffctrl;

	if (kn->kn_sfflags & NOTE_TRIGGER)
		kn->kn_ptr.hookid = 1;
	else
		kn->kn_ptr.hookid = 0;

	ffctrl = kn->kn_sfflags & NOTE_FFCTRLMASK;
	kn->kn_sfflags &= NOTE_FFLAGSMASK;
	switch (ffctrl) {
	case NOTE_FFNOP:
		break;

	case NOTE_FFAND:
		kn->kn_fflags &= kn->kn_sfflags;
		break;

	case NOTE_FFOR:
		kn->kn_fflags |= kn->kn_sfflags;
		break;

	case NOTE_FFCOPY:
		kn->kn_fflags = kn->kn_sfflags;
		break;

	default:
		/* XXX Return error? */
		break;
	}
	/* We just happen to copy this value as well.  Undocumented. */
	kn->kn_data = kn->kn_sdata;

	return 0;
}

static void
filt_userdetach(struct knote *kn)
{
	/* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
	return (kn->kn_ptr.hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_ptr.hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_fflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_fflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_fflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		/* We just happen to copy this value as well.  Undocumented. */
		kn->kn_data = kev->data;

		/*
		 * This is not the correct use of EV_CLEAR in an event
		 * modification, it should have been passed as a NOTE instead.
		 * But we need to maintain compatibility with Apple & FreeBSD.
		 *
		 * Note however that EV_CLEAR can still be used when doing
		 * the initial registration of the event and works as expected
		 * (clears the event on reception).
		 */
		if (kev->flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/*
			 * Clearing kn->kn_data is fine, since it gets set
			 * every time anyway.  We just shouldn't clear
			 * kn->kn_fflags here, since that would limit the
			 * possible uses of this API.  NOTE_FFAND or
			 * NOTE_FFCOPY should be used for explicitly clearing
			 * kn->kn_fflags.
			 */
			kn->kn_data = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_fflags;
		kev->data = kn->kn_data;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_ptr.hookid = 0;
			/* kn_data, kn_fflags handled by parent */
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
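
/*
 * Illustrative userland sketch (not part of the original source): a
 * cross-thread wakeup with EVFILT_USER.  One thread registers the
 * event, any other thread can then trigger it.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The NOTE_TRIGGER register pass sets kn_ptr.hookid via
 * filt_usertouch(), which makes filt_user() report the event.
 */
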
/*
 * EVFILT_FS
 */
struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);

static int
filt_fsattach(struct knote *kn)
{
	kn->kn_flags |= EV_CLEAR;
	knote_insert(&fs_klist, kn);

	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{
	knote_remove(&fs_klist, kn);
}

static int
filt_fs(struct knote *kn, long hint)
{
	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	bzero(kq, sizeof(*kq));
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;

	lwkt_getpooltoken(kq);
	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		if (knote_acquire(kn))
			knote_detach_and_drop(kn);
	}
	lwkt_relpooltoken(kq);

	if (kq->kq_knhash) {
		hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

int
sys_kqueue(struct sysmsg *sysmsg, const struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	sysmsg->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->eventlist += count;
		*res += count;
	}
	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->changelist += count;
		kap->pchanges += count;
		*events = count;
	}
	return (error);
}

int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in, int flags)
{
	struct kevent *kevp;
	struct timespec *tsp, ats;
	int i, n, total, error, nerrors = 0;
	int gobbled;
	int lres;
	int limit = kq_checkloop;
	int closedcounter;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;
	struct lwkt_token *tok;

	if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
		atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	tsp = tsp_in;
	*res = 0;

	closedcounter = kq->kq_fdp->fd_closedcounter;

	for (;;) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			return (error);
		if (n == 0)
			break;
		for (i = 0; i < n; ++i)
			kev[i].flags &= ~EV_SYSFLAGS;
		for (i = 0; i < n; ++i) {
			gobbled = n - i;
			error = kqueue_register(kq, &kev[i], &gobbled, flags);
			i += gobbled - 1;	/* loop increment adds 1 */
			kevp = &kev[i];

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error || (kevp->flags & EV_RECEIPT)) {
				kevp->flags = EV_ERROR;
				kevp->data = error;
				lres = *res;
				kevent_copyoutfn(uap, kevp, 1, res);
				if (*res < 0) {
					return (error);
				} else if (lres != *res) {
					nevents--;
					nerrors++;
				}
			}
		}
	}
	if (nerrors)
		return (0);

	/*
	 * Acquire/wait for events - setup timeout
	 *
	 * If no timeout specified clean up the run path by clearing the
	 * precise flag.
	 */
	if (tsp != NULL) {
		if (tsp->tv_sec || tsp->tv_nsec) {
			getnanouptime(&ats);
			timespecadd(tsp, &ats, tsp);	/* tsp = target time */
		}
	} else {
		flags &= ~KEVENT_TIMEOUT_PRECISE;
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	marker.kn_status = KN_PROCESSING;
	tok = lwkt_token_pool_lookup(kq);
	flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_INSERT_MARKER;
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * Process all received events
		 * Account for all non-spurious events in our total
		 */
		i = kqueue_scan(kq, kev, n, &marker, closedcounter, flags);
		flags = (flags & ~KEVENT_SCAN_MASK) | KEVENT_SCAN_KEEP_MARKER;
		if (i) {
			lres = *res;
			error = kevent_copyoutfn(uap, kev, i, res);
			total += *res - lres;
			if (error)
				break;
		}
		if (limit && --limit == 0)
			panic("kqueue: checkloop failed i=%d", i);

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * If no events were recorded (no events happened or the events
		 * that did happen were all spurious), block until an event
		 * occurs or the timeout occurs and reload the marker.
		 *
		 * If we saturated n (i == n) loop up without sleeping to
		 * continue processing the list.
		 */
		if (i != n && kq->kq_count == 0 && *res == 0) {
			int timeout;
			int ustimeout = 0;

			if (tsp == NULL) {
				timeout = 0;
			} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
				error = EWOULDBLOCK;
				break;
			} else {
				struct timespec atx = *tsp;

				getnanouptime(&ats);
				timespecsub(&atx, &ats, &atx);
				if (atx.tv_sec < 0 ||
				    (atx.tv_sec == 0 && atx.tv_nsec <= 0)) {
					error = EWOULDBLOCK;
					break;
				}
				if (flags & KEVENT_TIMEOUT_PRECISE) {
					if (atx.tv_sec == 0 &&
					    atx.tv_nsec < kq_sleep_threshold) {
						ustimeout = kq_sleep_threshold /
							    1000;
					} else if (atx.tv_sec < 60) {
						ustimeout =
						    atx.tv_sec * 1000000 +
						    atx.tv_nsec / 1000;
					} else {
						ustimeout = 60 * 1000000;
					}
					if (ustimeout == 0)
						ustimeout = 1;
					timeout = 0;
				} else if (atx.tv_sec > 60 * 60) {
					timeout = 60 * 60 * hz;
				} else {
					timeout = tstohz_high(&atx);
				}
			}

			lwkt_gettoken(tok);
			if (kq->kq_count == 0) {
				kq->kq_sleep_cnt++;
				if (__predict_false(kq->kq_sleep_cnt == 0)) {
					/*
					 * Guard against possible wrapping.  And
					 * set it to 2, so that kqueue_wakeup()
					 * can wake everyone up.
					 */
					kq->kq_sleep_cnt = 2;
				}
				if (flags & KEVENT_TIMEOUT_PRECISE) {
					error = precise_sleep(kq, PCATCH,
							      "kqread",
							      ustimeout);
				} else {
					error = tsleep(kq, PCATCH,
						       "kqread", timeout);
				}
			}
			lwkt_reltoken(tok);

			/* don't restart after signals... */
			if (error == ERESTART)
				error = EINTR;
			if (error == EWOULDBLOCK)
				error = 0;
			if (error)
				break;

			flags = (flags & ~KEVENT_SCAN_MASK) |
				KEVENT_SCAN_RELOAD_MARKER;
		} else if (i == 0) {
			/*
			 * Deal with an edge case where spurious events can cause
			 * a loop to occur without moving the marker.  This can
			 * prevent kqueue_scan() from picking up new events which
			 * race us.  We must be sure to move the marker for this
			 * case.
			 *
			 * NOTE: We do not want to move the marker if events
			 *	 were scanned because normal kqueue operations
			 *	 may reactivate events.  Moving the marker in
			 *	 that case could result in duplicates for the
			 *	 caller.
			 */
			flags = (flags & ~KEVENT_SCAN_MASK) |
				KEVENT_SCAN_RELOAD_MARKER;
		}
	}

	/*
	 * Remove the marker if it was inserted.
	 */
	if ((flags & KEVENT_SCAN_INSERT_MARKER) == 0) {
		lwkt_gettoken(tok);
		TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
		lwkt_reltoken(tok);
	}

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}

int
sys_kevent(struct sysmsg *sysmsg, const struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(td, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		dropfp(td, uap->fd, fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;
	kap->eventlist = uap->eventlist;
	kap->changelist = uap->changelist;

	error = kern_kevent(kq, uap->nevents, &sysmsg->sysmsg_result, kap,
			    kevent_copyin, kevent_copyout, tsp, 0);

	dropfp(td, uap->fd, fp);

	return (error);
}
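
/*
 * Illustrative userland sketch (not part of the original source): the
 * register-then-wait pattern this syscall implements.  The sock_fd
 * descriptor is hypothetical.
 *
 *	struct kevent change, events[4];
 *	struct timespec ts = { 5, 0 };
 *	int kq = kqueue();
 *	int nev;
 *
 *	EV_SET(&change, sock_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	nev = kevent(kq, &change, 1, events, 4, &ts);
 *
 * nev is 0 on timeout; otherwise events[i].data reports the number of
 * bytes available to read on the descriptor.
 */
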
/*
 * Efficiently load multiple file pointers.  This significantly reduces
 * threaded overhead.  When doing simple polling we can depend on the
 * per-thread (fd,fp) cache.  With more descriptors, we batch.
 */
static void
floadkevfps(thread_t td, struct filedesc *fdp, struct kevent *kev,
	    struct file **fp, int climit)
{
	struct filterops *fops;
	int tdcache;

	if (climit <= 2 && td->td_proc && td->td_proc->p_fd == fdp) {
		tdcache = 1;
	} else {
		tdcache = 0;
		spin_lock_shared(&fdp->fd_spin);
	}

	while (climit) {
		*fp = NULL;
		if (kev->filter < 0 &&
		    kev->filter + EVFILT_SYSCOUNT >= 0) {
			fops = sysfilt_ops[~kev->filter];
			if (fops->f_flags & FILTEROP_ISFD) {
				if (tdcache) {
					*fp = holdfp(td, kev->ident, -1);
				} else {
					*fp = holdfp_fdp_locked(fdp,
								kev->ident,
								-1);
				}
			}
		}
		--climit;
		++fp;
		++kev;
	}
	if (tdcache == 0)
		spin_unlock_shared(&fdp->fd_spin);
}

/*
 * Register up to *countp kev's.  Always registers at least 1.
 *
 * The number registered is returned in *countp.
 *
 * If an error occurs or a kev is flagged EV_RECEIPT, it is
 * processed and included in *countp, and processing then
 * terminates.
 *
 * If flags contains KEVENT_UNIQUE_NOTES, kev->data contains an identifier
 * to further distinguish knotes which might otherwise have the same kq,
 * ident, and filter (used by *poll() because multiple pfds are allowed to
 * reference the same descriptor and implied kq filter).  kev->data is
 * implied to be zero for event processing when this flag is set.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, int *countp, int flags)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct klist *list = NULL;
	struct filterops *fops;
	struct file *fp[KQ_NEVENTS];
	struct knote *kn = NULL;
	struct thread *td;
	int error;
	int count;
	int climit;
	int closedcounter;
	int uniqifier = 0;
	struct knote_cache_list *cache_list;

	td = curthread;
	climit = *countp;
	if (climit > KQ_NEVENTS)
		climit = KQ_NEVENTS;
	closedcounter = fdp->fd_closedcounter;
	floadkevfps(td, fdp, kev, fp, climit);

	lwkt_getpooltoken(kq);
	count = 0;
	error = 0;

	/*
	 * To avoid races, only one thread can register events on this
	 * kqueue at a time.
	 */
	while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
		kq->kq_state |= KQ_REGWAIT;
		tsleep(&kq->kq_regtd, 0, "kqreg", 0);
	}
	if (__predict_false(kq->kq_regtd != NULL)) {
		/* Recursive calling of kqueue_register() */
		td = NULL;
	} else {
		/* Owner of the kq_regtd, i.e. td != NULL */
		kq->kq_regtd = td;
	}

loop:
	kn = NULL;
	list = NULL;

	/*
	 * knote uniqifiers are used by *poll() because there may be
	 * multiple pfd[] entries for the same descriptor and filter.
	 * The unique id is stored in kev->data and kev->data for the
	 * kevent is implied to be zero.
	 */
	if (flags & KEVENT_UNIQUE_NOTES) {
		uniqifier = kev->data;
		kev->data = 0;
	}

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0) {
			error = EINVAL;
			++count;
			goto done;
		}
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		error = EINVAL;
		++count;
		goto done;
	}

	if (fops->f_flags & FILTEROP_ISFD) {
		/* validate descriptor */
		if (fp[count] == NULL) {
			error = EBADF;
			++count;
			goto done;
		}
	}

	cache_list = &knote_cache_lists[mycpuid];
	if (SLIST_EMPTY(&cache_list->knote_cache)) {
		struct knote *new_kn;

		new_kn = knote_alloc();
		SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
		cache_list->knote_cache_cnt++;
	}

	if (fp[count] != NULL) {
		list = &fp[count]->f_klist;
	} else if (kq->kq_knhashmask) {
		list = &kq->kq_knhash[
		    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
	}
	if (list != NULL) {
		lwkt_getpooltoken(list);
again:
		SLIST_FOREACH(kn, list, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident &&
			    kn->kn_uniqifier == uniqifier) {
				if (knote_acquire(kn) == 0)
					goto again;
				break;
			}
		}
		lwkt_relpooltoken(list);
	}

	/*
	 * NOTE: At this point if kn is non-NULL we will have acquired
	 *	 it and set KN_PROCESSING.
	 */
	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		++count;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = SLIST_FIRST(&cache_list->knote_cache);
			if (kn == NULL) {
				kn = knote_alloc();
			} else {
				SLIST_REMOVE_HEAD(&cache_list->knote_cache,
						  kn_link);
				cache_list->knote_cache_cnt--;
			}
			kn->kn_fp = fp[count];
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			kn->kn_uniqifier = uniqifier;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp[count] = NULL;	/* safety */

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			/*
			 * KN_PROCESSING prevents the knote from getting
			 * ripped out from under us while we are trying
			 * to attach it, in case the attach blocks.
			 */
			kn->kn_status = KN_PROCESSING;
			knote_attach(kn);
			if ((error = filter_attach(kn)) != 0) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
				knote_drop(kn);
				++count;
				goto done;
			}

			/*
			 * Interlock against close races which either tried
			 * to remove our knote while we were blocked or missed
			 * it entirely prior to our attachment.  We do not
			 * want to end up with a knote on a closed descriptor.
			 */
			if ((fops->f_flags & FILTEROP_ISFD) &&
			    checkfdclosed(curthread, fdp, kev->ident,
					  kn->kn_fp, closedcounter)) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			KKASSERT(kn->kn_status & KN_PROCESSING);
			if (fops == &user_filtops) {
				filt_usertouch(kn, kev, EVENT_REGISTER);
			} else {
				kn->kn_sfflags = kev->fflags;
				kn->kn_sdata = kev->data;
				kn->kn_kevent.udata = kev->udata;
			}
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	} else if (kev->flags & EV_DELETE) {
		/*
		 * Delete the existing knote
		 */
		knote_detach_and_drop(kn);
		error = 0;
		++count;
		goto done;
	} else {
		/*
		 * Modify an existing event.
		 *
		 * The user may change some filter values after the
		 * initial EV_ADD, but doing so will not reset any
		 * filters which have already been triggered.
		 */
		KKASSERT(kn->kn_status & KN_PROCESSING);
		if (fops == &user_filtops) {
			filt_usertouch(kn, kev, EVENT_REGISTER);
		} else {
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * Execute the filter event to immediately activate the
		 * knote if necessary.  If reprocessing events are pending
		 * due to blocking above we do not run the filter here
		 * but instead let knote_release() do it.  Otherwise we
		 * might run the filter on a deleted event.
		 */
		if ((kn->kn_status & KN_REPROCESS) == 0) {
			if (filter_event(kn, 0))
				KNOTE_ACTIVATE(kn);
		}
	}

	/*
	 * Disablement does not deactivate a knote here.
	 */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	/*
	 * Re-enablement may have to immediately enqueue an active knote.
	 */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0)) {
			knote_enqueue(kn);
		}
	}

	/*
	 * Handle any required reprocessing
	 */
	knote_release(kn);
	/* kn may be invalid now */

	/*
	 * Loop control.  We stop on errors (above), and also stop after
	 * processing EV_RECEIPT, so the caller can process it.
	 */
	++count;
	if (kev->flags & EV_RECEIPT) {
		error = 0;
		goto done;
	}
	++kev;
	if (count < climit) {
		if (fp[count-1])	/* drop unprocessed fp */
			fdrop(fp[count-1]);
		goto loop;
	}

	/*
	 * Cleanup
	 */
done:
	if (td != NULL) {	/* Owner of the kq_regtd */
		kq->kq_regtd = NULL;
		if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
			kq->kq_state &= ~KQ_REGWAIT;
			wakeup(&kq->kq_regtd);
		}
	}
	lwkt_relpooltoken(kq);

	/*
	 * Drop unprocessed file pointers
	 */
	*countp = count;
	if (count && fp[count-1])
		fdrop(fp[count-1]);
	while (count < climit) {
		if (fp[count])
			fdrop(fp[count]);
		++count;
	}
	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker, int closedcounter, int flags)
{
	struct knote *kn, local_marker;
	thread_t td = curthread;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	local_marker.kn_status = KN_PROCESSING;

	lwkt_getpooltoken(kq);

	/*
	 * Adjust marker, insert initial marker, or leave the marker alone.
	 *
	 * Also setup our local_marker.
	 */
	switch (flags & KEVENT_SCAN_MASK) {
	case KEVENT_SCAN_RELOAD_MARKER:
		TAILQ_REMOVE(&kq->kq_knpend, marker, kn_tqe);
		/* fall through */
	case KEVENT_SCAN_INSERT_MARKER:
		TAILQ_INSERT_TAIL(&kq->kq_knpend, marker, kn_tqe);
		break;
	}
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);

	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other threads marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		/*
		 * We can't skip a knote undergoing processing, otherwise
		 * we risk not returning it when the user process expects
		 * it should be returned.  Sleep and retry.
		 */
		if (knote_acquire(kn) == 0)
			continue;

		/*
		 * Remove the event for processing.
		 *
		 * WARNING!  We must leave KN_QUEUED set to prevent the
		 *	     event from being KNOTE_ACTIVATE()d while
		 *	     the queue state is in limbo, in case we
		 *	     block.
		 */
		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		kq->kq_count--;

		/*
		 * Kernel select() and poll() functions cache previous
		 * operations on the assumption that future operations
		 * will use similar descriptor sets.  This removes any
		 * stale entries in a way that does not require a descriptor
		 * lookup and is thus not affected by close() races.
		 *
		 * Do not report to *_copyout()
		 */
		if (flags & KEVENT_AUTO_STALE) {
			if ((uint64_t)kn->kn_kevent.udata <
			    curthread->td_lwp->lwp_kqueue_serial) {
				kn->kn_status |= KN_DELETING | KN_REPROCESS |
						 KN_DISABLED;
			}
		}

		/*
		 * If a descriptor is close()d out from under a poll/select,
		 * we want to report the event but delete the note because
		 * the note can wind up being 'stuck' on kq_knpend.
		 */
		if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
		    checkfdclosed(td, kq->kq_fdp, kn->kn_kevent.ident,
				  kn->kn_fp, closedcounter)) {
			kn->kn_status |= KN_DELETING | KN_REPROCESS;
		}

		if (kn->kn_status & KN_DISABLED) {
			/*
			 * If disabled we ensure the event is not queued
			 * but leave its active bit set.  On re-enablement
			 * the event may be immediately triggered.
			 */
			kn->kn_status &= ~KN_QUEUED;
		} else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
			   (kn->kn_status & KN_DELETING) == 0 &&
			   filter_event(kn, 0) == 0) {
			/*
			 * If not running in one-shot mode and the event
			 * is no longer present we ensure it is removed
			 * from the queue and ignore it.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/*
			 * Post the event
			 */
			if (kn->kn_fop == &user_filtops)
				filt_usertouch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			++kevp;
			++total;
			--count;

			if (kn->kn_flags & EV_ONESHOT) {
				kn->kn_status &= ~KN_QUEUED;
				kn->kn_status |= KN_DELETING | KN_REPROCESS;
			} else if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				if (kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH) {
					kn->kn_status |= KN_DISABLED;
				}
				kn->kn_status &= ~(KN_QUEUED |
						   KN_ACTIVE);
			} else {
				TAILQ_INSERT_TAIL(&kq->kq_knpend,
						  kn, kn_tqe);
				kq->kq_count++;
			}
		}

		/*
		 * Handle any post-processing states
		 */
		knote_release(kn);
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	lwkt_relpooltoken(kq);
	return (total);
}

/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	kq = (struct kqueue *)fp->f_data;
	lwkt_getpooltoken(kq);
	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_relpooltoken(kq);
	return (error);
}

static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(&kq->kq_sigio);

	kfree(kq, M_KQUEUE);
	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_sleep_cnt) {
		u_int sleep_cnt = kq->kq_sleep_cnt;

		kq->kq_sleep_cnt = 0;
		if (sleep_cnt == 1)
			wakeup_one(kq);
		else
			wakeup(kq);
	}
	KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_attach(kn);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_attach(kn);
		rel_mplock();
	}
	return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
	kn->kn_status |= KN_DELETING | KN_REPROCESS;
	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		kn->kn_fop->f_detach(kn);
	} else {
		get_mplock();
		kn->kn_fop->f_detach(kn);
		rel_mplock();
	}
	knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
	int ret;

	if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
		ret = kn->kn_fop->f_event(kn, hint);
	} else {
		get_mplock();
		ret = kn->kn_fop->f_event(kn, hint);
		rel_mplock();
	}
	return (ret);
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * tricky.
 */
void
knote(struct klist *list, long hint)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(list);
restart:
	SLIST_FOREACH(kn, list, kn_next) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);

		/* temporary verification hack */
		SLIST_FOREACH(kntmp, list, kn_next) {
			if (kntmp == kn)
				break;
		}
		if (kn != kntmp || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			goto restart;
		}

		if (kn->kn_status & KN_PROCESSING) {
			/*
			 * Someone else is processing the knote, ask the
			 * other thread to reprocess it and don't mess
			 * with it otherwise.
			 */
			if (hint == 0) {
				kn->kn_status |= KN_REPROCESS;
				lwkt_relpooltoken(kq);
				continue;
			}

			/*
			 * If the hint is non-zero we have to wait or risk
			 * losing the state the caller is trying to update.
			 *
			 * XXX This is a real problem, certain process
			 *     and signal filters will bump kn_data for
			 *     already-processed notes more than once if
			 *     we restart the list scan.  FIXME.
			 */
			kn->kn_status |= KN_WAITING | KN_REPROCESS;
			tsleep(kn, 0, "knotec", hz);
			lwkt_relpooltoken(kq);
			goto restart;
		}

		/*
		 * Become the reprocessing master ourselves.
		 *
		 * If hint is non-zero running the event is mandatory
		 * when not deleting so do it whether reprocessing is
		 * set or not.
		 */
		kn->kn_status |= KN_PROCESSING;
		if ((kn->kn_status & KN_DELETING) == 0) {
			if (filter_event(kn, hint))
				KNOTE_ACTIVATE(kn);
		}
		if (knote_release(kn)) {
			lwkt_relpooltoken(kq);
			goto restart;
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(list);
}
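
/*
 * Illustrative sketch (not part of the original source): a driver that
 * embeds a kqinfo typically fires its klist from its interrupt or
 * completion path.  The softc layout and names are hypothetical.
 *
 *	struct mydev_softc {
 *		struct kqinfo	sc_kqinfo;
 *	};
 *
 *	static void
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		KNOTE(&sc->sc_kqinfo.ki_note, 0);
 *	}
 */
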
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_INSERT_HEAD(klist, kn, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
	lwkt_getpooltoken(klist);
	KKASSERT(kn->kn_status & KN_PROCESSING);
	SLIST_REMOVE(klist, kn, knote, kn_next);
	lwkt_relpooltoken(klist);
}

/*
 * Move knotes from one kqinfo to another, giving them new filter ops
 * and a new hook as we go.
 */
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
		    struct filterops *ops, void *hook)
{
	struct kqueue *kq;
	struct knote *kn;

	lwkt_getpooltoken(&src->ki_note);
	lwkt_getpooltoken(&dst->ki_note);
	while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
		kq = kn->kn_kq;
		lwkt_getpooltoken(kq);
		if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
			lwkt_relpooltoken(kq);
			continue;
		}
		if (knote_acquire(kn)) {
			knote_remove(&src->ki_note, kn);
			kn->kn_fop = ops;
			kn->kn_hook = (caddr_t)hook;
			knote_insert(&dst->ki_note, kn);
			knote_release(kn);
			/* kn may be invalid now */
		}
		lwkt_relpooltoken(kq);
	}
	lwkt_relpooltoken(&dst->ki_note);
	lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct kqueue *kq;
	struct knote *kn;
	struct knote *kntmp;

	lwkt_getpooltoken(&fp->f_klist);
restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kq = kn->kn_kq;
			lwkt_getpooltoken(kq);

			/* temporary verification hack */
			SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
				if (kntmp == kn)
					break;
			}
			if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
			    kn->kn_id != fd || kn->kn_kq != kq) {
				lwkt_relpooltoken(kq);
				goto restart;
			}
			if (knote_acquire(kn))
				knote_detach_and_drop(kn);
			lwkt_relpooltoken(kq);
			goto restart;
		}
	}
	lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	lwkt_getpooltoken(list);
	SLIST_INSERT_HEAD(list, kn, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_flags & FILTEROP_ISFD)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	lwkt_getpooltoken(list);
	SLIST_REMOVE(list, kn, knote, kn_link);
	lwkt_relpooltoken(list);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
		fdrop(kn->kn_fp);
		kn->kn_fp = NULL;
	}
	knote_free(kn);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
	return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
	struct knote_cache_list *cache_list;

	cache_list = &knote_cache_lists[mycpuid];
	if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
		SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
		cache_list->knote_cache_cnt++;
		return;
	}
	kfree(kn, M_KQUEUE);
}

struct sleepinfo {
	void		*ident;
	int		timedout;
};

static void
precise_sleep_intr(systimer_t info, int in_ipi, struct intrframe *frame)
{
	struct sleepinfo *si;

	si = info->data;
	si->timedout = 1;
	wakeup(si->ident);
}

static int
precise_sleep(void *ident, int flags, const char *wmesg, int us)
{
	struct systimer info;
	struct sleepinfo si = {
		.ident = ident,
		.timedout = 0,
	};
	int r;

	tsleep_interlock(ident, flags);
	systimer_init_oneshot(&info, precise_sleep_intr, &si, us);
	r = tsleep(ident, flags | PINTERLOCKED, wmesg, 0);
	systimer_del(&info);
	if (si.timedout)
		r = EWOULDBLOCK;

	return r;
}