/*
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/file2.h>

#include <vm/vm_zone.h>
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct thread *td, int *res);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);
static void	kqueue_wakeup(struct kqueue *kq);
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};
static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct thread *td);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);
static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)
#define KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
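
/*
 * Worked example of the hash above: for an identifier of 0x1234 with the
 * default mask of KN_HASHSIZE - 1 = 63, KN_HASH(0x1234, 63) computes
 * (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f = 0x26, so the knote lands in
 * bucket 38.
 */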
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;
/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
};
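
/*
 * Illustrative userland sketch (not part of this file): how the filters
 * above are reached through the documented kqueue(2)/kevent(2) API.  The
 * function name watch_fd_readable is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
watch_fd_readable(int fd)
{
	struct kevent change, ev;
	int kq;

	if ((kq = kqueue()) < 0)
		return (-1);
	/* EVFILT_READ dispatches through file_filtops in the table above */
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	/* register the change and wait for one event */
	return (kevent(kq, &change, 1, &ev, 1, NULL));
}
#endif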
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	get_mplock();
	if (kn->kn_filter != EVFILT_READ) {
		rel_mplock();
		return (1);
	}
	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}
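
/*
 * Illustrative userland sketch (hypothetical code): because the routine
 * above accepts EVFILT_READ on a kqueue descriptor, one kqueue can be
 * monitored by another; the inner kqueue reports readable whenever it
 * has pending events (see filt_kqueue below).
 */
#if 0
#include <sys/event.h>

static int
nest_kqueues(void)
{
	struct kevent kev;
	int inner, outer;

	if ((inner = kqueue()) < 0 || (outer = kqueue()) < 0)
		return (-1);
	/* the inner kqueue becomes readable when it has pending events */
	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(outer, &kev, 1, NULL, 0, NULL));
}
#endif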
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL)
		return (ESRCH);
	if (! PRISON_CHECK(curproc->p_ucred, p->p_ucred))
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);

	return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	/* XXX locking?  this might modify another process. */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
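
/*
 * Illustrative userland sketch (hypothetical code): registering the proc
 * filter handled above.  NOTE_TRACK asks filt_proc to attach a knote to
 * each forked child; a failed attach is reported via NOTE_TRACKERR.
 */
#if 0
#include <sys/event.h>

static int
track_process(int kq, pid_t pid)
{
	struct kevent kev;

	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif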
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}
static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}
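
/*
 * Illustrative userland sketch (hypothetical code): a periodic timer via
 * the filter above.  The ident is an arbitrary user-chosen number and
 * data is the period in milliseconds; on return, ev.data carries the
 * number of expirations since the event was last read (kn_data above).
 */
#if 0
#include <sys/event.h>

static int
arm_periodic_timer(int kq)
{
	struct kevent kev, ev;

	/* ident 1 is arbitrary; data is the period in milliseconds */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		return (-1);
	/* blocks roughly 500ms waiting for the first expiration */
	return (kevent(kq, NULL, 0, &ev, 1, NULL));
}
#endif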
int
sys_kqueue(struct kqueue_args *uap)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	kq->kq_fdp = fdp;

	fsetfd(p, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, n, nerrors, error;

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		uap->sysmsg_result = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist,
	    uap->timeout, td, &uap->sysmsg_result);
done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
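
/*
 * Illustrative userland sketch (hypothetical code): as implemented above,
 * when nevents is non-zero a failed registration does not abort the call;
 * it is reported back in the eventlist with EV_ERROR set and the errno
 * value in the data field.
 */
#if 0
#include <sys/event.h>

static int
register_with_receipt(int kq, int fd)
{
	struct kevent change, result;
	struct timespec zero = { 0, 0 };

	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &change, 1, &result, 1, &zero) > 0 &&
	    (result.flags & EV_ERROR))
		return ((int)result.data);	/* e.g. EBADF */
	return (0);
}
#endif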
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		crit_enter();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		crit_exit();
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		crit_enter();
		kn->kn_status |= KN_DISABLED;
		crit_exit();
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		crit_enter();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		crit_exit();
	}

done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}
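
/*
 * Worked example of the filter-to-index mapping above: user-visible
 * filters are small negative numbers, e.g. EVFILT_READ is -1, so
 * ~kev->filter = ~(-1) = 0 selects sysfilt_ops[0] (file_filtops), and
 * EVFILT_TIMER (-7) maps to sysfilt_ops[6] (timer_filtops).
 */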
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct thread *td, int *res)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz_high(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz_high(&ttv);
	}

start:
	kevp = kq->kq_kev;
	crit_enter();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
		crit_exit();
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			crit_exit();
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			crit_exit();
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			crit_enter();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			crit_exit();
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			crit_enter();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	crit_exit();
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	*res = maxevents - count;
	return (error);
}
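
/*
 * Worked example of the timeout handling above: a caller-supplied
 * timespec of { 2, 500000000 } becomes atv = 2.5 seconds and is
 * converted to ticks with tvtohz_high(); anything longer than 24 hours
 * is clamped to 24 * 60 * 60 * hz ticks and re-armed on the retry path.
 */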
/*
 * This could be expanded to call kqueue_scan, if desired.
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *cred)
{
	struct kqueue *kq;
	int error;

	error = 0;
	kq = (struct kqueue *)fp->f_data;

	switch (com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
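
/*
 * Illustrative userland sketch (hypothetical code): the FIOASYNC and
 * FIOSETOWN handling above lets a process request SIGIO when the first
 * event is queued (see the pgsigio() call in knote_enqueue()).
 */
#if 0
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <unistd.h>

static int
enable_sigio(int kq)
{
	int on = 1;
	int owner = getpid();

	if (ioctl(kq, FIOSETOWN, &owner) < 0)	/* direct SIGIO here */
		return (-1);
	return (ioctl(kq, FIOASYNC, &on));	/* enable async mode */
}
#endif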
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;

	get_mplock();
	crit_enter();
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	crit_exit();
	rel_mplock();
	return (revents);
}
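
/*
 * Illustrative userland sketch (hypothetical code): because of the poll
 * handler above, a kqueue descriptor can itself sit in a poll(2) set and
 * reports POLLIN while kq_count is non-zero.
 */
#if 0
#include <poll.h>

static int
wait_via_poll(int kq)
{
	struct pollfd pfd;

	pfd.fd = kq;
	pfd.events = POLLIN;
	/* readable as soon as the kqueue has pending events */
	return (poll(&pfd, 1, 1000));
}
#endif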
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	get_mplock();
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	fp->f_data = NULL;
	funsetown(kq->kq_sigio);
	kfree(kq, M_KQUEUE);
	rel_mplock();
	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}
/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
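
/*
 * Illustrative kernel-side sketch (hypothetical code): producers call
 * knote() through the KNOTE() macro when their state changes, e.g. a
 * driver waking readers after appending input.  The names exampledev_*
 * and sc_rsel are assumptions for the example.
 */
#if 0
static void
exampledev_input(struct exampledev_softc *sc, int nbytes)
{
	/* ... append nbytes of data to the receive buffer ... */
	KNOTE(&sc->sc_rsel.si_note, 0);	/* activates matching knotes */
}
#endif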
/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct thread *td, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
	}
}
/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];
	/* Take any thread of p */
	struct thread *td = FIRST_LWP_IN_PROC(p)->lwp_thread;

	knote_remove(td, list);
}
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_KQUEUE);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}
/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct filedesc *fdp;
	struct klist *list;

	KKASSERT(td->td_proc);
	fdp = td->td_proc->p_fd;
	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp);
	knote_free(kn);
}
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	crit_enter();
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	crit_exit();

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);

	kqueue_wakeup(kq);
}
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	crit_enter();

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	crit_exit();
}
static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}
static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}