kqueue: Avoid reprocessing processed knotes in KNOTE.
sys/kern/kern_event.c
1 /*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
26 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/proc.h>
33 #include <sys/malloc.h>
34 #include <sys/unistd.h>
35 #include <sys/file.h>
36 #include <sys/lock.h>
37 #include <sys/fcntl.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/eventvar.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/stat.h>
45 #include <sys/sysctl.h>
46 #include <sys/sysproto.h>
47 #include <sys/thread.h>
48 #include <sys/uio.h>
49 #include <sys/signalvar.h>
50 #include <sys/filio.h>
51 #include <sys/ktr.h>
53 #include <sys/thread2.h>
54 #include <sys/file2.h>
55 #include <sys/mplock2.h>
57 #define EVENT_REGISTER 1
58 #define EVENT_PROCESS 2
60 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
62 struct kevent_copyin_args {
63 struct kevent_args *ka;
64 int pchanges;
67 static int kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
68 struct knote *marker);
69 static int kqueue_read(struct file *fp, struct uio *uio,
70 struct ucred *cred, int flags);
71 static int kqueue_write(struct file *fp, struct uio *uio,
72 struct ucred *cred, int flags);
73 static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
74 struct ucred *cred, struct sysmsg *msg);
75 static int kqueue_kqfilter(struct file *fp, struct knote *kn);
76 static int kqueue_stat(struct file *fp, struct stat *st,
77 struct ucred *cred);
78 static int kqueue_close(struct file *fp);
79 static void kqueue_wakeup(struct kqueue *kq);
80 static int filter_attach(struct knote *kn);
81 static int filter_event(struct knote *kn, long hint);
84 * MPSAFE
86 static struct fileops kqueueops = {
87 .fo_read = kqueue_read,
88 .fo_write = kqueue_write,
89 .fo_ioctl = kqueue_ioctl,
90 .fo_kqfilter = kqueue_kqfilter,
91 .fo_stat = kqueue_stat,
92 .fo_close = kqueue_close,
93 .fo_shutdown = nofo_shutdown
96 static void knote_attach(struct knote *kn);
97 static void knote_drop(struct knote *kn);
98 static void knote_detach_and_drop(struct knote *kn);
99 static void knote_enqueue(struct knote *kn);
100 static void knote_dequeue(struct knote *kn);
101 static struct knote *knote_alloc(void);
102 static void knote_free(struct knote *kn);
104 static void filt_kqdetach(struct knote *kn);
105 static int filt_kqueue(struct knote *kn, long hint);
106 static int filt_procattach(struct knote *kn);
107 static void filt_procdetach(struct knote *kn);
108 static int filt_proc(struct knote *kn, long hint);
109 static int filt_fileattach(struct knote *kn);
110 static void filt_timerexpire(void *knx);
111 static int filt_timerattach(struct knote *kn);
112 static void filt_timerdetach(struct knote *kn);
113 static int filt_timer(struct knote *kn, long hint);
114 static int filt_userattach(struct knote *kn);
115 static void filt_userdetach(struct knote *kn);
116 static int filt_user(struct knote *kn, long hint);
117 static void filt_usertouch(struct knote *kn, struct kevent *kev,
118 u_long type);
120 static struct filterops file_filtops =
121 { FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
122 static struct filterops kqread_filtops =
123 { FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
124 static struct filterops proc_filtops =
125 { 0, filt_procattach, filt_procdetach, filt_proc };
126 static struct filterops timer_filtops =
127 { FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
128 static struct filterops user_filtops =
129 { FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
131 static int kq_ncallouts = 0;
132 static int kq_calloutmax = (4 * 1024);
133 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
134 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
135 static int kq_checkloop = 1000000;
136 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
137 &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
138 static int kq_wakeup_one = 1;
139 SYSCTL_INT(_kern, OID_AUTO, kq_wakeup_one, CTLFLAG_RW,
140 &kq_wakeup_one, 0, "Wakeup only one kqueue scanner");
142 #define KNOTE_ACTIVATE(kn) do { \
143 kn->kn_status |= KN_ACTIVE; \
144 if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
145 knote_enqueue(kn); \
146 } while(0)
148 #define KN_HASHSIZE 64 /* XXX should be tunable */
149 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
151 extern struct filterops aio_filtops;
152 extern struct filterops sig_filtops;
155 * Table for all system-defined filters.
157 static struct filterops *sysfilt_ops[] = {
158 &file_filtops, /* EVFILT_READ */
159 &file_filtops, /* EVFILT_WRITE */
160 &aio_filtops, /* EVFILT_AIO */
161 &file_filtops, /* EVFILT_VNODE */
162 &proc_filtops, /* EVFILT_PROC */
163 &sig_filtops, /* EVFILT_SIGNAL */
164 &timer_filtops, /* EVFILT_TIMER */
165 &file_filtops, /* EVFILT_EXCEPT */
166 &user_filtops, /* EVFILT_USER */
170 * Acquire a knote, return non-zero on success, 0 on failure.
172 * If we cannot acquire the knote we sleep and return 0. The knote
173 * may be stale on return in this case and the caller must restart
174 * whatever loop they are in.
176 * Related kq token must be held.
178 static __inline int
179 knote_acquire(struct knote *kn)
181 if (kn->kn_status & KN_PROCESSING) {
182 kn->kn_status |= KN_WAITING | KN_REPROCESS;
183 tsleep(kn, 0, "kqepts", hz);
184 /* knote may be stale now */
185 return(0);
187 kn->kn_status |= KN_PROCESSING;
188 return(1);
192 * Release an acquired knote, clearing KN_PROCESSING and handling any
193 * KN_REPROCESS events.
195 * Caller must be holding the related kq token
197 * Non-zero is returned if the knote is destroyed or detached.
199 static __inline int
200 knote_release(struct knote *kn)
202 int ret;
204 while (kn->kn_status & KN_REPROCESS) {
205 kn->kn_status &= ~KN_REPROCESS;
206 if (kn->kn_status & KN_WAITING) {
207 kn->kn_status &= ~KN_WAITING;
208 wakeup(kn);
210 if (kn->kn_status & KN_DELETING) {
211 knote_detach_and_drop(kn);
212 return(1);
213 /* NOT REACHED */
215 if (filter_event(kn, 0))
216 KNOTE_ACTIVATE(kn);
218 if (kn->kn_status & KN_DETACHED)
219 ret = 1;
220 else
221 ret = 0;
222 kn->kn_status &= ~KN_PROCESSING;
223 /* kn should not be accessed anymore */
224 return ret;
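/*
 * Illustrative sketch (excluded from the build): the restart pattern a
 * caller is expected to wrap around knote_acquire()/knote_release().
 * A failed acquire has slept, so the knote may be stale and the whole
 * lookup must be redone; compare the again1/again2 loops in
 * kqueue_register() below.  The list walk and match test here are
 * assumptions for illustration only.
 */
#if 0
static void
example_acquire_pattern(struct klist *list, struct kqueue *kq, uintptr_t ident)
{
	struct knote *kn;

	lwkt_getpooltoken(list);
again:
	SLIST_FOREACH(kn, list, kn_link) {
		if (kn->kn_kq != kq || kn->kn_id != ident)
			continue;
		if (knote_acquire(kn) == 0)
			goto again;	/* slept; kn may be stale */
		/* ... operate on the acquired knote ... */
		knote_release(kn);	/* kn may be destroyed on return */
		break;
	}
	lwkt_relpooltoken(list);
}
#endif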
227 static int
228 filt_fileattach(struct knote *kn)
230 return (fo_kqfilter(kn->kn_fp, kn));
234 * MPSAFE
236 static int
237 kqueue_kqfilter(struct file *fp, struct knote *kn)
239 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
241 if (kn->kn_filter != EVFILT_READ)
242 return (EOPNOTSUPP);
244 kn->kn_fop = &kqread_filtops;
245 knote_insert(&kq->kq_kqinfo.ki_note, kn);
246 return (0);
249 static void
250 filt_kqdetach(struct knote *kn)
252 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
254 knote_remove(&kq->kq_kqinfo.ki_note, kn);
257 /*ARGSUSED*/
258 static int
259 filt_kqueue(struct knote *kn, long hint)
261 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
263 kn->kn_data = kq->kq_count;
264 return (kn->kn_data > 0);
267 static int
268 filt_procattach(struct knote *kn)
270 struct proc *p;
271 int immediate;
273 immediate = 0;
274 p = pfind(kn->kn_id);
275 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
276 p = zpfind(kn->kn_id);
277 immediate = 1;
279 if (p == NULL) {
280 return (ESRCH);
282 if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
283 if (p)
284 PRELE(p);
285 return (EACCES);
288 lwkt_gettoken(&p->p_token);
289 kn->kn_ptr.p_proc = p;
290 kn->kn_flags |= EV_CLEAR; /* automatically set */
293 * internal flag indicating registration done by kernel
295 if (kn->kn_flags & EV_FLAG1) {
296 kn->kn_data = kn->kn_sdata; /* ppid */
297 kn->kn_fflags = NOTE_CHILD;
298 kn->kn_flags &= ~EV_FLAG1;
301 knote_insert(&p->p_klist, kn);
304 * Immediately activate any exit notes if the target process is a
305 * zombie. This is necessary to handle the case where the target
306 * process, e.g. a child, dies before the kevent is registered.
308 if (immediate && filt_proc(kn, NOTE_EXIT))
309 KNOTE_ACTIVATE(kn);
310 lwkt_reltoken(&p->p_token);
311 PRELE(p);
313 return (0);
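/*
 * Hedged userland illustration (not part of the kernel build) of the
 * EVFILT_PROC attach path above: wait for a child's NOTE_EXIT.  If the
 * child is already a zombie at EV_ADD time, the immediate-activation
 * case handled above still delivers the event.  Error handling is
 * abbreviated and the helper name is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>

static int
wait_for_exit(pid_t pid)
{
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, NULL);
	if (kq == -1 || kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (-1);
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		return ((int)kev.data);		/* exit status (p_xstat) */
	return (-1);
}
#endif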
317 * The knote may be attached to a different process, which may exit,
318 * leaving nothing for the knote to be attached to. So when the process
319 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
320 * it will be deleted when read out. However, as part of the knote deletion,
321 * this routine is called, so a check is needed to avoid actually performing
322 * a detach, because the original process does not exist any more.
324 static void
325 filt_procdetach(struct knote *kn)
327 struct proc *p;
329 if (kn->kn_status & KN_DETACHED)
330 return;
331 p = kn->kn_ptr.p_proc;
332 knote_remove(&p->p_klist, kn);
335 static int
336 filt_proc(struct knote *kn, long hint)
338 u_int event;
341 * mask off extra data
343 event = (u_int)hint & NOTE_PCTRLMASK;
346 * if the user is interested in this event, record it.
348 if (kn->kn_sfflags & event)
349 kn->kn_fflags |= event;
352 * Process is gone, so flag the event as finished. Detach the
353 * knote from the process now because the process will be poof,
354 * gone later on.
356 if (event == NOTE_EXIT) {
357 struct proc *p = kn->kn_ptr.p_proc;
358 if ((kn->kn_status & KN_DETACHED) == 0) {
359 PHOLD(p);
360 knote_remove(&p->p_klist, kn);
361 kn->kn_status |= KN_DETACHED;
362 kn->kn_data = p->p_xstat;
363 kn->kn_ptr.p_proc = NULL;
364 PRELE(p);
366 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
367 return (1);
371 * process forked, and user wants to track the new process,
372 * so attach a new knote to it, and immediately report an
373 * event with the parent's pid.
375 if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
376 struct kevent kev;
377 int error;
380 * register knote with new process.
382 kev.ident = hint & NOTE_PDATAMASK; /* pid */
383 kev.filter = kn->kn_filter;
384 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
385 kev.fflags = kn->kn_sfflags;
386 kev.data = kn->kn_id; /* parent */
387 kev.udata = kn->kn_kevent.udata; /* preserve udata */
388 error = kqueue_register(kn->kn_kq, &kev);
389 if (error)
390 kn->kn_fflags |= NOTE_TRACKERR;
393 return (kn->kn_fflags != 0);
396 static void
397 filt_timerreset(struct knote *kn)
399 struct callout *calloutp;
400 struct timeval tv;
401 int tticks;
403 tv.tv_sec = kn->kn_sdata / 1000;
404 tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
405 tticks = tvtohz_high(&tv);
406 calloutp = (struct callout *)kn->kn_hook;
407 callout_reset(calloutp, tticks, filt_timerexpire, kn);
411 * The callout interlocks with callout_terminate() but can still
412 * race a deletion so if KN_DELETING is set we just don't touch
413 * the knote.
415 static void
416 filt_timerexpire(void *knx)
418 struct knote *kn = knx;
419 struct kqueue *kq = kn->kn_kq;
421 lwkt_getpooltoken(kq);
424 * Open-code knote_acquire(), since we can't sleep in a callout;
425 * however, we do need to record this expiration.
427 kn->kn_data++;
428 if (kn->kn_status & KN_PROCESSING) {
429 kn->kn_status |= KN_REPROCESS;
430 if ((kn->kn_status & KN_DELETING) == 0 &&
431 (kn->kn_flags & EV_ONESHOT) == 0)
432 filt_timerreset(kn);
433 lwkt_relpooltoken(kq);
434 return;
436 KASSERT((kn->kn_status & KN_DELETING) == 0,
437 ("acquire a deleting knote %#x", kn->kn_status));
438 kn->kn_status |= KN_PROCESSING;
440 KNOTE_ACTIVATE(kn);
441 if ((kn->kn_flags & EV_ONESHOT) == 0)
442 filt_timerreset(kn);
444 knote_release(kn);
446 lwkt_relpooltoken(kq);
450 * data contains amount of time to sleep, in milliseconds
452 static int
453 filt_timerattach(struct knote *kn)
455 struct callout *calloutp;
456 int prev_ncallouts;
458 prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
459 if (prev_ncallouts >= kq_calloutmax) {
460 atomic_subtract_int(&kq_ncallouts, 1);
461 kn->kn_hook = NULL;
462 return (ENOMEM);
465 kn->kn_flags |= EV_CLEAR; /* automatically set */
466 calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
467 callout_init_mp(calloutp);
468 kn->kn_hook = (caddr_t)calloutp;
470 filt_timerreset(kn);
471 return (0);
475 * This function is called with the knote flagged locked but it is
476 * still possible to race a callout event due to the callback blocking.
477 * We must call callout_terminate() instead of callout_stop() to deal
478 * with the race.
480 static void
481 filt_timerdetach(struct knote *kn)
483 struct callout *calloutp;
485 calloutp = (struct callout *)kn->kn_hook;
486 callout_terminate(calloutp);
487 kfree(calloutp, M_KQUEUE);
488 atomic_subtract_int(&kq_ncallouts, 1);
491 static int
492 filt_timer(struct knote *kn, long hint)
495 return (kn->kn_data != 0);
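/*
 * Hedged userland illustration of the timer filter: the event's data
 * field is a period in milliseconds as noted above, and because
 * EV_CLEAR is set automatically, kn_data reports the number of
 * expirations since the event was last read.  Names below are
 * assumptions for illustration only.
 */
#if 0
#include <sys/event.h>
#include <stdio.h>

static void
periodic_ticks(int kq, int n)
{
	struct kevent kev;

	/* 500ms period; add EV_ONESHOT instead for a single expiration */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
	while (n-- > 0) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) != 1)
			break;
		printf("timer expired %d time(s)\n", (int)kev.data);
	}
}
#endif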
499 * EVFILT_USER
501 static int
502 filt_userattach(struct knote *kn)
504 kn->kn_hook = NULL;
505 if (kn->kn_fflags & NOTE_TRIGGER)
506 kn->kn_ptr.hookid = 1;
507 else
508 kn->kn_ptr.hookid = 0;
509 return 0;
512 static void
513 filt_userdetach(struct knote *kn)
515 /* nothing to do */
518 static int
519 filt_user(struct knote *kn, long hint)
521 return (kn->kn_ptr.hookid);
524 static void
525 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
527 u_int ffctrl;
529 switch (type) {
530 case EVENT_REGISTER:
531 if (kev->fflags & NOTE_TRIGGER)
532 kn->kn_ptr.hookid = 1;
534 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
535 kev->fflags &= NOTE_FFLAGSMASK;
536 switch (ffctrl) {
537 case NOTE_FFNOP:
538 break;
540 case NOTE_FFAND:
541 kn->kn_sfflags &= kev->fflags;
542 break;
544 case NOTE_FFOR:
545 kn->kn_sfflags |= kev->fflags;
546 break;
548 case NOTE_FFCOPY:
549 kn->kn_sfflags = kev->fflags;
550 break;
552 default:
553 /* XXX Return error? */
554 break;
556 kn->kn_sdata = kev->data;
559 * This is not the correct use of EV_CLEAR in an event
560 * modification; it should have been passed as a NOTE instead.
561 * But we need to maintain compatibility with Apple & FreeBSD.
563 * Note however that EV_CLEAR can still be used when doing
564 * the initial registration of the event and works as expected
565 * (clears the event on reception).
567 if (kev->flags & EV_CLEAR) {
568 kn->kn_ptr.hookid = 0;
569 kn->kn_data = 0;
570 kn->kn_fflags = 0;
572 break;
574 case EVENT_PROCESS:
575 *kev = kn->kn_kevent;
576 kev->fflags = kn->kn_sfflags;
577 kev->data = kn->kn_sdata;
578 if (kn->kn_flags & EV_CLEAR) {
579 kn->kn_ptr.hookid = 0;
580 /* kn_data, kn_fflags handled by parent */
582 break;
584 default:
585 panic("filt_usertouch() - invalid type (%ld)", type);
586 break;
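/*
 * Hedged userland illustration of EVFILT_USER as handled by
 * filt_usertouch() above: one side arms the event with EV_ADD|EV_CLEAR,
 * the other fires it with NOTE_TRIGGER, which sets the hookid checked
 * by filt_user().  Helper names are assumptions.
 */
#if 0
#include <sys/event.h>

static void
user_event_arm(int kq)
{
	struct kevent kev;

	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
}

static void
user_event_fire(int kq)
{
	struct kevent kev;

	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif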
591 * Initialize a kqueue.
593 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
595 * MPSAFE
597 void
598 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
600 TAILQ_INIT(&kq->kq_knpend);
601 TAILQ_INIT(&kq->kq_knlist);
602 kq->kq_count = 0;
603 kq->kq_fdp = fdp;
604 SLIST_INIT(&kq->kq_kqinfo.ki_note);
608 * Terminate a kqueue. Freeing the actual kq itself is left up to the
609 * caller (it might be embedded in a lwp so we don't do it here).
611 * The kq's knlist must be completely eradicated so block on any
612 * processing races.
614 void
615 kqueue_terminate(struct kqueue *kq)
617 struct lwkt_token *tok;
618 struct knote *kn;
620 tok = lwkt_token_pool_lookup(kq);
621 lwkt_gettoken(tok);
622 while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
623 if (knote_acquire(kn))
624 knote_detach_and_drop(kn);
626 lwkt_reltoken(tok);
628 if (kq->kq_knhash) {
629 hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
630 kq->kq_knhash = NULL;
631 kq->kq_knhashmask = 0;
636 * MPSAFE
639 sys_kqueue(struct kqueue_args *uap)
641 struct thread *td = curthread;
642 struct kqueue *kq;
643 struct file *fp;
644 int fd, error;
646 error = falloc(td->td_lwp, &fp, &fd);
647 if (error)
648 return (error);
649 fp->f_flag = FREAD | FWRITE;
650 fp->f_type = DTYPE_KQUEUE;
651 fp->f_ops = &kqueueops;
653 kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
654 kqueue_init(kq, td->td_proc->p_fd);
655 fp->f_data = kq;
657 fsetfd(kq->kq_fdp, fp, fd);
658 uap->sysmsg_result = fd;
659 fdrop(fp);
660 return (error);
664 * Copy 'count' items into the destination list pointed to by uap->eventlist.
666 static int
667 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
669 struct kevent_copyin_args *kap;
670 int error;
672 kap = (struct kevent_copyin_args *)arg;
674 error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
675 if (error == 0) {
676 kap->ka->eventlist += count;
677 *res += count;
678 } else {
679 *res = -1;
682 return (error);
686 * Copy at most 'max' items from the list pointed to by kap->changelist,
687 * return number of items in 'events'.
689 static int
690 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
692 struct kevent_copyin_args *kap;
693 int error, count;
695 kap = (struct kevent_copyin_args *)arg;
697 count = min(kap->ka->nchanges - kap->pchanges, max);
698 error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
699 if (error == 0) {
700 kap->ka->changelist += count;
701 kap->pchanges += count;
702 *events = count;
705 return (error);
709 * MPSAFE
712 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
713 k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
714 struct timespec *tsp_in)
716 struct kevent *kevp;
717 struct timespec *tsp, ats;
718 int i, n, total, error, nerrors = 0;
719 int lres;
720 int limit = kq_checkloop;
721 struct kevent kev[KQ_NEVENTS];
722 struct knote marker;
723 struct lwkt_token *tok;
725 if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
726 atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
728 tsp = tsp_in;
729 *res = 0;
731 for (;;) {
732 n = 0;
733 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
734 if (error)
735 return error;
736 if (n == 0)
737 break;
738 for (i = 0; i < n; i++) {
739 kevp = &kev[i];
740 kevp->flags &= ~EV_SYSFLAGS;
741 error = kqueue_register(kq, kevp);
744 * If a registration returns an error we
745 * immediately post the error. The kevent()
746 * call itself will fail with the error if
747 * no space is available for posting.
749 * Such errors normally bypass the timeout/blocking
750 * code. However, if the copyoutfn function refuses
751 * to post the error (see sys_poll()), then we
752 * ignore it too.
754 if (error || (kevp->flags & EV_RECEIPT)) {
755 kevp->flags = EV_ERROR;
756 kevp->data = error;
757 lres = *res;
758 kevent_copyoutfn(uap, kevp, 1, res);
759 if (*res < 0) {
760 return error;
761 } else if (lres != *res) {
762 nevents--;
763 nerrors++;
768 if (nerrors)
769 return 0;
772 * Acquire/wait for events - setup timeout
774 if (tsp != NULL) {
775 if (tsp->tv_sec || tsp->tv_nsec) {
776 getnanouptime(&ats);
777 timespecadd(tsp, &ats); /* tsp = target time */
782 * Loop as required.
784 * Collect as many events as we can. Sleeping on successive
785 * loops is disabled if copyoutfn has incremented (*res).
787 * The loop stops if an error occurs, all events have been
788 * scanned (the marker has been reached), or fewer than the
789 * maximum number of events is found.
791 * The copyoutfn function does not have to increment (*res) in
792 * order for the loop to continue.
794 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
796 total = 0;
797 error = 0;
798 marker.kn_filter = EVFILT_MARKER;
799 marker.kn_status = KN_PROCESSING;
800 tok = lwkt_token_pool_lookup(kq);
801 lwkt_gettoken(tok);
802 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
803 lwkt_reltoken(tok);
804 while ((n = nevents - total) > 0) {
805 if (n > KQ_NEVENTS)
806 n = KQ_NEVENTS;
809 * If no events are pending sleep until timeout (if any)
810 * or an event occurs.
812 * After the sleep completes the marker is moved to the
813 * end of the list, making any received events available
814 * to our scan.
816 if (kq->kq_count == 0 && *res == 0) {
817 int timeout;
819 if (tsp == NULL) {
820 timeout = 0;
821 } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
822 error = EWOULDBLOCK;
823 break;
824 } else {
825 struct timespec atx = *tsp;
827 getnanouptime(&ats);
828 timespecsub(&atx, &ats);
829 if (atx.tv_sec < 0) {
830 error = EWOULDBLOCK;
831 break;
832 } else {
833 timeout = atx.tv_sec > 24 * 60 * 60 ?
834 24 * 60 * 60 * hz :
835 tstohz_high(&atx);
839 lwkt_gettoken(tok);
840 if (kq->kq_count == 0) {
841 kq->kq_state |= KQ_SLEEP;
842 error = tsleep(kq, PCATCH, "kqread", timeout);
844 /* don't restart after signals... */
845 if (error == ERESTART)
846 error = EINTR;
847 if (error) {
848 lwkt_reltoken(tok);
849 break;
852 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
853 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
854 kn_tqe);
856 lwkt_reltoken(tok);
860 * Process all received events
861 * Account for all non-spurious events in our total
863 i = kqueue_scan(kq, kev, n, &marker);
864 if (i) {
865 lres = *res;
866 error = kevent_copyoutfn(uap, kev, i, res);
867 total += *res - lres;
868 if (error)
869 break;
871 if (limit && --limit == 0)
872 panic("kqueue: checkloop failed i=%d", i);
875 * Normally when fewer events are returned than requested
876 * we can stop. However, if only spurious events were
877 * collected the copyout will not bump (*res) and we have
878 * to continue.
880 if (i < n && *res)
881 break;
884 * Deal with an edge case where spurious events can cause
885 * a loop to occur without moving the marker. This can
886 * prevent kqueue_scan() from picking up new events which
887 * race us. We must be sure to move the marker for this
888 * case.
890 * NOTE: We do not want to move the marker if events
891 * were scanned because normal kqueue operations
892 * may reactivate events. Moving the marker in
893 * that case could result in duplicates for the
894 * same event.
896 if (i == 0) {
897 lwkt_gettoken(tok);
898 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
899 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
900 lwkt_reltoken(tok);
903 lwkt_gettoken(tok);
904 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
905 lwkt_reltoken(tok);
907 /* Timeouts do not return EWOULDBLOCK. */
908 if (error == EWOULDBLOCK)
909 error = 0;
910 return error;
914 * MPALMOSTSAFE
917 sys_kevent(struct kevent_args *uap)
919 struct thread *td = curthread;
920 struct proc *p = td->td_proc;
921 struct timespec ts, *tsp;
922 struct kqueue *kq;
923 struct file *fp = NULL;
924 struct kevent_copyin_args *kap, ka;
925 int error;
927 if (uap->timeout) {
928 error = copyin(uap->timeout, &ts, sizeof(ts));
929 if (error)
930 return (error);
931 tsp = &ts;
932 } else {
933 tsp = NULL;
935 fp = holdfp(p->p_fd, uap->fd, -1);
936 if (fp == NULL)
937 return (EBADF);
938 if (fp->f_type != DTYPE_KQUEUE) {
939 fdrop(fp);
940 return (EBADF);
943 kq = (struct kqueue *)fp->f_data;
945 kap = &ka;
946 kap->ka = uap;
947 kap->pchanges = 0;
949 error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
950 kevent_copyin, kevent_copyout, tsp);
952 fdrop(fp);
954 return (error);
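/*
 * Hedged userland illustration of the calling convention serviced by
 * sys_kevent()/kern_kevent() above: changes are registered first, then
 * the same call waits (up to the timeout) for events.  A failed
 * registration comes back as an EV_ERROR event in the eventlist rather
 * than blocking.  The helper name is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_readable(int kq, int fd)
{
	struct kevent chg, ev;
	struct timespec ts = { 2, 0 };	/* wait at most two seconds */

	EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	/* returns 1 with an event, 0 on timeout, -1 on error */
	return (kevent(kq, &chg, 1, &ev, 1, &ts));
}
#endif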
958 kqueue_register(struct kqueue *kq, struct kevent *kev)
960 struct lwkt_token *tok;
961 struct filedesc *fdp = kq->kq_fdp;
962 struct filterops *fops;
963 struct file *fp = NULL;
964 struct knote *kn = NULL;
965 int error = 0;
967 if (kev->filter < 0) {
968 if (kev->filter + EVFILT_SYSCOUNT < 0)
969 return (EINVAL);
970 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
971 } else {
973 * XXX
974 * filter attach routine is responsible for insuring that
975 * the identifier can be attached to it.
977 return (EINVAL);
980 tok = lwkt_token_pool_lookup(kq);
981 lwkt_gettoken(tok);
982 if (fops->f_flags & FILTEROP_ISFD) {
983 /* validate descriptor */
984 fp = holdfp(fdp, kev->ident, -1);
985 if (fp == NULL) {
986 lwkt_reltoken(tok);
987 return (EBADF);
989 lwkt_getpooltoken(&fp->f_klist);
990 again1:
991 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
992 if (kn->kn_kq == kq &&
993 kn->kn_filter == kev->filter &&
994 kn->kn_id == kev->ident) {
995 if (knote_acquire(kn) == 0)
996 goto again1;
997 break;
1000 lwkt_relpooltoken(&fp->f_klist);
1001 } else {
1002 if (kq->kq_knhashmask) {
1003 struct klist *list;
1005 list = &kq->kq_knhash[
1006 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1007 lwkt_getpooltoken(list);
1008 again2:
1009 SLIST_FOREACH(kn, list, kn_link) {
1010 if (kn->kn_id == kev->ident &&
1011 kn->kn_filter == kev->filter) {
1012 if (knote_acquire(kn) == 0)
1013 goto again2;
1014 break;
1017 lwkt_relpooltoken(list);
1022 * NOTE: At this point if kn is non-NULL we will have acquired
1023 * it and set KN_PROCESSING.
1025 if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
1026 error = ENOENT;
1027 goto done;
1031 * kn now contains the matching knote, or NULL if no match
1033 if (kev->flags & EV_ADD) {
1034 if (kn == NULL) {
1035 kn = knote_alloc();
1036 kn->kn_fp = fp;
1037 kn->kn_kq = kq;
1038 kn->kn_fop = fops;
1041 * apply reference count to knote structure, and
1042 * do not release it at the end of this routine.
1044 fp = NULL;
1046 kn->kn_sfflags = kev->fflags;
1047 kn->kn_sdata = kev->data;
1048 kev->fflags = 0;
1049 kev->data = 0;
1050 kn->kn_kevent = *kev;
1053 * KN_PROCESSING prevents the knote from getting
1054 * ripped out from under us while we are trying
1055 * to attach it, in case the attach blocks.
1057 kn->kn_status = KN_PROCESSING;
1058 knote_attach(kn);
1059 if ((error = filter_attach(kn)) != 0) {
1060 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1061 knote_drop(kn);
1062 goto done;
1066 * Interlock against close races which either tried
1067 * to remove our knote while we were blocked or missed
1068 * it entirely prior to our attachment. We do not
1069 * want to end up with a knote on a closed descriptor.
1071 if ((fops->f_flags & FILTEROP_ISFD) &&
1072 checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
1073 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1075 } else {
1077 * The user may change some filter values after the
1078 * initial EV_ADD, but doing so will not reset any
1079 * filters which have already been triggered.
1081 KKASSERT(kn->kn_status & KN_PROCESSING);
1082 if (fops == &user_filtops) {
1083 filt_usertouch(kn, kev, EVENT_REGISTER);
1084 } else {
1085 kn->kn_sfflags = kev->fflags;
1086 kn->kn_sdata = kev->data;
1087 kn->kn_kevent.udata = kev->udata;
1092 * Execute the filter event to immediately activate the
1093 * knote if necessary. If reprocessing events are pending
1094 * due to blocking above we do not run the filter here
1095 * but instead let knote_release() do it. Otherwise we
1096 * might run the filter on a deleted event.
1098 if ((kn->kn_status & KN_REPROCESS) == 0) {
1099 if (filter_event(kn, 0))
1100 KNOTE_ACTIVATE(kn);
1102 } else if (kev->flags & EV_DELETE) {
1104 * Delete the existing knote
1106 knote_detach_and_drop(kn);
1107 goto done;
1108 } else {
1110 * Modify an existing event.
1112 * The user may change some filter values after the
1113 * initial EV_ADD, but doing so will not reset any
1114 * filters which have already been triggered.
1116 KKASSERT(kn->kn_status & KN_PROCESSING);
1117 if (fops == &user_filtops) {
1118 filt_usertouch(kn, kev, EVENT_REGISTER);
1119 } else {
1120 kn->kn_sfflags = kev->fflags;
1121 kn->kn_sdata = kev->data;
1122 kn->kn_kevent.udata = kev->udata;
1126 * Execute the filter event to immediately activate the
1127 * knote if necessary. If reprocessing events are pending
1128 * due to blocking above we do not run the filter here
1129 * but instead let knote_release() do it. Otherwise we
1130 * might run the filter on a deleted event.
1132 if ((kn->kn_status & KN_REPROCESS) == 0) {
1133 if (filter_event(kn, 0))
1134 KNOTE_ACTIVATE(kn);
1139 * Disablement does not deactivate a knote here.
1141 if ((kev->flags & EV_DISABLE) &&
1142 ((kn->kn_status & KN_DISABLED) == 0)) {
1143 kn->kn_status |= KN_DISABLED;
1147 * Re-enablement may have to immediately enqueue an active knote.
1149 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1150 kn->kn_status &= ~KN_DISABLED;
1151 if ((kn->kn_status & KN_ACTIVE) &&
1152 ((kn->kn_status & KN_QUEUED) == 0)) {
1153 knote_enqueue(kn);
1158 * Handle any required reprocessing
1160 knote_release(kn);
1161 /* kn may be invalid now */
1163 done:
1164 lwkt_reltoken(tok);
1165 if (fp != NULL)
1166 fdrop(fp);
1167 return (error);
1171 * Scan the kqueue, return the number of active events placed in kevp up
1172 * to count.
1174 * Continuous mode events may get recycled, do not continue scanning past
1175 * marker unless no events have been collected.
1177 static int
1178 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
1179 struct knote *marker)
1181 struct knote *kn, local_marker;
1182 int total;
1184 total = 0;
1185 local_marker.kn_filter = EVFILT_MARKER;
1186 local_marker.kn_status = KN_PROCESSING;
1188 lwkt_getpooltoken(kq);
1191 * Collect events.
1193 TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
1194 while (count) {
1195 kn = TAILQ_NEXT(&local_marker, kn_tqe);
1196 if (kn->kn_filter == EVFILT_MARKER) {
1197 /* Marker reached, we are done */
1198 if (kn == marker)
1199 break;
1201 /* Move local marker past some other thread's marker */
1202 kn = TAILQ_NEXT(kn, kn_tqe);
1203 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1204 TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
1205 continue;
1209 * We can't skip a knote undergoing processing, otherwise
1210 * we risk not returning it when the user process expects
1211 * it should be returned. Sleep and retry.
1213 if (knote_acquire(kn) == 0)
1214 continue;
1217 * Remove the event for processing.
1219 * WARNING! We must leave KN_QUEUED set to prevent the
1220 * event from being KNOTE_ACTIVATE()d while
1221 * the queue state is in limbo, in case we
1222 * block.
1224 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1225 kq->kq_count--;
1228 * We have to deal with an extremely important race against
1229 * file descriptor close()s here. The file descriptor can
1230 * disappear MPSAFE, and there is a small window of
1231 * opportunity between that and the call to knote_fdclose().
1233 * If we hit that window here while doselect or dopoll is
1234 * trying to delete a spurious event they will not be able
1235 * to match up the event against a knote and will go haywire.
1237 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
1238 checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
1239 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1242 if (kn->kn_status & KN_DISABLED) {
1244 * If disabled we ensure the event is not queued
1245 * but leave its active bit set. On re-enablement
1246 * the event may be immediately triggered.
1248 kn->kn_status &= ~KN_QUEUED;
1249 } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
1250 (kn->kn_status & KN_DELETING) == 0 &&
1251 filter_event(kn, 0) == 0) {
1253 * If not running in one-shot mode and the event
1254 * is no longer present we ensure it is removed
1255 * from the queue and ignore it.
1257 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1258 } else {
1260 * Post the event
1262 if (kn->kn_fop == &user_filtops)
1263 filt_usertouch(kn, kevp, EVENT_PROCESS);
1264 else
1265 *kevp = kn->kn_kevent;
1266 ++kevp;
1267 ++total;
1268 --count;
1270 if (kn->kn_flags & EV_ONESHOT) {
1271 kn->kn_status &= ~KN_QUEUED;
1272 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1273 } else {
1274 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1275 if (kn->kn_flags & EV_CLEAR) {
1276 kn->kn_data = 0;
1277 kn->kn_fflags = 0;
1279 if (kn->kn_flags & EV_DISPATCH) {
1280 kn->kn_status |= KN_DISABLED;
1282 kn->kn_status &= ~(KN_QUEUED |
1283 KN_ACTIVE);
1284 } else {
1285 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1286 kq->kq_count++;
1292 * Handle any post-processing states
1294 knote_release(kn);
1296 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1298 lwkt_relpooltoken(kq);
1299 return (total);
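/*
 * Hedged userland illustration of the EV_CLEAR/EV_DISPATCH
 * post-processing above: an EV_DISPATCH event is disabled after
 * delivery and must be explicitly re-enabled with EV_ENABLE once the
 * consumer has dealt with it.  The helper name is an assumption.
 */
#if 0
#include <sys/event.h>

static void
dispatch_loop(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) != 1)
			break;
		/* ... consume the readable data ... */
		EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
		kevent(kq, &kev, 1, NULL, 0, NULL);
	}
}
#endif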
1303 * XXX
1304 * This could be expanded to call kqueue_scan, if desired.
1306 * MPSAFE
1308 static int
1309 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1311 return (ENXIO);
1315 * MPSAFE
1317 static int
1318 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1320 return (ENXIO);
1324 * MPALMOSTSAFE
1326 static int
1327 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
1328 struct ucred *cred, struct sysmsg *msg)
1330 struct lwkt_token *tok;
1331 struct kqueue *kq;
1332 int error;
1334 kq = (struct kqueue *)fp->f_data;
1335 tok = lwkt_token_pool_lookup(kq);
1336 lwkt_gettoken(tok);
1338 switch(com) {
1339 case FIOASYNC:
1340 if (*(int *)data)
1341 kq->kq_state |= KQ_ASYNC;
1342 else
1343 kq->kq_state &= ~KQ_ASYNC;
1344 error = 0;
1345 break;
1346 case FIOSETOWN:
1347 error = fsetown(*(int *)data, &kq->kq_sigio);
1348 break;
1349 default:
1350 error = ENOTTY;
1351 break;
1353 lwkt_reltoken(tok);
1354 return (error);
1358 * MPSAFE
1360 static int
1361 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
1363 struct kqueue *kq = (struct kqueue *)fp->f_data;
1365 bzero((void *)st, sizeof(*st));
1366 st->st_size = kq->kq_count;
1367 st->st_blksize = sizeof(struct kevent);
1368 st->st_mode = S_IFIFO;
1369 return (0);
1373 * MPSAFE
1375 static int
1376 kqueue_close(struct file *fp)
1378 struct kqueue *kq = (struct kqueue *)fp->f_data;
1380 kqueue_terminate(kq);
1382 fp->f_data = NULL;
1383 funsetown(&kq->kq_sigio);
1385 kfree(kq, M_KQUEUE);
1386 return (0);
1389 static void
1390 kqueue_wakeup(struct kqueue *kq)
1392 if (kq->kq_state & KQ_SLEEP) {
1393 kq->kq_state &= ~KQ_SLEEP;
1394 if (kq_wakeup_one)
1395 wakeup_one(kq);
1396 else
1397 wakeup(kq);
1399 KNOTE(&kq->kq_kqinfo.ki_note, 0);
1403 * Calls filterops f_attach function, acquiring mplock if filter is not
1404 * marked as FILTEROP_MPSAFE.
1406 * Caller must be holding the related kq token
1408 static int
1409 filter_attach(struct knote *kn)
1411 int ret;
1413 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1414 ret = kn->kn_fop->f_attach(kn);
1415 } else {
1416 get_mplock();
1417 ret = kn->kn_fop->f_attach(kn);
1418 rel_mplock();
1420 return (ret);
1424 * Detach the knote and drop it, destroying the knote.
1426 * Calls filterops f_detach function, acquiring mplock if filter is not
1427 * marked as FILTEROP_MPSAFE.
1429 * Caller must be holding the related kq token
1431 static void
1432 knote_detach_and_drop(struct knote *kn)
1434 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1435 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1436 kn->kn_fop->f_detach(kn);
1437 } else {
1438 get_mplock();
1439 kn->kn_fop->f_detach(kn);
1440 rel_mplock();
1442 knote_drop(kn);
1446 * Calls filterops f_event function, acquiring mplock if filter is not
1447 * marked as FILTEROP_MPSAFE.
1449 * If the knote is in the middle of being created or deleted we cannot
1450 * safely call the filter op.
1452 * Caller must be holding the related kq token
1454 static int
1455 filter_event(struct knote *kn, long hint)
1457 int ret;
1459 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1460 ret = kn->kn_fop->f_event(kn, hint);
1461 } else {
1462 get_mplock();
1463 ret = kn->kn_fop->f_event(kn, hint);
1464 rel_mplock();
1466 return (ret);
1470 * Walk down a list of knotes, activating them if their event has triggered.
1472 * If we encounter any knotes which are undergoing processing we just mark
1473 * them for reprocessing and do not try to [re]activate the knote. However,
1474 * if a hint is being passed we have to wait and that makes things a bit
1475 * sticky.
1477 void
1478 knote(struct klist *list, long hint)
1480 struct knote *kn, marker;
1482 marker.kn_filter = EVFILT_MARKER;
1483 marker.kn_status = KN_PROCESSING;
1485 lwkt_getpooltoken(list);
1486 if (SLIST_EMPTY(list)) {
1487 lwkt_relpooltoken(list);
1488 return;
1491 SLIST_INSERT_HEAD(list, &marker, kn_next);
1492 while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
1493 struct kqueue *kq;
1494 int last_knote = 0;
1496 if (kn->kn_filter == EVFILT_MARKER) {
1497 /* Skip marker */
1498 SLIST_REMOVE(list, &marker, knote, kn_next);
1499 if (SLIST_NEXT(kn, kn_next) == NULL)
1500 goto done;
1501 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1502 continue;
1505 kq = kn->kn_kq;
1506 lwkt_getpooltoken(kq);
1508 if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
1510 * Don't move the marker; check the knote after
1511 * the marker again.
1513 lwkt_relpooltoken(kq);
1514 continue;
1517 if (kn->kn_status & KN_PROCESSING) {
1519 * Someone else is processing the knote, ask the
1520 * other thread to reprocess it and don't mess
1521 * with it otherwise.
1523 if (hint == 0) {
1525 * Move the marker w/ the kq token, so that
1526 * this knote will not be ripped behind our
1527 * back.
1529 SLIST_REMOVE(list, &marker, knote, kn_next);
1530 if (SLIST_NEXT(kn, kn_next) != NULL)
1531 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1532 else
1533 last_knote = 1;
1534 kn->kn_status |= KN_REPROCESS;
1535 lwkt_relpooltoken(kq);
1537 if (last_knote)
1538 goto done;
1539 continue;
1543 * If the hint is non-zero we have to wait or risk
1544 * losing the state the caller is trying to update.
1546 kn->kn_status |= KN_WAITING | KN_REPROCESS;
1547 tsleep(kn, 0, "knotec", hz);
1550 * Don't move the marker; check this knote again,
1551 * hopefully it is still after the marker. Or it
1552 * was deleted and we would check the next knote.
1554 lwkt_relpooltoken(kq);
1555 continue;
1559 * Become the reprocessing master ourselves.
1561 KASSERT((kn->kn_status & KN_DELETING) == 0,
1562 ("acquire a deleting knote %#x", kn->kn_status));
1563 kn->kn_status |= KN_PROCESSING;
1565 /* Move the marker */
1566 SLIST_REMOVE(list, &marker, knote, kn_next);
1567 if (SLIST_NEXT(kn, kn_next) != NULL)
1568 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1569 else
1570 last_knote = 1;
1573 * If hint is non-zero running the event is mandatory
1574 * so do it whether reprocessing is set or not.
1576 if (filter_event(kn, hint))
1577 KNOTE_ACTIVATE(kn);
1579 knote_release(kn);
1580 lwkt_relpooltoken(kq);
1582 if (last_knote)
1583 goto done;
1585 SLIST_REMOVE(list, &marker, knote, kn_next);
1586 done:
1587 lwkt_relpooltoken(list);
1591 * Insert knote at head of klist.
1593 * This function may only be called via a filter function and thus
1594 * kq_token should already be held and marked for processing.
1596 void
1597 knote_insert(struct klist *klist, struct knote *kn)
1599 lwkt_getpooltoken(klist);
1600 KKASSERT(kn->kn_status & KN_PROCESSING);
1601 SLIST_INSERT_HEAD(klist, kn, kn_next);
1602 lwkt_relpooltoken(klist);
1606 * Remove knote from a klist
1608 * This function may only be called via a filter function and thus
1609 * kq_token should already be held and marked for processing.
1611 void
1612 knote_remove(struct klist *klist, struct knote *kn)
1614 lwkt_getpooltoken(klist);
1615 KKASSERT(kn->kn_status & KN_PROCESSING);
1616 SLIST_REMOVE(klist, kn, knote, kn_next);
1617 lwkt_relpooltoken(klist);
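/*
 * Hedged sketch (excluded from the build) of how a driver-side filter
 * typically uses knote_insert()/knote_remove() and KNOTE(), modeled on
 * kqueue_kqfilter()/filt_kqdetach() above.  "mydev" and its members
 * are assumptions for illustration only.
 */
#if 0
struct mydev {
	struct kqinfo	md_kqinfo;
	int		md_ready;
};

static void
mydev_filt_detach(struct knote *kn)
{
	struct mydev *dev = (struct mydev *)kn->kn_hook;

	knote_remove(&dev->md_kqinfo.ki_note, kn);
}

static int
mydev_filt_read(struct knote *kn, long hint)
{
	struct mydev *dev = (struct mydev *)kn->kn_hook;

	kn->kn_data = dev->md_ready;
	return (kn->kn_data > 0);
}

static struct filterops mydev_read_filtops =
	{ FILTEROP_ISFD, NULL, mydev_filt_detach, mydev_filt_read };

/* The device's kqfilter op attaches the knote to its klist. */
static int
mydev_kqfilter(struct mydev *dev, struct knote *kn)
{
	kn->kn_fop = &mydev_read_filtops;
	kn->kn_hook = (caddr_t)dev;
	knote_insert(&dev->md_kqinfo.ki_note, kn);
	return (0);
}

/* Called when data arrives; activates any attached knotes. */
static void
mydev_data_arrived(struct mydev *dev)
{
	dev->md_ready = 1;
	KNOTE(&dev->md_kqinfo.ki_note, 0);
}
#endif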
1620 #if 0
1622 * Remove all knotes from a specified klist
1624 * Only called from aio.
1626 void
1627 knote_empty(struct klist *list)
1629 struct knote *kn;
1631 lwkt_gettoken(&kq_token);
1632 while ((kn = SLIST_FIRST(list)) != NULL) {
1633 if (knote_acquire(kn))
1634 knote_detach_and_drop(kn);
1636 lwkt_reltoken(&kq_token);
1638 #endif
1640 void
1641 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
1642 struct filterops *ops, void *hook)
1644 struct knote *kn, marker;
1645 int has_note;
1647 marker.kn_filter = EVFILT_MARKER;
1648 marker.kn_status = KN_PROCESSING;
1650 lwkt_getpooltoken(&src->ki_note);
1651 if (SLIST_EMPTY(&src->ki_note)) {
1652 lwkt_relpooltoken(&src->ki_note);
1653 return;
1655 lwkt_getpooltoken(&dst->ki_note);
1657 restart:
1658 has_note = 0;
1659 SLIST_INSERT_HEAD(&src->ki_note, &marker, kn_next);
1660 while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
1661 struct kqueue *kq;
1663 if (kn->kn_filter == EVFILT_MARKER) {
1664 /* Skip marker */
1665 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1666 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1667 continue;
1670 kq = kn->kn_kq;
1671 lwkt_getpooltoken(kq);
1673 if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
1675 * Don't move the marker; check the knote after
1676 * the marker again.
1678 lwkt_relpooltoken(kq);
1679 continue;
1682 /* Move marker */
1683 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1684 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1686 has_note = 1;
1687 if (knote_acquire(kn)) {
1688 knote_remove(&src->ki_note, kn);
1689 kn->kn_fop = ops;
1690 kn->kn_hook = hook;
1691 knote_insert(&dst->ki_note, kn);
1692 knote_release(kn);
1693 /* kn may be invalid now */
1695 lwkt_relpooltoken(kq);
1697 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1698 if (has_note) {
1699 /* Keep draining, until nothing left */
1700 goto restart;
1703 lwkt_relpooltoken(&dst->ki_note);
1704 lwkt_relpooltoken(&src->ki_note);
1708 * Remove all knotes referencing a specified fd
1710 void
1711 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
1713 struct kqueue *kq;
1714 struct knote *kn;
1715 struct knote *kntmp;
1717 lwkt_getpooltoken(&fp->f_klist);
1718 restart:
1719 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
1720 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
1721 kq = kn->kn_kq;
1722 lwkt_getpooltoken(kq);
1724 /* temporary verification hack */
1725 SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
1726 if (kn == kntmp)
1727 break;
1729 if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
1730 kn->kn_id != fd || kn->kn_kq != kq) {
1731 lwkt_relpooltoken(kq);
1732 goto restart;
1734 if (knote_acquire(kn))
1735 knote_detach_and_drop(kn);
1736 lwkt_relpooltoken(kq);
1737 goto restart;
1740 lwkt_relpooltoken(&fp->f_klist);
1744 * Low level attach function.
1746 * The knote should already be marked for processing.
1747 * Caller must hold the related kq token.
1749 static void
1750 knote_attach(struct knote *kn)
1752 struct klist *list;
1753 struct kqueue *kq = kn->kn_kq;
1755 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1756 KKASSERT(kn->kn_fp);
1757 list = &kn->kn_fp->f_klist;
1758 } else {
1759 if (kq->kq_knhashmask == 0)
1760 kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1761 &kq->kq_knhashmask);
1762 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1764 lwkt_getpooltoken(list);
1765 SLIST_INSERT_HEAD(list, kn, kn_link);
1766 lwkt_relpooltoken(list);
1767 TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
1771 * Low level drop function.
1773 * The knote should already be marked for processing.
1774 * Caller must hold the related kq token.
1776 static void
1777 knote_drop(struct knote *kn)
1779 struct kqueue *kq;
1780 struct klist *list;
1782 kq = kn->kn_kq;
1784 if (kn->kn_fop->f_flags & FILTEROP_ISFD)
1785 list = &kn->kn_fp->f_klist;
1786 else
1787 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1789 lwkt_getpooltoken(list);
1790 SLIST_REMOVE(list, kn, knote, kn_link);
1791 lwkt_relpooltoken(list);
1792 TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
1793 if (kn->kn_status & KN_QUEUED)
1794 knote_dequeue(kn);
1795 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1796 fdrop(kn->kn_fp);
1797 kn->kn_fp = NULL;
1799 knote_free(kn);
1803 * Low level enqueue function.
1805 * The knote should already be marked for processing.
1806 * Caller must be holding the kq token
1808 static void
1809 knote_enqueue(struct knote *kn)
1811 struct kqueue *kq = kn->kn_kq;
1813 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1814 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1815 kn->kn_status |= KN_QUEUED;
1816 ++kq->kq_count;
1819 * Send SIGIO on request (typically set up as a mailbox signal)
1821 if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
1822 pgsigio(kq->kq_sigio, SIGIO, 0);
1824 kqueue_wakeup(kq);
1828 * Low level dequeue function.
1830 * The knote should already be marked for processing.
1831 * Caller must be holding the kq token
1833 static void
1834 knote_dequeue(struct knote *kn)
1836 struct kqueue *kq = kn->kn_kq;
1838 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
1839 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1840 kn->kn_status &= ~KN_QUEUED;
1841 kq->kq_count--;
1844 static struct knote *
1845 knote_alloc(void)
1847 return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
1850 static void
1851 knote_free(struct knote *kn)
1853 kfree(kn, M_KQUEUE);