Revert "kqueue: Return value of knote_release is no longer useful."
dragonfly.git: sys/kern/kern_event.c
1 /*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
26 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/proc.h>
33 #include <sys/malloc.h>
34 #include <sys/unistd.h>
35 #include <sys/file.h>
36 #include <sys/lock.h>
37 #include <sys/fcntl.h>
38 #include <sys/queue.h>
39 #include <sys/event.h>
40 #include <sys/eventvar.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/stat.h>
45 #include <sys/sysctl.h>
46 #include <sys/sysproto.h>
47 #include <sys/thread.h>
48 #include <sys/uio.h>
49 #include <sys/signalvar.h>
50 #include <sys/filio.h>
51 #include <sys/ktr.h>
53 #include <sys/thread2.h>
54 #include <sys/file2.h>
55 #include <sys/mplock2.h>
57 #define EVENT_REGISTER 1
58 #define EVENT_PROCESS 2
60 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
62 struct kevent_copyin_args {
63 struct kevent_args *ka;
64 int pchanges;
67 #define KNOTE_CACHE_MAX 8
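/*
 * Per-cpu cache of free knotes, capped at KNOTE_CACHE_MAX entries.
 * kqueue_register() pre-populates and allocates from this cache and
 * knote_free() returns knotes to it, avoiding kmalloc()/kfree() on the
 * common path.
 */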
69 struct knote_cache_list {
70 struct klist knote_cache;
71 int knote_cache_cnt;
72 } __cachealign;
74 static int kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
75 struct knote *marker);
76 static int kqueue_read(struct file *fp, struct uio *uio,
77 struct ucred *cred, int flags);
78 static int kqueue_write(struct file *fp, struct uio *uio,
79 struct ucred *cred, int flags);
80 static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
81 struct ucred *cred, struct sysmsg *msg);
82 static int kqueue_kqfilter(struct file *fp, struct knote *kn);
83 static int kqueue_stat(struct file *fp, struct stat *st,
84 struct ucred *cred);
85 static int kqueue_close(struct file *fp);
86 static void kqueue_wakeup(struct kqueue *kq);
87 static int filter_attach(struct knote *kn);
88 static int filter_event(struct knote *kn, long hint);
91 * MPSAFE
93 static struct fileops kqueueops = {
94 .fo_read = kqueue_read,
95 .fo_write = kqueue_write,
96 .fo_ioctl = kqueue_ioctl,
97 .fo_kqfilter = kqueue_kqfilter,
98 .fo_stat = kqueue_stat,
99 .fo_close = kqueue_close,
100 .fo_shutdown = nofo_shutdown
103 static void knote_attach(struct knote *kn);
104 static void knote_drop(struct knote *kn);
105 static void knote_detach_and_drop(struct knote *kn);
106 static void knote_enqueue(struct knote *kn);
107 static void knote_dequeue(struct knote *kn);
108 static struct knote *knote_alloc(void);
109 static void knote_free(struct knote *kn);
111 static void filt_kqdetach(struct knote *kn);
112 static int filt_kqueue(struct knote *kn, long hint);
113 static int filt_procattach(struct knote *kn);
114 static void filt_procdetach(struct knote *kn);
115 static int filt_proc(struct knote *kn, long hint);
116 static int filt_fileattach(struct knote *kn);
117 static void filt_timerexpire(void *knx);
118 static int filt_timerattach(struct knote *kn);
119 static void filt_timerdetach(struct knote *kn);
120 static int filt_timer(struct knote *kn, long hint);
121 static int filt_userattach(struct knote *kn);
122 static void filt_userdetach(struct knote *kn);
123 static int filt_user(struct knote *kn, long hint);
124 static void filt_usertouch(struct knote *kn, struct kevent *kev,
125 u_long type);
127 static struct filterops file_filtops =
128 { FILTEROP_ISFD | FILTEROP_MPSAFE, filt_fileattach, NULL, NULL };
129 static struct filterops kqread_filtops =
130 { FILTEROP_ISFD | FILTEROP_MPSAFE, NULL, filt_kqdetach, filt_kqueue };
131 static struct filterops proc_filtops =
132 { 0, filt_procattach, filt_procdetach, filt_proc };
133 static struct filterops timer_filtops =
134 { FILTEROP_MPSAFE, filt_timerattach, filt_timerdetach, filt_timer };
135 static struct filterops user_filtops =
136 { FILTEROP_MPSAFE, filt_userattach, filt_userdetach, filt_user };
138 static int kq_ncallouts = 0;
139 static int kq_calloutmax = (4 * 1024);
140 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
141 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
142 static int kq_checkloop = 1000000;
143 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
144 &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
146 #define KNOTE_ACTIVATE(kn) do { \
147 kn->kn_status |= KN_ACTIVE; \
148 if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
149 knote_enqueue(kn); \
150 } while(0)
152 #define KN_HASHSIZE 64 /* XXX should be tunable */
153 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
155 extern struct filterops aio_filtops;
156 extern struct filterops sig_filtops;
159 * Table for all system-defined filters.
161 static struct filterops *sysfilt_ops[] = {
162 &file_filtops, /* EVFILT_READ */
163 &file_filtops, /* EVFILT_WRITE */
164 &aio_filtops, /* EVFILT_AIO */
165 &file_filtops, /* EVFILT_VNODE */
166 &proc_filtops, /* EVFILT_PROC */
167 &sig_filtops, /* EVFILT_SIGNAL */
168 &timer_filtops, /* EVFILT_TIMER */
169 &file_filtops, /* EVFILT_EXCEPT */
170 &user_filtops, /* EVFILT_USER */
173 static struct knote_cache_list knote_cache_lists[MAXCPU];
176 * Acquire a knote, return non-zero on success, 0 on failure.
178 * If we cannot acquire the knote we sleep and return 0. The knote
179 * may be stale on return in this case and the caller must restart
180 * whatever loop they are in.
182 * Related kq token must be held.
184 static __inline int
185 knote_acquire(struct knote *kn)
187 if (kn->kn_status & KN_PROCESSING) {
188 kn->kn_status |= KN_WAITING | KN_REPROCESS;
189 tsleep(kn, 0, "kqepts", hz);
190 /* knote may be stale now */
191 return(0);
193 kn->kn_status |= KN_PROCESSING;
194 return(1);
198 * Release an acquired knote, clearing KN_PROCESSING and handling any
199 * KN_REPROCESS events.
201 * Caller must be holding the related kq token
203 * Non-zero is returned if the knote is destroyed or detached.
205 static __inline int
206 knote_release(struct knote *kn)
208 int ret;
210 while (kn->kn_status & KN_REPROCESS) {
211 kn->kn_status &= ~KN_REPROCESS;
212 if (kn->kn_status & KN_WAITING) {
213 kn->kn_status &= ~KN_WAITING;
214 wakeup(kn);
216 if (kn->kn_status & KN_DELETING) {
217 knote_detach_and_drop(kn);
218 return(1);
219 /* NOT REACHED */
221 if (filter_event(kn, 0))
222 KNOTE_ACTIVATE(kn);
224 if (kn->kn_status & KN_DETACHED)
225 ret = 1;
226 else
227 ret = 0;
228 kn->kn_status &= ~KN_PROCESSING;
229 /* kn should not be accessed anymore */
230 return ret;
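/*
 * Illustrative sketch of the acquire/release pattern used by callers in
 * this file ('list' and 'kn' are assumed locals, the match test is
 * elided):
 *
 *	again:
 *	SLIST_FOREACH(kn, list, kn_link) {
 *		if (... kn matches ...) {
 *			if (knote_acquire(kn) == 0)
 *				goto again;	... knote may be stale ...
 *			break;
 *		}
 *	}
 *	... operate on the acquired knote ...
 *	knote_release(kn);	... kn may be destroyed here ...
 */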
233 static int
234 filt_fileattach(struct knote *kn)
236 return (fo_kqfilter(kn->kn_fp, kn));
240 * MPSAFE
242 static int
243 kqueue_kqfilter(struct file *fp, struct knote *kn)
245 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
247 if (kn->kn_filter != EVFILT_READ)
248 return (EOPNOTSUPP);
250 kn->kn_fop = &kqread_filtops;
251 knote_insert(&kq->kq_kqinfo.ki_note, kn);
252 return (0);
255 static void
256 filt_kqdetach(struct knote *kn)
258 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
260 knote_remove(&kq->kq_kqinfo.ki_note, kn);
263 /*ARGSUSED*/
264 static int
265 filt_kqueue(struct knote *kn, long hint)
267 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
269 kn->kn_data = kq->kq_count;
270 return (kn->kn_data > 0);
273 static int
274 filt_procattach(struct knote *kn)
276 struct proc *p;
277 int immediate;
279 immediate = 0;
280 p = pfind(kn->kn_id);
281 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
282 p = zpfind(kn->kn_id);
283 immediate = 1;
285 if (p == NULL) {
286 return (ESRCH);
288 if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
289 if (p)
290 PRELE(p);
291 return (EACCES);
294 lwkt_gettoken(&p->p_token);
295 kn->kn_ptr.p_proc = p;
296 kn->kn_flags |= EV_CLEAR; /* automatically set */
299 * internal flag indicating registration done by kernel
301 if (kn->kn_flags & EV_FLAG1) {
302 kn->kn_data = kn->kn_sdata; /* ppid */
303 kn->kn_fflags = NOTE_CHILD;
304 kn->kn_flags &= ~EV_FLAG1;
307 knote_insert(&p->p_klist, kn);
310 * Immediately activate any exit notes if the target process is a
311 * zombie. This is necessary to handle the case where the target
312 * process, e.g. a child, dies before the kevent is registered.
314 if (immediate && filt_proc(kn, NOTE_EXIT))
315 KNOTE_ACTIVATE(kn);
316 lwkt_reltoken(&p->p_token);
317 PRELE(p);
319 return (0);
323 * The knote may be attached to a different process, which may exit,
324 * leaving nothing for the knote to be attached to. So when the process
325 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
326 * it will be deleted when read out. However, as part of the knote deletion,
327 * this routine is called, so a check is needed to avoid actually performing
328 * a detach, because the original process does not exist any more.
330 static void
331 filt_procdetach(struct knote *kn)
333 struct proc *p;
335 if (kn->kn_status & KN_DETACHED)
336 return;
337 p = kn->kn_ptr.p_proc;
338 knote_remove(&p->p_klist, kn);
341 static int
342 filt_proc(struct knote *kn, long hint)
344 u_int event;
347 * mask off extra data
349 event = (u_int)hint & NOTE_PCTRLMASK;
352 * if the user is interested in this event, record it.
354 if (kn->kn_sfflags & event)
355 kn->kn_fflags |= event;
358 * Process is gone, so flag the event as finished. Detach the
359 * knote from the process now because the process will be poof,
360 * gone later on.
362 if (event == NOTE_EXIT) {
363 struct proc *p = kn->kn_ptr.p_proc;
364 if ((kn->kn_status & KN_DETACHED) == 0) {
365 PHOLD(p);
366 knote_remove(&p->p_klist, kn);
367 kn->kn_status |= KN_DETACHED;
368 kn->kn_data = p->p_xstat;
369 kn->kn_ptr.p_proc = NULL;
370 PRELE(p);
372 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
373 return (1);
377 * process forked, and user wants to track the new process,
378 * so attach a new knote to it, and immediately report an
379 * event with the parent's pid.
381 if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
382 struct kevent kev;
383 int error;
386 * register knote with new process.
388 kev.ident = hint & NOTE_PDATAMASK; /* pid */
389 kev.filter = kn->kn_filter;
390 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
391 kev.fflags = kn->kn_sfflags;
392 kev.data = kn->kn_id; /* parent */
393 kev.udata = kn->kn_kevent.udata; /* preserve udata */
394 error = kqueue_register(kn->kn_kq, &kev);
395 if (error)
396 kn->kn_fflags |= NOTE_TRACKERR;
399 return (kn->kn_fflags != 0);
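/*
 * Illustrative userland sketch (not part of this file): watch an assumed
 * pid for exit and track its children across fork() on an assumed
 * kqueue kq.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */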
402 static void
403 filt_timerreset(struct knote *kn)
405 struct callout *calloutp;
406 struct timeval tv;
407 int tticks;
409 tv.tv_sec = kn->kn_sdata / 1000;
410 tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
411 tticks = tvtohz_high(&tv);
412 calloutp = (struct callout *)kn->kn_hook;
413 callout_reset(calloutp, tticks, filt_timerexpire, kn);
417 * The callout interlocks with callout_terminate() but can still
418 * race a deletion so if KN_DELETING is set we just don't touch
419 * the knote.
421 static void
422 filt_timerexpire(void *knx)
424 struct knote *kn = knx;
425 struct kqueue *kq = kn->kn_kq;
427 lwkt_getpooltoken(kq);
430 * Open-code knote_acquire(), since we can't sleep in a callout;
431 * however, we do need to record this expiration.
433 kn->kn_data++;
434 if (kn->kn_status & KN_PROCESSING) {
435 kn->kn_status |= KN_REPROCESS;
436 if ((kn->kn_status & KN_DELETING) == 0 &&
437 (kn->kn_flags & EV_ONESHOT) == 0)
438 filt_timerreset(kn);
439 lwkt_relpooltoken(kq);
440 return;
442 KASSERT((kn->kn_status & KN_DELETING) == 0,
443 ("acquire a deleting knote %#x", kn->kn_status));
444 kn->kn_status |= KN_PROCESSING;
446 KNOTE_ACTIVATE(kn);
447 if ((kn->kn_flags & EV_ONESHOT) == 0)
448 filt_timerreset(kn);
450 knote_release(kn);
452 lwkt_relpooltoken(kq);
456 * data contains amount of time to sleep, in milliseconds
458 static int
459 filt_timerattach(struct knote *kn)
461 struct callout *calloutp;
462 int prev_ncallouts;
464 prev_ncallouts = atomic_fetchadd_int(&kq_ncallouts, 1);
465 if (prev_ncallouts >= kq_calloutmax) {
466 atomic_subtract_int(&kq_ncallouts, 1);
467 kn->kn_hook = NULL;
468 return (ENOMEM);
471 kn->kn_flags |= EV_CLEAR; /* automatically set */
472 calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
473 callout_init_mp(calloutp);
474 kn->kn_hook = (caddr_t)calloutp;
476 filt_timerreset(kn);
477 return (0);
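/*
 * Illustrative userland sketch (not part of this file): arm a periodic
 * timer that fires every 200 milliseconds on an assumed kqueue kq.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 200, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */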
481 * This function is called with the knote flagged locked but it is
482 * still possible to race a callout event due to the callback blocking.
483 * We must call callout_terminate() instead of callout_stop() to deal
484 * with the race.
486 static void
487 filt_timerdetach(struct knote *kn)
489 struct callout *calloutp;
491 calloutp = (struct callout *)kn->kn_hook;
492 callout_terminate(calloutp);
493 kfree(calloutp, M_KQUEUE);
494 atomic_subtract_int(&kq_ncallouts, 1);
497 static int
498 filt_timer(struct knote *kn, long hint)
501 return (kn->kn_data != 0);
505 * EVFILT_USER
507 static int
508 filt_userattach(struct knote *kn)
510 kn->kn_hook = NULL;
511 if (kn->kn_fflags & NOTE_TRIGGER)
512 kn->kn_ptr.hookid = 1;
513 else
514 kn->kn_ptr.hookid = 0;
515 return 0;
518 static void
519 filt_userdetach(struct knote *kn)
521 /* nothing to do */
524 static int
525 filt_user(struct knote *kn, long hint)
527 return (kn->kn_ptr.hookid);
530 static void
531 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
533 u_int ffctrl;
535 switch (type) {
536 case EVENT_REGISTER:
537 if (kev->fflags & NOTE_TRIGGER)
538 kn->kn_ptr.hookid = 1;
540 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
541 kev->fflags &= NOTE_FFLAGSMASK;
542 switch (ffctrl) {
543 case NOTE_FFNOP:
544 break;
546 case NOTE_FFAND:
547 kn->kn_sfflags &= kev->fflags;
548 break;
550 case NOTE_FFOR:
551 kn->kn_sfflags |= kev->fflags;
552 break;
554 case NOTE_FFCOPY:
555 kn->kn_sfflags = kev->fflags;
556 break;
558 default:
559 /* XXX Return error? */
560 break;
562 kn->kn_sdata = kev->data;
565 * This is not the correct use of EV_CLEAR in an event
566 * modification; it should have been passed as a NOTE instead.
567 * But we need to maintain compatibility with Apple & FreeBSD.
569 * Note however that EV_CLEAR can still be used when doing
570 * the initial registration of the event and works as expected
571 * (clears the event on reception).
573 if (kev->flags & EV_CLEAR) {
574 kn->kn_ptr.hookid = 0;
575 kn->kn_data = 0;
576 kn->kn_fflags = 0;
578 break;
580 case EVENT_PROCESS:
581 *kev = kn->kn_kevent;
582 kev->fflags = kn->kn_sfflags;
583 kev->data = kn->kn_sdata;
584 if (kn->kn_flags & EV_CLEAR) {
585 kn->kn_ptr.hookid = 0;
586 /* kn_data, kn_fflags handled by parent */
588 break;
590 default:
591 panic("filt_usertouch() - invalid type (%lu)", type);
592 break;
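/*
 * Illustrative userland sketch (not part of this file): arm an
 * EVFILT_USER event on an assumed kqueue kq, then fire it later with
 * NOTE_TRIGGER.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */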
597 * Initialize a kqueue.
599 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
601 * MPSAFE
603 void
604 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
606 TAILQ_INIT(&kq->kq_knpend);
607 TAILQ_INIT(&kq->kq_knlist);
608 kq->kq_count = 0;
609 kq->kq_fdp = fdp;
610 SLIST_INIT(&kq->kq_kqinfo.ki_note);
614 * Terminate a kqueue. Freeing the actual kq itself is left up to the
615 * caller (it might be embedded in a lwp so we don't do it here).
617 * The kq's knlist must be completely eradicated so block on any
618 * processing races.
620 void
621 kqueue_terminate(struct kqueue *kq)
623 struct knote *kn;
625 lwkt_getpooltoken(kq);
626 while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
627 if (knote_acquire(kn))
628 knote_detach_and_drop(kn);
630 lwkt_relpooltoken(kq);
632 if (kq->kq_knhash) {
633 hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
634 kq->kq_knhash = NULL;
635 kq->kq_knhashmask = 0;
640 * MPSAFE
643 sys_kqueue(struct kqueue_args *uap)
645 struct thread *td = curthread;
646 struct kqueue *kq;
647 struct file *fp;
648 int fd, error;
650 error = falloc(td->td_lwp, &fp, &fd);
651 if (error)
652 return (error);
653 fp->f_flag = FREAD | FWRITE;
654 fp->f_type = DTYPE_KQUEUE;
655 fp->f_ops = &kqueueops;
657 kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
658 kqueue_init(kq, td->td_proc->p_fd);
659 fp->f_data = kq;
661 fsetfd(kq->kq_fdp, fp, fd);
662 uap->sysmsg_result = fd;
663 fdrop(fp);
664 return (error);
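/*
 * Illustrative userland sketch (not part of this file): create a kqueue
 * and block until an assumed descriptor fd becomes readable.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */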
668 * Copy 'count' items into the destination list pointed to by uap->eventlist.
670 static int
671 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
673 struct kevent_copyin_args *kap;
674 int error;
676 kap = (struct kevent_copyin_args *)arg;
678 error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
679 if (error == 0) {
680 kap->ka->eventlist += count;
681 *res += count;
682 } else {
683 *res = -1;
686 return (error);
690 * Copy at most 'max' items from the list pointed to by kap->changelist,
691 * return number of items in 'events'.
693 static int
694 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
696 struct kevent_copyin_args *kap;
697 int error, count;
699 kap = (struct kevent_copyin_args *)arg;
701 count = min(kap->ka->nchanges - kap->pchanges, max);
702 error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
703 if (error == 0) {
704 kap->ka->changelist += count;
705 kap->pchanges += count;
706 *events = count;
709 return (error);
713 * MPSAFE
716 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
717 k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
718 struct timespec *tsp_in)
720 struct kevent *kevp;
721 struct timespec *tsp, ats;
722 int i, n, total, error, nerrors = 0;
723 int lres;
724 int limit = kq_checkloop;
725 struct kevent kev[KQ_NEVENTS];
726 struct knote marker;
727 struct lwkt_token *tok;
729 if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
730 atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
732 tsp = tsp_in;
733 *res = 0;
735 for (;;) {
736 n = 0;
737 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
738 if (error)
739 return error;
740 if (n == 0)
741 break;
742 for (i = 0; i < n; i++) {
743 kevp = &kev[i];
744 kevp->flags &= ~EV_SYSFLAGS;
745 error = kqueue_register(kq, kevp);
748 * If a registration returns an error we
749 * immediately post the error. The kevent()
750 * call itself will fail with the error if
751 * no space is available for posting.
753 * Such errors normally bypass the timeout/blocking
754 * code. However, if the copyoutfn function refuses
755 * to post the error (see sys_poll()), then we
756 * ignore it too.
758 if (error || (kevp->flags & EV_RECEIPT)) {
759 kevp->flags = EV_ERROR;
760 kevp->data = error;
761 lres = *res;
762 kevent_copyoutfn(uap, kevp, 1, res);
763 if (*res < 0) {
764 return error;
765 } else if (lres != *res) {
766 nevents--;
767 nerrors++;
772 if (nerrors)
773 return 0;
776 * Acquire/wait for events - setup timeout
778 if (tsp != NULL) {
779 if (tsp->tv_sec || tsp->tv_nsec) {
780 getnanouptime(&ats);
781 timespecadd(tsp, &ats); /* tsp = target time */
786 * Loop as required.
788 * Collect as many events as we can. Sleeping on successive
789 * loops is disabled if copyoutfn has incremented (*res).
791 * The loop stops if an error occurs, all events have been
792 * scanned (the marker has been reached), or fewer than the
793 * maximum number of events is found.
795 * The copyoutfn function does not have to increment (*res) in
796 * order for the loop to continue.
798 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
800 total = 0;
801 error = 0;
802 marker.kn_filter = EVFILT_MARKER;
803 marker.kn_status = KN_PROCESSING;
804 tok = lwkt_token_pool_lookup(kq);
805 lwkt_gettoken(tok);
806 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
807 lwkt_reltoken(tok);
808 while ((n = nevents - total) > 0) {
809 if (n > KQ_NEVENTS)
810 n = KQ_NEVENTS;
813 * If no events are pending sleep until timeout (if any)
814 * or an event occurs.
816 * After the sleep completes the marker is moved to the
817 * end of the list, making any received events available
818 * to our scan.
820 if (kq->kq_count == 0 && *res == 0) {
821 int timeout;
823 if (tsp == NULL) {
824 timeout = 0;
825 } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
826 error = EWOULDBLOCK;
827 break;
828 } else {
829 struct timespec atx = *tsp;
831 getnanouptime(&ats);
832 timespecsub(&atx, &ats);
833 if (atx.tv_sec < 0) {
834 error = EWOULDBLOCK;
835 break;
836 } else {
837 timeout = atx.tv_sec > 24 * 60 * 60 ?
838 24 * 60 * 60 * hz :
839 tstohz_high(&atx);
843 lwkt_gettoken(tok);
844 if (kq->kq_count == 0) {
845 kq->kq_sleep_cnt++;
846 if (__predict_false(kq->kq_sleep_cnt == 0)) {
848 * Guard against possible wrapping. And
849 * set it to 2, so that kqueue_wakeup()
850 * can wake everyone up.
852 kq->kq_sleep_cnt = 2;
854 error = tsleep(kq, PCATCH, "kqread", timeout);
856 /* don't restart after signals... */
857 if (error == ERESTART)
858 error = EINTR;
859 if (error) {
860 lwkt_reltoken(tok);
861 break;
864 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
865 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker,
866 kn_tqe);
868 lwkt_reltoken(tok);
872 * Process all received events
873 * Account for all non-spurious events in our total
875 i = kqueue_scan(kq, kev, n, &marker);
876 if (i) {
877 lres = *res;
878 error = kevent_copyoutfn(uap, kev, i, res);
879 total += *res - lres;
880 if (error)
881 break;
883 if (limit && --limit == 0)
884 panic("kqueue: checkloop failed i=%d", i);
887 * Normally when fewer events are returned than requested
888 * we can stop. However, if only spurious events were
889 * collected the copyout will not bump (*res) and we have
890 * to continue.
892 if (i < n && *res)
893 break;
896 * Deal with an edge case where spurious events can cause
897 * a loop to occur without moving the marker. This can
898 * prevent kqueue_scan() from picking up new events which
899 * race us. We must be sure to move the marker for this
900 * case.
902 * NOTE: We do not want to move the marker if events
903 * were scanned because normal kqueue operations
904 * may reactivate events. Moving the marker in
905 * that case could result in duplicates for the
906 * same event.
908 if (i == 0) {
909 lwkt_gettoken(tok);
910 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
911 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
912 lwkt_reltoken(tok);
915 lwkt_gettoken(tok);
916 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
917 lwkt_reltoken(tok);
919 /* Timeouts do not return EWOULDBLOCK. */
920 if (error == EWOULDBLOCK)
921 error = 0;
922 return error;
926 * MPALMOSTSAFE
929 sys_kevent(struct kevent_args *uap)
931 struct thread *td = curthread;
932 struct proc *p = td->td_proc;
933 struct timespec ts, *tsp;
934 struct kqueue *kq;
935 struct file *fp = NULL;
936 struct kevent_copyin_args *kap, ka;
937 int error;
939 if (uap->timeout) {
940 error = copyin(uap->timeout, &ts, sizeof(ts));
941 if (error)
942 return (error);
943 tsp = &ts;
944 } else {
945 tsp = NULL;
947 fp = holdfp(p->p_fd, uap->fd, -1);
948 if (fp == NULL)
949 return (EBADF);
950 if (fp->f_type != DTYPE_KQUEUE) {
951 fdrop(fp);
952 return (EBADF);
955 kq = (struct kqueue *)fp->f_data;
957 kap = &ka;
958 kap->ka = uap;
959 kap->pchanges = 0;
961 error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
962 kevent_copyin, kevent_copyout, tsp);
964 fdrop(fp);
966 return (error);
970 kqueue_register(struct kqueue *kq, struct kevent *kev)
972 struct filedesc *fdp = kq->kq_fdp;
973 struct klist *list = NULL;
974 struct filterops *fops;
975 struct file *fp = NULL;
976 struct knote *kn = NULL;
977 struct thread *td;
978 int error = 0;
979 struct knote_cache_list *cache_list;
981 if (kev->filter < 0) {
982 if (kev->filter + EVFILT_SYSCOUNT < 0)
983 return (EINVAL);
984 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
985 } else {
987 * XXX
988 * filter attach routine is responsible for ensuring that
989 * the identifier can be attached to it.
991 return (EINVAL);
994 if (fops->f_flags & FILTEROP_ISFD) {
995 /* validate descriptor */
996 fp = holdfp(fdp, kev->ident, -1);
997 if (fp == NULL)
998 return (EBADF);
1001 cache_list = &knote_cache_lists[mycpuid];
1002 if (SLIST_EMPTY(&cache_list->knote_cache)) {
1003 struct knote *new_kn;
1005 new_kn = knote_alloc();
1006 crit_enter();
1007 SLIST_INSERT_HEAD(&cache_list->knote_cache, new_kn, kn_link);
1008 cache_list->knote_cache_cnt++;
1009 crit_exit();
1012 td = curthread;
1013 lwkt_getpooltoken(kq);
1016 * Make sure that only one thread can register events on this kqueue,
1017 * so that we do not suffer any races, even if the registration
1018 * blocked, i.e. the kq token was released, and the kqueue was shared
1019 * between threads (this should be rare though).
1021 while (__predict_false(kq->kq_regtd != NULL && kq->kq_regtd != td)) {
1022 kq->kq_state |= KQ_REGWAIT;
1023 tsleep(&kq->kq_regtd, 0, "kqreg", 0);
1025 if (__predict_false(kq->kq_regtd != NULL)) {
1026 /* Recursive calling of kqueue_register() */
1027 td = NULL;
1028 } else {
1029 /* Owner of the kq_regtd, i.e. td != NULL */
1030 kq->kq_regtd = td;
1033 if (fp != NULL) {
1034 list = &fp->f_klist;
1035 } else if (kq->kq_knhashmask) {
1036 list = &kq->kq_knhash[
1037 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1039 if (list != NULL) {
1040 lwkt_getpooltoken(list);
1041 again:
1042 SLIST_FOREACH(kn, list, kn_link) {
1043 if (kn->kn_kq == kq &&
1044 kn->kn_filter == kev->filter &&
1045 kn->kn_id == kev->ident) {
1046 if (knote_acquire(kn) == 0)
1047 goto again;
1048 break;
1051 lwkt_relpooltoken(list);
1055 * NOTE: At this point if kn is non-NULL we will have acquired
1056 * it and set KN_PROCESSING.
1058 if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
1059 error = ENOENT;
1060 goto done;
1064 * kn now contains the matching knote, or NULL if no match
1066 if (kev->flags & EV_ADD) {
1067 if (kn == NULL) {
1068 crit_enter();
1069 kn = SLIST_FIRST(&cache_list->knote_cache);
1070 if (kn == NULL) {
1071 crit_exit();
1072 kn = knote_alloc();
1073 } else {
1074 SLIST_REMOVE_HEAD(&cache_list->knote_cache,
1075 kn_link);
1076 cache_list->knote_cache_cnt--;
1077 crit_exit();
1079 kn->kn_fp = fp;
1080 kn->kn_kq = kq;
1081 kn->kn_fop = fops;
1084 * apply reference count to knote structure, and
1085 * do not release it at the end of this routine.
1087 fp = NULL;
1089 kn->kn_sfflags = kev->fflags;
1090 kn->kn_sdata = kev->data;
1091 kev->fflags = 0;
1092 kev->data = 0;
1093 kn->kn_kevent = *kev;
1096 * KN_PROCESSING prevents the knote from getting
1097 * ripped out from under us while we are trying
1098 * to attach it, in case the attach blocks.
1100 kn->kn_status = KN_PROCESSING;
1101 knote_attach(kn);
1102 if ((error = filter_attach(kn)) != 0) {
1103 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1104 knote_drop(kn);
1105 goto done;
1109 * Interlock against close races which either tried
1110 * to remove our knote while we were blocked or missed
1111 * it entirely prior to our attachment. We do not
1112 * want to end up with a knote on a closed descriptor.
1114 if ((fops->f_flags & FILTEROP_ISFD) &&
1115 checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
1116 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1118 } else {
1120 * The user may change some filter values after the
1121 * initial EV_ADD, but doing so will not reset any
1122 * filters which have already been triggered.
1124 KKASSERT(kn->kn_status & KN_PROCESSING);
1125 if (fops == &user_filtops) {
1126 filt_usertouch(kn, kev, EVENT_REGISTER);
1127 } else {
1128 kn->kn_sfflags = kev->fflags;
1129 kn->kn_sdata = kev->data;
1130 kn->kn_kevent.udata = kev->udata;
1135 * Execute the filter event to immediately activate the
1136 * knote if necessary. If reprocessing events are pending
1137 * due to blocking above we do not run the filter here
1138 * but instead let knote_release() do it. Otherwise we
1139 * might run the filter on a deleted event.
1141 if ((kn->kn_status & KN_REPROCESS) == 0) {
1142 if (filter_event(kn, 0))
1143 KNOTE_ACTIVATE(kn);
1145 } else if (kev->flags & EV_DELETE) {
1147 * Delete the existing knote
1149 knote_detach_and_drop(kn);
1150 goto done;
1151 } else {
1153 * Modify an existing event.
1155 * The user may change some filter values after the
1156 * initial EV_ADD, but doing so will not reset any
1157 * filters which have already been triggered.
1159 KKASSERT(kn->kn_status & KN_PROCESSING);
1160 if (fops == &user_filtops) {
1161 filt_usertouch(kn, kev, EVENT_REGISTER);
1162 } else {
1163 kn->kn_sfflags = kev->fflags;
1164 kn->kn_sdata = kev->data;
1165 kn->kn_kevent.udata = kev->udata;
1169 * Execute the filter event to immediately activate the
1170 * knote if necessary. If reprocessing events are pending
1171 * due to blocking above we do not run the filter here
1172 * but instead let knote_release() do it. Otherwise we
1173 * might run the filter on a deleted event.
1175 if ((kn->kn_status & KN_REPROCESS) == 0) {
1176 if (filter_event(kn, 0))
1177 KNOTE_ACTIVATE(kn);
1182 * Disablement does not deactivate a knote here.
1184 if ((kev->flags & EV_DISABLE) &&
1185 ((kn->kn_status & KN_DISABLED) == 0)) {
1186 kn->kn_status |= KN_DISABLED;
1190 * Re-enablement may have to immediately enqueue an active knote.
1192 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1193 kn->kn_status &= ~KN_DISABLED;
1194 if ((kn->kn_status & KN_ACTIVE) &&
1195 ((kn->kn_status & KN_QUEUED) == 0)) {
1196 knote_enqueue(kn);
1201 * Handle any required reprocessing
1203 knote_release(kn);
1204 /* kn may be invalid now */
1206 done:
1207 if (td != NULL) { /* Owner of the kq_regtd */
1208 kq->kq_regtd = NULL;
1209 if (__predict_false(kq->kq_state & KQ_REGWAIT)) {
1210 kq->kq_state &= ~KQ_REGWAIT;
1211 wakeup(&kq->kq_regtd);
1214 lwkt_relpooltoken(kq);
1215 if (fp != NULL)
1216 fdrop(fp);
1217 return (error);
1221 * Scan the kqueue, return the number of active events placed in kevp up
1222 * to count.
1224 * Continuous mode events may get recycled, do not continue scanning past
1225 * marker unless no events have been collected.
1227 static int
1228 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
1229 struct knote *marker)
1231 struct knote *kn, local_marker;
1232 int total;
1234 total = 0;
1235 local_marker.kn_filter = EVFILT_MARKER;
1236 local_marker.kn_status = KN_PROCESSING;
1238 lwkt_getpooltoken(kq);
1241 * Collect events.
1243 TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
1244 while (count) {
1245 kn = TAILQ_NEXT(&local_marker, kn_tqe);
1246 if (kn->kn_filter == EVFILT_MARKER) {
1247 /* Marker reached, we are done */
1248 if (kn == marker)
1249 break;
1251 /* Move local marker past some other thread's marker */
1252 kn = TAILQ_NEXT(kn, kn_tqe);
1253 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1254 TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
1255 continue;
1259 * We can't skip a knote undergoing processing, otherwise
1260 * we risk not returning it when the user process expects
1261 * it to be returned. Sleep and retry.
1263 if (knote_acquire(kn) == 0)
1264 continue;
1267 * Remove the event for processing.
1269 * WARNING! We must leave KN_QUEUED set to prevent the
1270 * event from being KNOTE_ACTIVATE()d while
1271 * the queue state is in limbo, in case we
1272 * block.
1274 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1275 kq->kq_count--;
1278 * We have to deal with an extremely important race against
1279 * file descriptor close()s here. The file descriptor can
1280 * disappear MPSAFE, and there is a small window of
1281 * opportunity between that and the call to knote_fdclose().
1283 * If we hit that window here while doselect or dopoll is
1284 * trying to delete a spurious event they will not be able
1285 * to match up the event against a knote and will go haywire.
1287 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
1288 checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
1289 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1292 if (kn->kn_status & KN_DISABLED) {
1294 * If disabled we ensure the event is not queued
1295 * but leave its active bit set. On re-enablement
1296 * the event may be immediately triggered.
1298 kn->kn_status &= ~KN_QUEUED;
1299 } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
1300 (kn->kn_status & KN_DELETING) == 0 &&
1301 filter_event(kn, 0) == 0) {
1303 * If not running in one-shot mode and the event
1304 * is no longer present we ensure it is removed
1305 * from the queue and ignore it.
1307 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1308 } else {
1310 * Post the event
1312 if (kn->kn_fop == &user_filtops)
1313 filt_usertouch(kn, kevp, EVENT_PROCESS);
1314 else
1315 *kevp = kn->kn_kevent;
1316 ++kevp;
1317 ++total;
1318 --count;
1320 if (kn->kn_flags & EV_ONESHOT) {
1321 kn->kn_status &= ~KN_QUEUED;
1322 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1323 } else {
1324 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1325 if (kn->kn_flags & EV_CLEAR) {
1326 kn->kn_data = 0;
1327 kn->kn_fflags = 0;
1329 if (kn->kn_flags & EV_DISPATCH) {
1330 kn->kn_status |= KN_DISABLED;
1332 kn->kn_status &= ~(KN_QUEUED |
1333 KN_ACTIVE);
1334 } else {
1335 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1336 kq->kq_count++;
1342 * Handle any post-processing states
1344 knote_release(kn);
1346 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1348 lwkt_relpooltoken(kq);
1349 return (total);
1353 * XXX
1354 * This could be expanded to call kqueue_scan, if desired.
1356 * MPSAFE
1358 static int
1359 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1361 return (ENXIO);
1365 * MPSAFE
1367 static int
1368 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1370 return (ENXIO);
1374 * MPALMOSTSAFE
1376 static int
1377 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
1378 struct ucred *cred, struct sysmsg *msg)
1380 struct kqueue *kq;
1381 int error;
1383 kq = (struct kqueue *)fp->f_data;
1384 lwkt_getpooltoken(kq);
1385 switch(com) {
1386 case FIOASYNC:
1387 if (*(int *)data)
1388 kq->kq_state |= KQ_ASYNC;
1389 else
1390 kq->kq_state &= ~KQ_ASYNC;
1391 error = 0;
1392 break;
1393 case FIOSETOWN:
1394 error = fsetown(*(int *)data, &kq->kq_sigio);
1395 break;
1396 default:
1397 error = ENOTTY;
1398 break;
1400 lwkt_relpooltoken(kq);
1401 return (error);
1405 * MPSAFE
1407 static int
1408 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
1410 struct kqueue *kq = (struct kqueue *)fp->f_data;
1412 bzero((void *)st, sizeof(*st));
1413 st->st_size = kq->kq_count;
1414 st->st_blksize = sizeof(struct kevent);
1415 st->st_mode = S_IFIFO;
1416 return (0);
1420 * MPSAFE
1422 static int
1423 kqueue_close(struct file *fp)
1425 struct kqueue *kq = (struct kqueue *)fp->f_data;
1427 kqueue_terminate(kq);
1429 fp->f_data = NULL;
1430 funsetown(&kq->kq_sigio);
1432 kfree(kq, M_KQUEUE);
1433 return (0);
1436 static void
1437 kqueue_wakeup(struct kqueue *kq)
1439 if (kq->kq_sleep_cnt) {
1440 u_int sleep_cnt = kq->kq_sleep_cnt;
1442 kq->kq_sleep_cnt = 0;
1443 if (sleep_cnt == 1)
1444 wakeup_one(kq);
1445 else
1446 wakeup(kq);
1448 KNOTE(&kq->kq_kqinfo.ki_note, 0);
1452 * Calls filterops f_attach function, acquiring mplock if filter is not
1453 * marked as FILTEROP_MPSAFE.
1455 * Caller must be holding the related kq token
1457 static int
1458 filter_attach(struct knote *kn)
1460 int ret;
1462 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1463 ret = kn->kn_fop->f_attach(kn);
1464 } else {
1465 get_mplock();
1466 ret = kn->kn_fop->f_attach(kn);
1467 rel_mplock();
1469 return (ret);
1473 * Detach the knote and drop it, destroying the knote.
1475 * Calls filterops f_detach function, acquiring mplock if filter is not
1476 * marked as FILTEROP_MPSAFE.
1478 * Caller must be holding the related kq token
1480 static void
1481 knote_detach_and_drop(struct knote *kn)
1483 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1484 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1485 kn->kn_fop->f_detach(kn);
1486 } else {
1487 get_mplock();
1488 kn->kn_fop->f_detach(kn);
1489 rel_mplock();
1491 knote_drop(kn);
1495 * Calls filterops f_event function, acquiring mplock if filter is not
1496 * marked as FILTEROP_MPSAFE.
1498 * If the knote is in the middle of being created or deleted we cannot
1499 * safely call the filter op.
1501 * Caller must be holding the related kq token
1503 static int
1504 filter_event(struct knote *kn, long hint)
1506 int ret;
1508 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1509 ret = kn->kn_fop->f_event(kn, hint);
1510 } else {
1511 get_mplock();
1512 ret = kn->kn_fop->f_event(kn, hint);
1513 rel_mplock();
1515 return (ret);
1519 * Walk down a list of knotes, activating them if their event has triggered.
1521 * If we encounter any knotes which are undergoing processing we just mark
1522 * them for reprocessing and do not try to [re]activate the knote. However,
1523 * if a hint is being passed we have to wait and that makes things a bit
1524 * sticky.
1526 void
1527 knote(struct klist *list, long hint)
1529 struct knote *kn, marker;
1531 marker.kn_filter = EVFILT_MARKER;
1532 marker.kn_status = KN_PROCESSING;
1534 lwkt_getpooltoken(list);
1535 if (SLIST_EMPTY(list)) {
1536 lwkt_relpooltoken(list);
1537 return;
1540 SLIST_INSERT_HEAD(list, &marker, kn_next);
1541 while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
1542 struct kqueue *kq;
1543 int last_knote = 0;
1545 if (kn->kn_filter == EVFILT_MARKER) {
1546 /* Skip marker */
1547 SLIST_REMOVE(list, &marker, knote, kn_next);
1548 if (SLIST_NEXT(kn, kn_next) == NULL)
1549 goto done;
1550 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1551 continue;
1554 kq = kn->kn_kq;
1555 lwkt_getpooltoken(kq);
1557 if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
1559 * Don't move the marker; check the knote after
1560 * the marker again.
1562 lwkt_relpooltoken(kq);
1563 continue;
1566 if (kn->kn_status & KN_PROCESSING) {
1568 * Someone else is processing the knote, ask the
1569 * other thread to reprocess it and don't mess
1570 * with it otherwise.
1572 if (hint == 0) {
1574 * Move the marker w/ the kq token, so that
1575 * this knote will not be ripped behind our
1576 * back.
1578 SLIST_REMOVE(list, &marker, knote, kn_next);
1579 if (SLIST_NEXT(kn, kn_next) != NULL)
1580 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1581 else
1582 last_knote = 1;
1583 kn->kn_status |= KN_REPROCESS;
1584 lwkt_relpooltoken(kq);
1586 if (last_knote)
1587 goto done;
1588 continue;
1592 * If the hint is non-zero we have to wait or risk
1593 * losing the state the caller is trying to update.
1595 kn->kn_status |= KN_WAITING | KN_REPROCESS;
1596 tsleep(kn, 0, "knotec", hz);
1599 * Don't move the marker; check this knote again,
1600 * hopefully it is still after the marker. Or it
1601 * was deleted and we would check the next knote.
1603 lwkt_relpooltoken(kq);
1604 continue;
1608 * Become the reprocessing master ourselves.
1610 KASSERT((kn->kn_status & KN_DELETING) == 0,
1611 ("acquire a deleting knote %#x", kn->kn_status));
1612 kn->kn_status |= KN_PROCESSING;
1614 /* Move the marker */
1615 SLIST_REMOVE(list, &marker, knote, kn_next);
1616 if (SLIST_NEXT(kn, kn_next) != NULL)
1617 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1618 else
1619 last_knote = 1;
1622 * If hint is non-zero running the event is mandatory
1623 * so do it whether reprocessing is set or not.
1625 if (filter_event(kn, hint))
1626 KNOTE_ACTIVATE(kn);
1628 knote_release(kn);
1629 lwkt_relpooltoken(kq);
1631 if (last_knote)
1632 goto done;
1634 SLIST_REMOVE(list, &marker, knote, kn_next);
1635 done:
1636 lwkt_relpooltoken(list);
1640 * Insert knote at head of klist.
1642 * This function may only be called via a filter function and thus
1643 * kq_token should already be held and marked for processing.
1645 void
1646 knote_insert(struct klist *klist, struct knote *kn)
1648 lwkt_getpooltoken(klist);
1649 KKASSERT(kn->kn_status & KN_PROCESSING);
1650 SLIST_INSERT_HEAD(klist, kn, kn_next);
1651 lwkt_relpooltoken(klist);
1655 * Remove knote from a klist
1657 * This function may only be called via a filter function and thus
1658 * kq_token should already be held and marked for processing.
1660 void
1661 knote_remove(struct klist *klist, struct knote *kn)
1663 lwkt_getpooltoken(klist);
1664 KKASSERT(kn->kn_status & KN_PROCESSING);
1665 SLIST_REMOVE(klist, kn, knote, kn_next);
1666 lwkt_relpooltoken(klist);
1669 void
1670 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
1671 struct filterops *ops, void *hook)
1673 struct knote *kn, marker;
1674 int has_note;
1676 marker.kn_filter = EVFILT_MARKER;
1677 marker.kn_status = KN_PROCESSING;
1679 lwkt_getpooltoken(&src->ki_note);
1680 if (SLIST_EMPTY(&src->ki_note)) {
1681 lwkt_relpooltoken(&src->ki_note);
1682 return;
1684 lwkt_getpooltoken(&dst->ki_note);
1686 restart:
1687 has_note = 0;
1688 SLIST_INSERT_HEAD(&src->ki_note, &marker, kn_next);
1689 while ((kn = SLIST_NEXT(&marker, kn_next)) != NULL) {
1690 struct kqueue *kq;
1692 if (kn->kn_filter == EVFILT_MARKER) {
1693 /* Skip marker */
1694 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1695 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1696 continue;
1699 kq = kn->kn_kq;
1700 lwkt_getpooltoken(kq);
1702 if (kn != SLIST_NEXT(&marker, kn_next) || kn->kn_kq != kq) {
1704 * Don't move the marker; check the knote after
1705 * the marker again.
1707 lwkt_relpooltoken(kq);
1708 continue;
1711 /* Move marker */
1712 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1713 SLIST_INSERT_AFTER(kn, &marker, kn_next);
1715 has_note = 1;
1716 if (knote_acquire(kn)) {
1717 knote_remove(&src->ki_note, kn);
1718 kn->kn_fop = ops;
1719 kn->kn_hook = hook;
1720 knote_insert(&dst->ki_note, kn);
1721 knote_release(kn);
1722 /* kn may be invalid now */
1724 lwkt_relpooltoken(kq);
1726 SLIST_REMOVE(&src->ki_note, &marker, knote, kn_next);
1727 if (has_note) {
1728 /* Keep draining, until nothing left */
1729 goto restart;
1732 lwkt_relpooltoken(&dst->ki_note);
1733 lwkt_relpooltoken(&src->ki_note);
1737 * Remove all knotes referencing a specified fd
1739 void
1740 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
1742 struct kqueue *kq;
1743 struct knote *kn;
1744 struct knote *kntmp;
1746 lwkt_getpooltoken(&fp->f_klist);
1747 restart:
1748 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
1749 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
1750 kq = kn->kn_kq;
1751 lwkt_getpooltoken(kq);
1753 /* temporary verification hack */
1754 SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
1755 if (kn == kntmp)
1756 break;
1758 if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
1759 kn->kn_id != fd || kn->kn_kq != kq) {
1760 lwkt_relpooltoken(kq);
1761 goto restart;
1763 if (knote_acquire(kn))
1764 knote_detach_and_drop(kn);
1765 lwkt_relpooltoken(kq);
1766 goto restart;
1769 lwkt_relpooltoken(&fp->f_klist);
1773 * Low level attach function.
1775 * The knote should already be marked for processing.
1776 * Caller must hold the related kq token.
1778 static void
1779 knote_attach(struct knote *kn)
1781 struct klist *list;
1782 struct kqueue *kq = kn->kn_kq;
1784 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1785 KKASSERT(kn->kn_fp);
1786 list = &kn->kn_fp->f_klist;
1787 } else {
1788 if (kq->kq_knhashmask == 0)
1789 kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1790 &kq->kq_knhashmask);
1791 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1793 lwkt_getpooltoken(list);
1794 SLIST_INSERT_HEAD(list, kn, kn_link);
1795 lwkt_relpooltoken(list);
1796 TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
1800 * Low level drop function.
1802 * The knote should already be marked for processing.
1803 * Caller must hold the related kq token.
1805 static void
1806 knote_drop(struct knote *kn)
1808 struct kqueue *kq;
1809 struct klist *list;
1811 kq = kn->kn_kq;
1813 if (kn->kn_fop->f_flags & FILTEROP_ISFD)
1814 list = &kn->kn_fp->f_klist;
1815 else
1816 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1818 lwkt_getpooltoken(list);
1819 SLIST_REMOVE(list, kn, knote, kn_link);
1820 lwkt_relpooltoken(list);
1821 TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
1822 if (kn->kn_status & KN_QUEUED)
1823 knote_dequeue(kn);
1824 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1825 fdrop(kn->kn_fp);
1826 kn->kn_fp = NULL;
1828 knote_free(kn);
1832 * Low level enqueue function.
1834 * The knote should already be marked for processing.
1835 * Caller must be holding the kq token
1837 static void
1838 knote_enqueue(struct knote *kn)
1840 struct kqueue *kq = kn->kn_kq;
1842 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1843 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1844 kn->kn_status |= KN_QUEUED;
1845 ++kq->kq_count;
1848 * Send SIGIO on request (typically set up as a mailbox signal)
1850 if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
1851 pgsigio(kq->kq_sigio, SIGIO, 0);
1853 kqueue_wakeup(kq);
1857 * Low level dequeue function.
1859 * The knote should already be marked for processing.
1860 * Caller must be holding the kq token
1862 static void
1863 knote_dequeue(struct knote *kn)
1865 struct kqueue *kq = kn->kn_kq;
1867 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
1868 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1869 kn->kn_status &= ~KN_QUEUED;
1870 kq->kq_count--;
1873 static struct knote *
1874 knote_alloc(void)
1876 return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
1879 static void
1880 knote_free(struct knote *kn)
1882 struct knote_cache_list *cache_list;
1884 cache_list = &knote_cache_lists[mycpuid];
1885 if (cache_list->knote_cache_cnt < KNOTE_CACHE_MAX) {
1886 crit_enter();
1887 SLIST_INSERT_HEAD(&cache_list->knote_cache, kn, kn_link);
1888 cache_list->knote_cache_cnt++;
1889 crit_exit();
1890 return;
1892 kfree(kn, M_KQUEUE);