kernel - Add EVFILT_USER
sys/kern/kern_event.c
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#define EVENT_REGISTER  1
#define EVENT_PROCESS   2

/*
 * Global token for kqueue subsystem
 */
#if 0
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
        CTLFLAG_RW, &kq_token.t_collisions, 0,
        "Collision counter of kq_token");
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};

static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);
static void     kqueue_wakeup(struct kqueue *kq);
static int      filter_attach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};

static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_detach_and_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static struct   knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);
static int      filt_userattach(struct knote *kn);
static void     filt_userdetach(struct knote *kn);
static int      filt_user(struct knote *kn, long hint);
static void     filt_usertouch(struct knote *kn, struct kevent *kev,
                    u_long type);

static struct filterops file_filtops =
        { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops =
        { 0, filt_userattach, filt_userdetach, filt_user };
static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int              kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of kevent collection loops before panic");
#define KNOTE_ACTIVATE(kn) do {                                         \
                kn->kn_status |= KN_ACTIVE;                             \
                if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)   \
                        knote_enqueue(kn);                              \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
        &user_filtops,                  /* EVFILT_USER */
};
static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                if (p)
                        PRELE(p);
                return (EACCES);
        }

        lwkt_gettoken(&p->p_token);
        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);

        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        PHOLD(p);
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                        PRELE(p);
                }
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}
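/*
 * Illustrative userland usage of EVFILT_PROC (a sketch only, not part of
 * this file): watch an assumed child pid for exit and fork, tracking any
 * children it creates via NOTE_TRACK.  On NOTE_EXIT, kev.data carries the
 * exit status (p_xstat) recorded above.
 *
 *      struct kevent kev;
 *      int kq = kqueue();
 *
 *      EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *             NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);     (register)
 *      kevent(kq, NULL, 0, &kev, 1, NULL);     (wait for an event)
 *      if (kev.fflags & NOTE_EXIT)
 *              printf("pid %d exited, status %d\n",
 *                     (int)kev.ident, (int)kev.data);
 */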
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
        struct lwkt_token *tok;
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        tok = lwkt_token_pool_lookup(kn->kn_kq);
        lwkt_gettoken(tok);
        if ((kn->kn_status & KN_DELETING) == 0) {
                kn->kn_data++;
                KNOTE_ACTIVATE(kn);

                if ((kn->kn_flags & EV_ONESHOT) == 0) {
                        tv.tv_sec = kn->kn_sdata / 1000;
                        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                        tticks = tvtohz_high(&tv);
                        calloutp = (struct callout *)kn->kn_hook;
                        callout_reset(calloutp, tticks, filt_timerexpire, kn);
                }
        }
        lwkt_reltoken(tok);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax) {
                kn->kn_hook = NULL;
                return (ENOMEM);
        }
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}

/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_terminate(calloutp);
        kfree(calloutp, M_KQUEUE);
        kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
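/*
 * Illustrative userland usage of EVFILT_TIMER (a sketch only): kev.data
 * gives the period in milliseconds.  EV_CLEAR is set automatically by
 * filt_timerattach(), so each retrieval returns the number of expirations
 * (kn_data) accumulated since the last scan.
 *
 *      struct kevent kev;
 *      int kq = kqueue();
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);     (arm a 500ms periodic timer)
 *      kevent(kq, NULL, 0, &kev, 1, NULL);     (kev.data = expiration count)
 */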
/*
 * EVFILT_USER
 */
static int
filt_userattach(struct knote *kn)
{
        kn->kn_hook = NULL;
        if (kn->kn_fflags & NOTE_TRIGGER)
                kn->kn_ptr.hookid = 1;
        else
                kn->kn_ptr.hookid = 0;
        return 0;
}
/*
 * EVFILT_USER knotes are not attached to any kernel object, so there
 * is nothing to tear down here.
 */
static void
filt_userdetach(struct knote *kn)
{
        /* nothing to do */
}

static int
filt_user(struct knote *kn, long hint)
{
        return (kn->kn_ptr.hookid);
}
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
        u_int ffctrl;

        switch (type) {
        case EVENT_REGISTER:
                if (kev->fflags & NOTE_TRIGGER)
                        kn->kn_ptr.hookid = 1;

                ffctrl = kev->fflags & NOTE_FFCTRLMASK;
                kev->fflags &= NOTE_FFLAGSMASK;
                switch (ffctrl) {
                case NOTE_FFNOP:
                        break;

                case NOTE_FFAND:
                        kn->kn_sfflags &= kev->fflags;
                        break;

                case NOTE_FFOR:
                        kn->kn_sfflags |= kev->fflags;
                        break;

                case NOTE_FFCOPY:
                        kn->kn_sfflags = kev->fflags;
                        break;

                default:
                        /* XXX Return error? */
                        break;
                }
                kn->kn_sdata = kev->data;

                /*
                 * This is not the correct use of EV_CLEAR in an event
                 * modification, it should have been passed as a NOTE instead.
                 * But we need to maintain compatibility with Apple & FreeBSD.
                 *
                 * Note however that EV_CLEAR can still be used when doing
                 * the initial registration of the event and works as expected
                 * (clears the event on reception).
                 */
                if (kev->flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                }
                break;

        case EVENT_PROCESS:
                *kev = kn->kn_kevent;
                kev->fflags = kn->kn_sfflags;
                kev->data = kn->kn_sdata;
                if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_ptr.hookid = 0;
                        /* kn_data, kn_fflags handled by parent */
                }
                break;

        default:
                panic("filt_usertouch() - invalid type (%lu)", type);
                break;
        }
}
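/*
 * Illustrative userland usage of EVFILT_USER (a sketch only): one thread
 * registers and waits on the event, another posts NOTE_TRIGGER to wake it.
 * With EV_CLEAR set the event rearms after each retrieval, as implemented
 * in filt_usertouch() above.
 *
 *      struct kevent kev;
 *      int kq = kqueue();
 *
 *      EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *      kevent(kq, NULL, 0, &kev, 1, NULL);     (blocks until triggered)
 *
 * From another thread:
 *
 *      EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 */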
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline
int
knote_acquire(struct knote *kn)
{
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_WAITING | KN_REPROCESS;
                tsleep(kn, 0, "kqepts", hz);
                /* knote may be stale now */
                return(0);
        }
        kn->kn_status |= KN_PROCESSING;
        return(1);
}
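/*
 * Typical caller pattern (a sketch mirroring the again1/again2 loops in
 * kqueue_register() below): because a failed acquisition may leave the
 * knote stale, the enclosing scan must restart from the top.
 *
 *      again:
 *      SLIST_FOREACH(kn, list, kn_link) {
 *              if (kn matches the ident/filter being looked up) {
 *                      if (knote_acquire(kn) == 0)
 *                              goto again;
 *                      break;
 *              }
 *      }
 */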
/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline
int
knote_release(struct knote *kn)
{
        while (kn->kn_status & KN_REPROCESS) {
                kn->kn_status &= ~KN_REPROCESS;
                if (kn->kn_status & KN_WAITING) {
                        kn->kn_status &= ~KN_WAITING;
                        wakeup(kn);
                }
                if (kn->kn_status & KN_DELETING) {
                        knote_detach_and_drop(kn);
                        return(1);
                        /* NOT REACHED */
                }
                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        }
        if (kn->kn_status & KN_DETACHED) {
                kn->kn_status &= ~KN_PROCESSING;
                return(1);
        } else {
                kn->kn_status &= ~KN_PROCESSING;
                return(0);
        }
}

/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_count = 0;
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct lwkt_token *tok;
        struct knote *kn;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        if (kq->kq_knhash) {
                hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
        lwkt_reltoken(tok);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        } else {
                *res = -1;
        }

        return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }

        return (error);
}

/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp;
        int i, n, total, error, nerrors = 0;
        int lres;
        int limit = kq_checkloop;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;
        struct lwkt_token *tok;

        if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
                atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

        tsp = tsp_in;
        *res = 0;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        for ( ;; ) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        goto done;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res < 0) {
                                        goto done;
                                } else if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
        }
        if (nerrors) {
                error = 0;
                goto done;
        }

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                struct timespec ats;

                if (tsp->tv_sec || tsp->tv_nsec) {
                        getnanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;

                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }
                if (limit && --limit == 0)
                        panic("kqueue: checkloop failed i=%d", i);

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;

done:
        lwkt_reltoken(tok);
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
                            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);

        return (error);
}
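/*
 * Illustrative kevent(2) call from userland (a sketch only): collect up
 * to one pending event from an existing kqueue descriptor kq, blocking
 * for at most one second.  A return of 0 means the timeout expired, -1
 * an error.
 *
 *      struct kevent kev;
 *      struct timespec ts = { 1, 0 };
 *      int n;
 *
 *      n = kevent(kq, NULL, 0, &kev, 1, &ts);
 */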
/*
 * Caller must be holding the kq token
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct lwkt_token *tok;
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                return (EINVAL);
        }

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL) {
                        lwkt_reltoken(tok);
                        return (EBADF);
                }
                lwkt_getpooltoken(&fp->f_klist);
again1:
                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                if (knote_acquire(kn) == 0)
                                        goto again1;
                                break;
                        }
                }
                lwkt_relpooltoken(&fp->f_klist);
        } else {
                if (kq->kq_knhashmask) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        lwkt_getpooltoken(list);
again2:
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter) {
                                        if (knote_acquire(kn) == 0)
                                                goto again2;
                                        break;
                                }
                        }
                        lwkt_relpooltoken(list);
                }
        }

        /*
         * NOTE: At this point if kn is non-NULL we will have acquired
         *       it and set KN_PROCESSING.
         */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        /*
                         * KN_PROCESSING prevents the knote from getting
                         * ripped out from under us while we are trying
                         * to attach it, in case the attach blocks.
                         */
                        kn->kn_status = KN_PROCESSING;
                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                                knote_drop(kn);
                                goto done;
                        }

                        /*
                         * Interlock against close races which either tried
                         * to remove our knote while we were blocked or missed
                         * it entirely prior to our attachment.  We do not
                         * want to end up with a knote on a closed descriptor.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
                            checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        KKASSERT(kn->kn_status & KN_PROCESSING);
                        if (fops == &user_filtops) {
                                filt_usertouch(kn, kev, EVENT_REGISTER);
                        } else {
                                kn->kn_sfflags = kev->fflags;
                                kn->kn_sdata = kev->data;
                                kn->kn_kevent.udata = kev->udata;
                        }
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        } else if (kev->flags & EV_DELETE) {
                /*
                 * Delete the existing knote
                 */
                knote_detach_and_drop(kn);
                goto done;
        } else {
                /*
                 * Modify an existing event.
                 *
                 * The user may change some filter values after the
                 * initial EV_ADD, but doing so will not reset any
                 * filters which have already been triggered.
                 */
                KKASSERT(kn->kn_status & KN_PROCESSING);
                if (fops == &user_filtops) {
                        filt_usertouch(kn, kev, EVENT_REGISTER);
                } else {
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        }

        /*
         * Disablement does not deactivate a knote here.
         */
        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        /*
         * Re-enablement may have to immediately enqueue an active knote.
         */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
                }
        }

        /*
         * Handle any required reprocessing
         */
        knote_release(kn);
        /* kn may be invalid now */

done:
        lwkt_reltoken(tok);
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->ts_secs/nsecs are both
 * 0 we do not block at all.
 *
 * Caller must be holding the kq token.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                getnanouptime(&ats);
                timespecsub(&atx, &ats);
                if (atx.tv_sec < 0) {
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }

        /* don't restart after signals... */
        if (error == ERESTART)
                return (EINTR);

        return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 *
 * Caller must be holding the kq token
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        local_marker.kn_status = KN_PROCESSING;

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past another thread's marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                /*
                 * We can't skip a knote undergoing processing, otherwise
                 * we risk not returning it when the user process expects
                 * it should be returned.  Sleep and retry.
                 */
                if (knote_acquire(kn) == 0)
                        continue;

                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
                 *           event from being KNOTE_ACTIVATE()d while
                 *           the queue state is in limbo, in case we
                 *           block.
                 *
                 * WARNING!  We must set KN_PROCESSING to avoid races
                 *           against deletion or another thread's
                 *           processing.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;

                /*
                 * We have to deal with an extremely important race against
                 * file descriptor close()s here.  The file descriptor can
                 * disappear MPSAFE, and there is a small window of
                 * opportunity between that and the call to knote_fdclose().
                 *
                 * If we hit that window here while doselect or dopoll is
                 * trying to delete a spurious event they will not be able
                 * to match up the event against a knote and will go haywire.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
                        kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }

                if (kn->kn_status & KN_DISABLED) {
                        /*
                         * If disabled we ensure the event is not queued
                         * but leave its active bit set.  On re-enablement
                         * the event may be immediately triggered.
                         */
                        kn->kn_status &= ~KN_QUEUED;
                } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                           (kn->kn_status & KN_DELETING) == 0 &&
                           filter_event(kn, 0) == 0) {
                        /*
                         * If not running in one-shot mode and the event
                         * is no longer present we ensure it is removed
                         * from the queue and ignore it.
                         */
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        /*
                         * Post the event
                         */
                        if (kn->kn_fop == &user_filtops)
                                filt_usertouch(kn, kevp, EVENT_PROCESS);
                        else
                                *kevp = kn->kn_kevent;
                        ++kevp;
                        ++total;
                        --count;

                        if (kn->kn_flags & EV_ONESHOT) {
                                kn->kn_status &= ~KN_QUEUED;
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        } else {
                                if (kn->kn_flags & EV_CLEAR) {
                                        kn->kn_data = 0;
                                        kn->kn_fflags = 0;
                                        kn->kn_status &= ~(KN_QUEUED |
                                                           KN_ACTIVE);
                                } else {
                                        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                                        kq->kq_count++;
                                }
                        }
                }

                /*
                 * Handle any post-processing states
                 */
                knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        return (total);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct lwkt_token *tok;
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;
        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(tok);
        return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}
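/*
 * Illustrative consequence of the above (a sketch only): fstat(2) on a
 * kqueue descriptor reports the number of pending events in st_size.
 *
 *      struct stat st;
 *
 *      fstat(kq, &st);         (st.st_size = pending event count)
 */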
/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(&kq->kq_sigio);

        kfree(kq, M_KQUEUE);
        return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}

/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_attach(kn);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        }
        return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
        kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        }
        knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        }
        return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(list);
restart:
        SLIST_FOREACH(kn, list, kn_next) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);

                /* temporary verification hack */
                SLIST_FOREACH(kntmp, list, kn_next) {
                        if (kn == kntmp)
                                break;
                }
                if (kn != kntmp || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                if (kn->kn_status & KN_PROCESSING) {
                        /*
                         * Someone else is processing the knote, ask the
                         * other thread to reprocess it and don't mess
                         * with it otherwise.
                         */
                        if (hint == 0) {
                                kn->kn_status |= KN_REPROCESS;
                                lwkt_relpooltoken(kq);
                                continue;
                        }

                        /*
                         * If the hint is non-zero we have to wait or risk
                         * losing the state the caller is trying to update.
                         *
                         * XXX This is a real problem, certain process
                         *     and signal filters will bump kn_data for
                         *     already-processed notes more than once if
                         *     we restart the list scan.  FIXME.
                         */
                        kn->kn_status |= KN_WAITING | KN_REPROCESS;
                        tsleep(kn, 0, "knotec", hz);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                /*
                 * Become the reprocessing master ourselves.
                 *
                 * If hint is non-zero running the event is mandatory
                 * when not deleting so do it whether reprocessing is
                 * set or not.
                 */
                kn->kn_status |= KN_PROCESSING;
                if ((kn->kn_status & KN_DELETING) == 0) {
                        if (filter_event(kn, hint))
                                KNOTE_ACTIVATE(kn);
                }
                if (knote_release(kn)) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(list);
}
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
        lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_REMOVE(klist, kn, knote, kn_next);
        lwkt_relpooltoken(klist);
}

#if 0
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
        while ((kn = SLIST_FIRST(list)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        lwkt_reltoken(&kq_token);
}
#endif

void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
                    struct filterops *ops, void *hook)
{
        struct kqueue *kq;
        struct knote *kn;

        lwkt_getpooltoken(&src->ki_note);
        lwkt_getpooltoken(&dst->ki_note);
        while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);
                if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        continue;
                }
                if (knote_acquire(kn)) {
                        knote_remove(&src->ki_note, kn);
                        kn->kn_fop = ops;
                        kn->kn_hook = hook;
                        knote_insert(&dst->ki_note, kn);
                        knote_release(kn);
                        /* kn may be invalid now */
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(&dst->ki_note);
        lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(&fp->f_klist);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kq = kn->kn_kq;
                        lwkt_getpooltoken(kq);

                        /* temporary verification hack */
                        SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
                                if (kn == kntmp)
                                        break;
                        }
                        if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
                            kn->kn_id != fd || kn->kn_kq != kq) {
                                lwkt_relpooltoken(kq);
                                goto restart;
                        }
                        if (knote_acquire(kn))
                                knote_detach_and_drop(kn);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
        }
        lwkt_relpooltoken(&fp->f_klist);
}

/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        lwkt_getpooltoken(list);
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
        lwkt_relpooltoken(list);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        lwkt_getpooltoken(list);
        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                fdrop(kn->kn_fp);
                kn->kn_fp = NULL;
        }
        knote_free(kn);
        lwkt_relpooltoken(list);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
        return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
        kfree(kn, M_KQUEUE);
}