kern - Make socket_wait() actually work
[dragonfly.git] / sys / kern / kern_event.c
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

#include <vm/vm_zone.h>

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

struct kevent_copyin_args {
	struct kevent_args	*ka;
	int			pchanges;
};

static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct ucred *cred, struct sysmsg *msg);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st,
		    struct ucred *cred);
static int	kqueue_close(struct file *fp);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_shutdown = nofo_shutdown
};

static void	knote_attach(struct knote *kn);
static void	knote_drop(struct knote *kn);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

static vm_zone_t	knote_zone;
static int		kq_ncallouts = 0;
static int		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

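/*
 * Mark a knote active and queue it on its kqueue's pending list, unless
 * it is already queued or has been disabled.
 */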
#define KNOTE_ACTIVATE(kn) do { 					\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
	&timer_filtops,			/* EVFILT_TIMER */
	&file_filtops,			/* EVFILT_EXCEPT */
};

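/*
 * NOTE: The EVFILT_* constants are small negative integers (EVFILT_READ
 * is -1, EVFILT_WRITE is -2, and so on), so kqueue_register() converts
 * a filter into a 0-based index into this table with ~kev->filter.
 */
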
static int
filt_fileattach(struct knote *kn)
{
	return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	get_mplock();
	if (kn->kn_filter != EVFILT_READ) {
		rel_mplock();
		return (EOPNOTSUPP);
	}

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;

	immediate = 0;
	lwkt_gettoken(&proc_token);
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	}
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}
	if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (EACCES);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn);
	lwkt_reltoken(&proc_token);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;
	/* XXX locking?  this might modify another process. */
	p = kn->kn_ptr.p_proc;
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * Process is gone, so flag the event as finished.  Detach the
	 * knote from the process now because the process will be poof,
	 * gone later on.
	 */
	if (event == NOTE_EXIT) {
		struct proc *p = kn->kn_ptr.p_proc;
		if ((kn->kn_status & KN_DETACHED) == 0) {
			SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
			kn->kn_status |= KN_DETACHED;
			kn->kn_data = p->p_xstat;
			kn->kn_ptr.p_proc = NULL;
		}
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}

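/*
 * Callout handler for EVFILT_TIMER: record the expiration, activate the
 * knote, and re-arm the callout unless the event is one-shot.
 */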
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tv.tv_sec = kn->kn_sdata / 1000;
		tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
		tticks = tvtohz_high(&tv);
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, tticks, filt_timerexpire, kn);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	struct timeval tv;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tv.tv_sec = kn->kn_sdata / 1000;
	tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
	tticks = tvtohz_high(&tv);

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp);
	kn->kn_hook = (caddr_t)calloutp;
	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KQUEUE);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

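/*
 * Illustrative userland registration for the timer filter above (not
 * part of this file; identifier and interval chosen arbitrarily):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);    registers a 500ms periodic
 *					       timer with identifier 1
 */
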
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
	TAILQ_INIT(&kq->kq_knpend);
	TAILQ_INIT(&kq->kq_knlist);
	kq->kq_count = 0;
	kq->kq_fdp = fdp;
	SLIST_INIT(&kq->kq_sel.si_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 */
void
kqueue_terminate(struct kqueue *kq)
{
	struct knote *kn;
	struct klist *list;
	int hv;

	while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
		kn->kn_fop->f_detach(kn);
		if (kn->kn_fop->f_isfd) {
			list = &kn->kn_fp->f_klist;
			SLIST_REMOVE(list, kn, knote, kn_link);
			fdrop(kn->kn_fp);
			kn->kn_fp = NULL;
		} else {
			hv = KN_HASH(kn->kn_id, kq->kq_knhashmask);
			list = &kq->kq_knhash[hv];
			SLIST_REMOVE(list, kn, knote, kn_link);
		}
		TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
		if (kn->kn_status & KN_QUEUED)
			knote_dequeue(kn);
		knote_free(kn);
	}

	if (kq->kq_knhash) {
		kfree(kq->kq_knhash, M_KQUEUE);
		kq->kq_knhash = NULL;
		kq->kq_knhashmask = 0;
	}
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
	struct thread *td = curthread;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
	kqueue_init(kq, td->td_proc->p_fd);
	fp->f_data = kq;

	fsetfd(kq->kq_fdp, fp, fd);
	uap->sysmsg_result = fd;
	fdrop(fp);
	return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
	struct kevent_copyin_args *kap;
	int error;

	kap = (struct kevent_copyin_args *)arg;

	error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
	if (error == 0) {
		kap->ka->eventlist += count;
		*res += count;
	} else {
		*res = -1;
	}

	return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
	struct kevent_copyin_args *kap;
	int error, count;

	kap = (struct kevent_copyin_args *)arg;

	count = min(kap->ka->nchanges - kap->pchanges, max);
	error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
	if (error == 0) {
		kap->ka->changelist += count;
		kap->pchanges += count;
		*events = count;
	}

	return (error);
}

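/*
 * Common backend for kevent(), select(), and poll(): drain the changelist
 * in KQ_NEVENTS-sized chunks through the copyin callback and register each
 * change, then collect triggered events through kqueue_scan() and post
 * them through the copyout callback until nevents are delivered or the
 * timeout expires.
 */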
/*
 * MPALMOSTSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
	    k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
	    struct timespec *tsp_in)
{
	struct kevent *kevp;
	struct timespec *tsp;
	int i, n, total, error, nerrors = 0;
	int lres;
	struct kevent kev[KQ_NEVENTS];
	struct knote marker;

	tsp = tsp_in;
	*res = 0;

	get_mplock();
	for ( ;; ) {
		n = 0;
		error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
		if (error)
			goto done;
		if (n == 0)
			break;
		for (i = 0; i < n; i++) {
			kevp = &kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp);

			/*
			 * If a registration returns an error we
			 * immediately post the error.  The kevent()
			 * call itself will fail with the error if
			 * no space is available for posting.
			 *
			 * Such errors normally bypass the timeout/blocking
			 * code.  However, if the copyoutfn function refuses
			 * to post the error (see sys_poll()), then we
			 * ignore it too.
			 */
			if (error) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					lres = *res;
					kevent_copyoutfn(uap, kevp, 1, res);
					if (lres != *res) {
						nevents--;
						nerrors++;
					}
				} else {
					goto done;
				}
			}
		}
	}
	if (nerrors) {
		error = 0;
		goto done;
	}

	/*
	 * Acquire/wait for events - setup timeout
	 */
	if (tsp != NULL) {
		struct timespec ats;

		if (tsp->tv_sec || tsp->tv_nsec) {
			nanouptime(&ats);
			timespecadd(tsp, &ats);		/* tsp = target time */
		}
	}

	/*
	 * Loop as required.
	 *
	 * Collect as many events as we can.  Sleeping on successive
	 * loops is disabled if copyoutfn has incremented (*res).
	 *
	 * The loop stops if an error occurs, all events have been
	 * scanned (the marker has been reached), or fewer than the
	 * maximum number of events is found.
	 *
	 * The copyoutfn function does not have to increment (*res) in
	 * order for the loop to continue.
	 *
	 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
	 */
	total = 0;
	error = 0;
	marker.kn_filter = EVFILT_MARKER;
	crit_enter();
	TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
	crit_exit();
	while ((n = nevents - total) > 0) {
		if (n > KQ_NEVENTS)
			n = KQ_NEVENTS;

		/*
		 * If no events are pending sleep until timeout (if any)
		 * or an event occurs.
		 *
		 * After the sleep completes the marker is moved to the
		 * end of the list, making any received events available
		 * to our scan.
		 */
		if (kq->kq_count == 0 && *res == 0) {
			error = kqueue_sleep(kq, tsp);
			if (error)
				break;
			crit_enter();
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			crit_exit();
		}

		/*
		 * Process all received events
		 */
		i = kqueue_scan(kq, kev, n, &marker);
		if (i) {
			error = kevent_copyoutfn(uap, kev, i, res);
			total += i;
			if (error)
				break;
		}

		/*
		 * Normally when fewer events are returned than requested
		 * we can stop.  However, if only spurious events were
		 * collected the copyout will not bump (*res) and we have
		 * to continue.
		 */
		if (i < n && *res)
			break;

		/*
		 * Deal with an edge case where spurious events can cause
		 * a loop to occur without moving the marker.  This can
		 * prevent kqueue_scan() from picking up new events which
		 * race us.  We must be sure to move the marker for this
		 * case.
		 *
		 * NOTE: We do not want to move the marker if events
		 *	 were scanned because normal kqueue operations
		 *	 may reactivate events.  Moving the marker in
		 *	 that case could result in duplicates for the
		 *	 same event.
		 */
		if (i == 0) {
			crit_enter();
			TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
			TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
			crit_exit();
		}
	}
	crit_enter();
	TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
	crit_exit();

	/* Timeouts do not return EWOULDBLOCK. */
	if (error == EWOULDBLOCK)
		error = 0;

done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts, *tsp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct kevent_copyin_args *kap, ka;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_KQUEUE) {
		fdrop(fp);
		return (EBADF);
	}

	kq = (struct kqueue *)fp->f_data;

	kap = &ka;
	kap->ka = uap;
	kap->pchanges = 0;

	error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
	    kevent_copyin, kevent_copyout, tsp);

	fdrop(fp);

	return (error);
}

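/*
 * Illustrative userland usage of the kqueue()/kevent() syscalls above
 * (not part of this file; error handling omitted, 'fd' is any
 * kevent-capable descriptor):
 *
 *	struct kevent chg, ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);    register the event
 *	kevent(kq, NULL, 0, &ev, 1, NULL);     wait for one event
 */
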
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		kprintf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		fp = holdfp(fdp, kev->ident, -1);
		if (fp == NULL)
			return (EBADF);

		SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
			if (kn->kn_kq == kq &&
			    kn->kn_filter == kev->filter &&
			    kn->kn_id == kev->ident) {
				break;
			}
		}
	} else {
		if (kq->kq_knhashmask) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link) {
				if (kn->kn_id == kev->ident &&
				    kn->kn_filter == kev->filter)
					break;
			}
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		crit_enter();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		crit_exit();
	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		crit_enter();
		kn->kn_status |= KN_DISABLED;
		crit_exit();
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		crit_enter();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		crit_exit();
	}

done:
	if (fp != NULL)
		fdrop(fp);
	return (error);
}

/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
	int error = 0;

	crit_enter();
	if (tsp == NULL) {
		kq->kq_state |= KQ_SLEEP;
		error = tsleep(kq, PCATCH, "kqread", 0);
	} else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
		error = EWOULDBLOCK;
	} else {
		struct timespec ats;
		struct timespec atx = *tsp;
		int timeout;

		nanouptime(&ats);
		timespecsub(&atx, &ats);
		if (atx.tv_sec < 0) {
			error = EWOULDBLOCK;
		} else {
			timeout = atx.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tstohz_high(&atx);
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PCATCH, "kqread", timeout);
		}
	}
	crit_exit();

	/* don't restart after signals... */
	if (error == ERESTART)
		return (EINTR);

	return (error);
}

/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 */
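/*
 * A local marker tracks this thread's position in the pending list so
 * that concurrent scans of the same kqueue, each with their own markers,
 * do not interfere with one another.
 */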
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
	    struct knote *marker)
{
	struct knote *kn, local_marker;
	int total;

	total = 0;
	local_marker.kn_filter = EVFILT_MARKER;
	crit_enter();

	/*
	 * Collect events.
	 */
	TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
	while (count) {
		kn = TAILQ_NEXT(&local_marker, kn_tqe);
		if (kn->kn_filter == EVFILT_MARKER) {
			/* Marker reached, we are done */
			if (kn == marker)
				break;

			/* Move local marker past some other thread's marker */
			kn = TAILQ_NEXT(kn, kn_tqe);
			TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
			TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp++ = kn->kn_kevent;
		++total;
		--count;

		/*
		 * Post-event action on the note
		 */
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			crit_exit();
			kn->kn_fop->f_detach(kn);
			knote_drop(kn);
			crit_enter();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
		}
	}
	TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

	crit_exit();
	return (total);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	     struct ucred *cred, struct sysmsg *msg)
{
	struct kqueue *kq;
	int error;

	get_mplock();
	kq = (struct kqueue *)fp->f_data;

	switch(com) {
	case FIOASYNC:
		if (*(int *)data)
			kq->kq_state |= KQ_ASYNC;
		else
			kq->kq_state &= ~KQ_ASYNC;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &kq->kq_sigio);
		break;
	default:
		error = ENOTTY;
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
kqueue_close(struct file *fp)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	get_mplock();

	kqueue_terminate(kq);

	fp->f_data = NULL;
	funsetown(kq->kq_sigio);
	rel_mplock();

	kfree(kq, M_KQUEUE);
	return (0);
}

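/*
 * Wake up any thread sleeping on the kqueue and notify select()ers and
 * other kqueues monitoring this one.
 */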
void
kqueue_wakeup(struct kqueue *kq)
{
	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
	struct knote *kn;

restart:
	SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
		if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
			kn->kn_fop->f_detach(kn);
			knote_drop(kn);
			goto restart;
		}
	}
}

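/*
 * Link the knote into its fd's klist (or the kqueue's hash table for
 * non-fd events) and onto the kqueue's master knote list.
 */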
static void
knote_attach(struct knote *kn)
{
	struct klist *list;
	struct kqueue *kq = kn->kn_kq;

	if (kn->kn_fop->f_isfd) {
		KKASSERT(kn->kn_fp);
		list = &kn->kn_fp->f_klist;
	} else {
		if (kq->kq_knhashmask == 0)
			kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
						 &kq->kq_knhashmask);
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}
	SLIST_INSERT_HEAD(list, kn, kn_link);
	TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
	kn->kn_status = 0;
}

/*
 * should be called outside of a critical section, since we don't want to
 * hold a critical section while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	if (kn->kn_fop->f_isfd)
		list = &kn->kn_fp->f_klist;
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp);
	knote_free(kn);
}

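/*
 * Queue the knote on its kqueue's pending list and wake up any waiters.
 */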
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	crit_enter();
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	++kq->kq_count;

	/*
	 * Send SIGIO on request (typically set up as a mailbox signal)
	 */
	if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
		pgsigio(kq->kq_sigio, SIGIO, 0);
	crit_exit();
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
	crit_enter();

	TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	crit_exit();
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}