1 /*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
26 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
27 * $DragonFly: src/sys/kern/kern_event.c,v 1.33 2007/02/03 17:05:57 corecode Exp $
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/proc.h>
34 #include <sys/malloc.h>
35 #include <sys/unistd.h>
36 #include <sys/file.h>
37 #include <sys/lock.h>
38 #include <sys/fcntl.h>
39 #include <sys/queue.h>
40 #include <sys/event.h>
41 #include <sys/eventvar.h>
42 #include <sys/protosw.h>
43 #include <sys/socket.h>
44 #include <sys/socketvar.h>
45 #include <sys/stat.h>
46 #include <sys/sysctl.h>
47 #include <sys/sysproto.h>
48 #include <sys/thread.h>
49 #include <sys/uio.h>
50 #include <sys/signalvar.h>
51 #include <sys/filio.h>
52 #include <sys/ktr.h>
54 #include <sys/thread2.h>
55 #include <sys/file2.h>
56 #include <sys/mplock2.h>
59 * Global token for kqueue subsystem
61 struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
62 SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
63 CTLFLAG_RW, &kq_token.t_collisions, 0,
64 "Collision counter of kq_token");
66 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
68 struct kevent_copyin_args {
69 struct kevent_args *ka;
70 int pchanges;
73 static int kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
74 static int kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
75 struct knote *marker);
76 static int kqueue_read(struct file *fp, struct uio *uio,
77 struct ucred *cred, int flags);
78 static int kqueue_write(struct file *fp, struct uio *uio,
79 struct ucred *cred, int flags);
80 static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
81 struct ucred *cred, struct sysmsg *msg);
82 static int kqueue_kqfilter(struct file *fp, struct knote *kn);
83 static int kqueue_stat(struct file *fp, struct stat *st,
84 struct ucred *cred);
85 static int kqueue_close(struct file *fp);
86 static void kqueue_wakeup(struct kqueue *kq);
87 static int filter_attach(struct knote *kn);
88 static int filter_event(struct knote *kn, long hint);
91 * MPSAFE
93 static struct fileops kqueueops = {
94 .fo_read = kqueue_read,
95 .fo_write = kqueue_write,
96 .fo_ioctl = kqueue_ioctl,
97 .fo_kqfilter = kqueue_kqfilter,
98 .fo_stat = kqueue_stat,
99 .fo_close = kqueue_close,
100 .fo_shutdown = nofo_shutdown
103 static void knote_attach(struct knote *kn);
104 static void knote_drop(struct knote *kn);
105 static void knote_detach_and_drop(struct knote *kn);
106 static void knote_enqueue(struct knote *kn);
107 static void knote_dequeue(struct knote *kn);
108 static struct knote *knote_alloc(void);
109 static void knote_free(struct knote *kn);
111 static void filt_kqdetach(struct knote *kn);
112 static int filt_kqueue(struct knote *kn, long hint);
113 static int filt_procattach(struct knote *kn);
114 static void filt_procdetach(struct knote *kn);
115 static int filt_proc(struct knote *kn, long hint);
116 static int filt_fileattach(struct knote *kn);
117 static void filt_timerexpire(void *knx);
118 static int filt_timerattach(struct knote *kn);
119 static void filt_timerdetach(struct knote *kn);
120 static int filt_timer(struct knote *kn, long hint);
122 static struct filterops file_filtops =
123 { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
124 static struct filterops kqread_filtops =
125 { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
126 static struct filterops proc_filtops =
127 { 0, filt_procattach, filt_procdetach, filt_proc };
128 static struct filterops timer_filtops =
129 { 0, filt_timerattach, filt_timerdetach, filt_timer };
131 static int kq_ncallouts = 0;
132 static int kq_calloutmax = (4 * 1024);
133 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
134 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
135 static int kq_checkloop = 1000000;
136 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
137 	&kq_checkloop, 0, "Maximum number of kevent collection loops before panic");
139 #define KNOTE_ACTIVATE(kn) do { \
140 kn->kn_status |= KN_ACTIVE; \
141 if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
142 knote_enqueue(kn); \
143 } while(0)
145 #define KN_HASHSIZE 64 /* XXX should be tunable */
146 #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
148 extern struct filterops aio_filtops;
149 extern struct filterops sig_filtops;
152 * Table for all system-defined filters.
154 static struct filterops *sysfilt_ops[] = {
155 &file_filtops, /* EVFILT_READ */
156 &file_filtops, /* EVFILT_WRITE */
157 &aio_filtops, /* EVFILT_AIO */
158 &file_filtops, /* EVFILT_VNODE */
159 &proc_filtops, /* EVFILT_PROC */
160 &sig_filtops, /* EVFILT_SIGNAL */
161 &timer_filtops, /* EVFILT_TIMER */
162 	&file_filtops,			/* EVFILT_EXCEPT */
163 };
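/*
 * Worked example of the negative-filter indexing used by kqueue_register()
 * below (illustrative only): filter values are small negative integers, so
 * ~kev->filter maps them to a 0-based index into this table.  With
 * EVFILT_READ == -1, ~EVFILT_READ == 0 selects file_filtops; with
 * EVFILT_TIMER == -7, ~EVFILT_TIMER == 6 selects timer_filtops.
 */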
165 static int
166 filt_fileattach(struct knote *kn)
168 return (fo_kqfilter(kn->kn_fp, kn));
172 * MPSAFE
174 static int
175 kqueue_kqfilter(struct file *fp, struct knote *kn)
177 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
179 if (kn->kn_filter != EVFILT_READ)
180 return (EOPNOTSUPP);
182 kn->kn_fop = &kqread_filtops;
183 knote_insert(&kq->kq_kqinfo.ki_note, kn);
184 return (0);
187 static void
188 filt_kqdetach(struct knote *kn)
190 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
192 knote_remove(&kq->kq_kqinfo.ki_note, kn);
195 /*ARGSUSED*/
196 static int
197 filt_kqueue(struct knote *kn, long hint)
199 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
201 kn->kn_data = kq->kq_count;
202 return (kn->kn_data > 0);
205 static int
206 filt_procattach(struct knote *kn)
208 struct proc *p;
209 int immediate;
211 immediate = 0;
212 p = pfind(kn->kn_id);
213 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
214 p = zpfind(kn->kn_id);
215 immediate = 1;
217 if (p == NULL) {
218 return (ESRCH);
220 if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
221 if (p)
222 PRELE(p);
223 return (EACCES);
226 lwkt_gettoken(&p->p_token);
227 kn->kn_ptr.p_proc = p;
228 kn->kn_flags |= EV_CLEAR; /* automatically set */
231 * internal flag indicating registration done by kernel
233 if (kn->kn_flags & EV_FLAG1) {
234 kn->kn_data = kn->kn_sdata; /* ppid */
235 kn->kn_fflags = NOTE_CHILD;
236 kn->kn_flags &= ~EV_FLAG1;
239 knote_insert(&p->p_klist, kn);
242 * Immediately activate any exit notes if the target process is a
243 * zombie. This is necessary to handle the case where the target
244 * process, e.g. a child, dies before the kevent is registered.
246 if (immediate && filt_proc(kn, NOTE_EXIT))
247 KNOTE_ACTIVATE(kn);
248 lwkt_reltoken(&p->p_token);
249 PRELE(p);
251 return (0);
255 * The knote may be attached to a different process, which may exit,
256 * leaving nothing for the knote to be attached to. So when the process
257 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
258 * it will be deleted when read out. However, as part of the knote deletion,
259 * this routine is called, so a check is needed to avoid actually performing
260 * a detach, because the original process does not exist any more.
262 static void
263 filt_procdetach(struct knote *kn)
265 struct proc *p;
267 if (kn->kn_status & KN_DETACHED)
268 return;
269 /* XXX locking? take proc_token here? */
270 p = kn->kn_ptr.p_proc;
271 knote_remove(&p->p_klist, kn);
274 static int
275 filt_proc(struct knote *kn, long hint)
277 u_int event;
280 * mask off extra data
282 event = (u_int)hint & NOTE_PCTRLMASK;
285 * if the user is interested in this event, record it.
287 if (kn->kn_sfflags & event)
288 kn->kn_fflags |= event;
291 * Process is gone, so flag the event as finished. Detach the
292 * knote from the process now because the process will be poof,
293 * gone later on.
295 if (event == NOTE_EXIT) {
296 struct proc *p = kn->kn_ptr.p_proc;
297 if ((kn->kn_status & KN_DETACHED) == 0) {
298 knote_remove(&p->p_klist, kn);
299 kn->kn_status |= KN_DETACHED;
300 kn->kn_data = p->p_xstat;
301 kn->kn_ptr.p_proc = NULL;
303 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
304 return (1);
308 * process forked, and user wants to track the new process,
309 * so attach a new knote to it, and immediately report an
310 * event with the parent's pid.
312 if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
313 struct kevent kev;
314 int error;
317 * register knote with new process.
319 kev.ident = hint & NOTE_PDATAMASK; /* pid */
320 kev.filter = kn->kn_filter;
321 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
322 kev.fflags = kn->kn_sfflags;
323 kev.data = kn->kn_id; /* parent */
324 kev.udata = kn->kn_kevent.udata; /* preserve udata */
325 error = kqueue_register(kn->kn_kq, &kev);
326 if (error)
327 kn->kn_fflags |= NOTE_TRACKERR;
330 return (kn->kn_fflags != 0);
334 * The callout interlocks with callout_stop() (or should), so the
335 * knote should still be a valid structure. However the timeout
336 * can race a deletion so if KN_DELETING is set we just don't touch
337 * the knote.
339 static void
340 filt_timerexpire(void *knx)
342 struct knote *kn = knx;
343 struct callout *calloutp;
344 struct timeval tv;
345 int tticks;
347 lwkt_gettoken(&kq_token);
348 if ((kn->kn_status & KN_DELETING) == 0) {
349 kn->kn_data++;
350 KNOTE_ACTIVATE(kn);
352 if ((kn->kn_flags & EV_ONESHOT) == 0) {
353 tv.tv_sec = kn->kn_sdata / 1000;
354 tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
355 tticks = tvtohz_high(&tv);
356 calloutp = (struct callout *)kn->kn_hook;
357 callout_reset(calloutp, tticks, filt_timerexpire, kn);
360 lwkt_reltoken(&kq_token);
364 * data contains amount of time to sleep, in milliseconds
366 static int
367 filt_timerattach(struct knote *kn)
369 struct callout *calloutp;
370 struct timeval tv;
371 int tticks;
373 if (kq_ncallouts >= kq_calloutmax) {
374 kn->kn_hook = NULL;
375 return (ENOMEM);
377 kq_ncallouts++;
379 tv.tv_sec = kn->kn_sdata / 1000;
380 tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
381 tticks = tvtohz_high(&tv);
383 kn->kn_flags |= EV_CLEAR; /* automatically set */
384 MALLOC(calloutp, struct callout *, sizeof(*calloutp),
385 M_KQUEUE, M_WAITOK);
386 callout_init(calloutp);
387 kn->kn_hook = (caddr_t)calloutp;
388 callout_reset(calloutp, tticks, filt_timerexpire, kn);
390 return (0);
393 static void
394 filt_timerdetach(struct knote *kn)
396 struct callout *calloutp;
398 calloutp = (struct callout *)kn->kn_hook;
399 callout_stop(calloutp);
400 FREE(calloutp, M_KQUEUE);
401 kq_ncallouts--;
404 static int
405 filt_timer(struct knote *kn, long hint)
408 return (kn->kn_data != 0);
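/*
 * Illustrative userland sketch of driving the timer filter above (assumes
 * only the documented kevent(2) API; kq is a descriptor obtained from
 * kqueue() and the ident value 1 is arbitrary): register a periodic timer
 * firing every 500 milliseconds, the data field being in milliseconds as
 * noted above filt_timerattach().  Each expiration bumps kn_data via
 * filt_timerexpire().
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */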
412 * Acquire a knote, return non-zero on success, 0 on failure.
414 * If we cannot acquire the knote we sleep and return 0. The knote
415 * may be stale on return in this case and the caller must restart
416 * whatever loop they are in.
418 static __inline
420 knote_acquire(struct knote *kn)
422 if (kn->kn_status & KN_PROCESSING) {
423 kn->kn_status |= KN_WAITING | KN_REPROCESS;
424 tsleep(kn, 0, "kqepts", hz);
425 /* knote may be stale now */
426 return(0);
428 kn->kn_status |= KN_PROCESSING;
429 return(1);
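/*
 * Typical caller pattern (a sketch mirroring the again1:/again2: loops in
 * kqueue_register() below; kn_matches() is a stand-in for whatever match
 * test the caller performs): restart the scan whenever acquisition fails,
 * because the knote may have been freed or reused while we slept.
 *
 * again:
 *	SLIST_FOREACH(kn, list, kn_link) {
 *		if (kn_matches(kn)) {
 *			if (knote_acquire(kn) == 0)
 *				goto again;
 *			break;
 *		}
 *	}
 */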
433 * Release an acquired knote, clearing KN_PROCESSING and handling any
434 * KN_REPROCESS events.
436 * Non-zero is returned if the knote is destroyed.
438 static __inline
440 knote_release(struct knote *kn)
442 while (kn->kn_status & KN_REPROCESS) {
443 kn->kn_status &= ~KN_REPROCESS;
444 if (kn->kn_status & KN_WAITING) {
445 kn->kn_status &= ~KN_WAITING;
446 wakeup(kn);
448 if (kn->kn_status & KN_DELETING) {
449 knote_detach_and_drop(kn);
450 return(1);
451 /* NOT REACHED */
453 if (filter_event(kn, 0))
454 KNOTE_ACTIVATE(kn);
456 kn->kn_status &= ~KN_PROCESSING;
457 return(0);
461 * Initialize a kqueue.
463 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
465 * MPSAFE
467 void
468 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
470 TAILQ_INIT(&kq->kq_knpend);
471 TAILQ_INIT(&kq->kq_knlist);
472 kq->kq_count = 0;
473 kq->kq_fdp = fdp;
474 SLIST_INIT(&kq->kq_kqinfo.ki_note);
478 * Terminate a kqueue. Freeing the actual kq itself is left up to the
479 * caller (it might be embedded in a lwp so we don't do it here).
481 * The kq's knlist must be completely eradicated so block on any
482 * processing races.
484 void
485 kqueue_terminate(struct kqueue *kq)
487 struct knote *kn;
489 lwkt_gettoken(&kq_token);
490 while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
491 if (knote_acquire(kn))
492 knote_detach_and_drop(kn);
494 if (kq->kq_knhash) {
495 kfree(kq->kq_knhash, M_KQUEUE);
496 kq->kq_knhash = NULL;
497 kq->kq_knhashmask = 0;
499 lwkt_reltoken(&kq_token);
503 * MPSAFE
506 sys_kqueue(struct kqueue_args *uap)
508 struct thread *td = curthread;
509 struct kqueue *kq;
510 struct file *fp;
511 int fd, error;
513 error = falloc(td->td_lwp, &fp, &fd);
514 if (error)
515 return (error);
516 fp->f_flag = FREAD | FWRITE;
517 fp->f_type = DTYPE_KQUEUE;
518 fp->f_ops = &kqueueops;
520 kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
521 kqueue_init(kq, td->td_proc->p_fd);
522 fp->f_data = kq;
524 fsetfd(kq->kq_fdp, fp, fd);
525 uap->sysmsg_result = fd;
526 fdrop(fp);
527 return (error);
531 * Copy 'count' items into the destination list pointed to by uap->eventlist.
533 static int
534 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
536 struct kevent_copyin_args *kap;
537 int error;
539 kap = (struct kevent_copyin_args *)arg;
541 error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
542 if (error == 0) {
543 kap->ka->eventlist += count;
544 *res += count;
545 } else {
546 *res = -1;
549 return (error);
553 * Copy at most 'max' items from the list pointed to by kap->changelist,
554 * return number of items in 'events'.
556 static int
557 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
559 struct kevent_copyin_args *kap;
560 int error, count;
562 kap = (struct kevent_copyin_args *)arg;
564 count = min(kap->ka->nchanges - kap->pchanges, max);
565 error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
566 if (error == 0) {
567 kap->ka->changelist += count;
568 kap->pchanges += count;
569 *events = count;
572 return (error);
576 * MPSAFE
579 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
580 k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
581 struct timespec *tsp_in)
583 struct kevent *kevp;
584 struct timespec *tsp;
585 int i, n, total, error, nerrors = 0;
586 int lres;
587 int limit = kq_checkloop;
588 struct kevent kev[KQ_NEVENTS];
589 struct knote marker;
591 tsp = tsp_in;
592 *res = 0;
594 lwkt_gettoken(&kq_token);
595 for ( ;; ) {
596 n = 0;
597 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
598 if (error)
599 goto done;
600 if (n == 0)
601 break;
602 for (i = 0; i < n; i++) {
603 kevp = &kev[i];
604 kevp->flags &= ~EV_SYSFLAGS;
605 error = kqueue_register(kq, kevp);
608 * If a registration returns an error we
609 * immediately post the error. The kevent()
610 * call itself will fail with the error if
611 * no space is available for posting.
613 * Such errors normally bypass the timeout/blocking
614 * code. However, if the copyoutfn function refuses
615 * to post the error (see sys_poll()), then we
616 * ignore it too.
618 if (error) {
619 kevp->flags = EV_ERROR;
620 kevp->data = error;
621 lres = *res;
622 kevent_copyoutfn(uap, kevp, 1, res);
623 if (lres != *res) {
624 nevents--;
625 nerrors++;
630 if (nerrors) {
631 error = 0;
632 goto done;
636 * Acquire/wait for events - setup timeout
638 if (tsp != NULL) {
639 struct timespec ats;
641 if (tsp->tv_sec || tsp->tv_nsec) {
642 nanouptime(&ats);
643 timespecadd(tsp, &ats); /* tsp = target time */
648 * Loop as required.
650 * Collect as many events as we can. Sleeping on successive
651 * loops is disabled if copyoutfn has incremented (*res).
653 * The loop stops if an error occurs, all events have been
654 * scanned (the marker has been reached), or fewer than the
655 * maximum number of events is found.
657 * The copyoutfn function does not have to increment (*res) in
658 * order for the loop to continue.
660 * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
662 total = 0;
663 error = 0;
664 marker.kn_filter = EVFILT_MARKER;
665 marker.kn_status = KN_PROCESSING;
666 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
667 while ((n = nevents - total) > 0) {
668 if (n > KQ_NEVENTS)
669 n = KQ_NEVENTS;
672 * If no events are pending sleep until timeout (if any)
673 * or an event occurs.
675 * After the sleep completes the marker is moved to the
676 * end of the list, making any received events available
677 * to our scan.
679 if (kq->kq_count == 0 && *res == 0) {
680 error = kqueue_sleep(kq, tsp);
681 if (error)
682 break;
684 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
685 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
689 * Process all received events
690 * Account for all non-spurious events in our total
692 i = kqueue_scan(kq, kev, n, &marker);
693 if (i) {
694 lres = *res;
695 error = kevent_copyoutfn(uap, kev, i, res);
696 total += *res - lres;
697 if (error)
698 break;
700 if (limit && --limit == 0)
701 panic("kqueue: checkloop failed i=%d", i);
704 * Normally when fewer events are returned than requested
705 * we can stop. However, if only spurious events were
706 * collected the copyout will not bump (*res) and we have
707 * to continue.
709 if (i < n && *res)
710 break;
713 * Deal with an edge case where spurious events can cause
714 * a loop to occur without moving the marker. This can
715 * prevent kqueue_scan() from picking up new events which
716 * race us. We must be sure to move the marker for this
717 * case.
719 * NOTE: We do not want to move the marker if events
720 * were scanned because normal kqueue operations
721 * may reactivate events. Moving the marker in
722 * that case could result in duplicates for the
723 * same event.
725 if (i == 0) {
726 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
727 TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
730 TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
732 /* Timeouts do not return EWOULDBLOCK. */
733 if (error == EWOULDBLOCK)
734 error = 0;
736 done:
737 lwkt_reltoken(&kq_token);
738 return (error);
742 * MPALMOSTSAFE
745 sys_kevent(struct kevent_args *uap)
747 struct thread *td = curthread;
748 struct proc *p = td->td_proc;
749 struct timespec ts, *tsp;
750 struct kqueue *kq;
751 struct file *fp = NULL;
752 struct kevent_copyin_args *kap, ka;
753 int error;
755 if (uap->timeout) {
756 error = copyin(uap->timeout, &ts, sizeof(ts));
757 if (error)
758 return (error);
759 tsp = &ts;
760 } else {
761 tsp = NULL;
764 fp = holdfp(p->p_fd, uap->fd, -1);
765 if (fp == NULL)
766 return (EBADF);
767 if (fp->f_type != DTYPE_KQUEUE) {
768 fdrop(fp);
769 return (EBADF);
772 kq = (struct kqueue *)fp->f_data;
774 kap = &ka;
775 kap->ka = uap;
776 kap->pchanges = 0;
778 error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
779 kevent_copyin, kevent_copyout, tsp);
781 fdrop(fp);
783 return (error);
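/*
 * Minimal userland sketch of the syscall path above (assumes only the
 * documented kqueue(2)/kevent(2) API; fd is some readable descriptor and
 * error handling is omitted): watch fd for readability and wait at most
 * one second for an event.
 *
 *	struct kevent chg, ev;
 *	struct timespec ts = { 1, 0 };
 *	int kq = kqueue();
 *
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &ev, 1, &ts) > 0)
 *		... ev.data bytes are available on fd ...
 */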
787 kqueue_register(struct kqueue *kq, struct kevent *kev)
789 struct filedesc *fdp = kq->kq_fdp;
790 struct filterops *fops;
791 struct file *fp = NULL;
792 struct knote *kn = NULL;
793 int error = 0;
795 if (kev->filter < 0) {
796 if (kev->filter + EVFILT_SYSCOUNT < 0)
797 return (EINVAL);
798 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */
799 } else {
801 * XXX
802 * filter attach routine is responsible for ensuring that
803 * the identifier can be attached to it.
805 kprintf("unknown filter: %d\n", kev->filter);
806 return (EINVAL);
809 lwkt_gettoken(&kq_token);
810 if (fops->f_flags & FILTEROP_ISFD) {
811 /* validate descriptor */
812 fp = holdfp(fdp, kev->ident, -1);
813 if (fp == NULL) {
814 lwkt_reltoken(&kq_token);
815 return (EBADF);
818 again1:
819 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
820 if (kn->kn_kq == kq &&
821 kn->kn_filter == kev->filter &&
822 kn->kn_id == kev->ident) {
823 if (knote_acquire(kn) == 0)
824 goto again1;
825 break;
828 } else {
829 if (kq->kq_knhashmask) {
830 struct klist *list;
832 list = &kq->kq_knhash[
833 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
834 again2:
835 SLIST_FOREACH(kn, list, kn_link) {
836 if (kn->kn_id == kev->ident &&
837 kn->kn_filter == kev->filter) {
838 if (knote_acquire(kn) == 0)
839 goto again2;
840 break;
847 * NOTE: At this point if kn is non-NULL we will have acquired
848 * it and set KN_PROCESSING.
850 if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
851 error = ENOENT;
852 goto done;
856 * kn now contains the matching knote, or NULL if no match
858 if (kev->flags & EV_ADD) {
859 if (kn == NULL) {
860 kn = knote_alloc();
861 if (kn == NULL) {
862 error = ENOMEM;
863 goto done;
865 kn->kn_fp = fp;
866 kn->kn_kq = kq;
867 kn->kn_fop = fops;
870 * apply reference count to knote structure, and
871 * do not release it at the end of this routine.
873 fp = NULL;
875 kn->kn_sfflags = kev->fflags;
876 kn->kn_sdata = kev->data;
877 kev->fflags = 0;
878 kev->data = 0;
879 kn->kn_kevent = *kev;
882 * KN_PROCESSING prevents the knote from getting
883 * ripped out from under us while we are trying
884 * to attach it, in case the attach blocks.
886 kn->kn_status = KN_PROCESSING;
887 knote_attach(kn);
888 if ((error = filter_attach(kn)) != 0) {
889 kn->kn_status |= KN_DELETING | KN_REPROCESS;
890 knote_drop(kn);
891 goto done;
895 * Interlock against close races which either tried
896 * to remove our knote while we were blocked or missed
897 * it entirely prior to our attachment. We do not
898 * want to end up with a knote on a closed descriptor.
900 if ((fops->f_flags & FILTEROP_ISFD) &&
901 checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
902 kn->kn_status |= KN_DELETING | KN_REPROCESS;
904 } else {
906 * The user may change some filter values after the
907 * initial EV_ADD, but doing so will not reset any
908 * filters which have already been triggered.
910 KKASSERT(kn->kn_status & KN_PROCESSING);
911 kn->kn_sfflags = kev->fflags;
912 kn->kn_sdata = kev->data;
913 kn->kn_kevent.udata = kev->udata;
917 * Execute the filter event to immediately activate the
918 * knote if necessary. If reprocessing events are pending
919 * due to blocking above we do not run the filter here
920 * but instead let knote_release() do it. Otherwise we
921 * might run the filter on a deleted event.
923 if ((kn->kn_status & KN_REPROCESS) == 0) {
924 if (filter_event(kn, 0))
925 KNOTE_ACTIVATE(kn);
927 } else if (kev->flags & EV_DELETE) {
929 * Delete the existing knote
931 knote_detach_and_drop(kn);
932 goto done;
936 * Disablement does not deactivate a knote here.
938 if ((kev->flags & EV_DISABLE) &&
939 ((kn->kn_status & KN_DISABLED) == 0)) {
940 kn->kn_status |= KN_DISABLED;
944 * Re-enablement may have to immediately enqueue an active knote.
946 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
947 kn->kn_status &= ~KN_DISABLED;
948 if ((kn->kn_status & KN_ACTIVE) &&
949 ((kn->kn_status & KN_QUEUED) == 0)) {
950 knote_enqueue(kn);
955 * Handle any required reprocessing
957 knote_release(kn);
958 /* kn may be invalid now */
960 done:
961 lwkt_reltoken(&kq_token);
962 if (fp != NULL)
963 fdrop(fp);
964 return (error);
968 * Block as necessary until the target time is reached.
969 * If tsp is NULL we block indefinitely. If tsp->tv_sec/tv_nsec are both
970 * 0 we do not block at all.
972 static int
973 kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
975 int error = 0;
977 if (tsp == NULL) {
978 kq->kq_state |= KQ_SLEEP;
979 error = tsleep(kq, PCATCH, "kqread", 0);
980 } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
981 error = EWOULDBLOCK;
982 } else {
983 struct timespec ats;
984 struct timespec atx = *tsp;
985 int timeout;
987 nanouptime(&ats);
988 timespecsub(&atx, &ats);
989 		if (atx.tv_sec < 0) {
990 error = EWOULDBLOCK;
991 } else {
992 timeout = atx.tv_sec > 24 * 60 * 60 ?
993 24 * 60 * 60 * hz : tstohz_high(&atx);
994 kq->kq_state |= KQ_SLEEP;
995 error = tsleep(kq, PCATCH, "kqread", timeout);
999 /* don't restart after signals... */
1000 if (error == ERESTART)
1001 return (EINTR);
1003 return (error);
1007 * Scan the kqueue, return the number of active events placed in kevp up
1008 * to count.
1010 * Continuous mode events may get recycled, do not continue scanning past
1011 * marker unless no events have been collected.
1013 static int
1014 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
1015 struct knote *marker)
1017 struct knote *kn, local_marker;
1018 int total;
1020 total = 0;
1021 local_marker.kn_filter = EVFILT_MARKER;
1022 local_marker.kn_status = KN_PROCESSING;
1025 * Collect events.
1027 TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
1028 while (count) {
1029 kn = TAILQ_NEXT(&local_marker, kn_tqe);
1030 if (kn->kn_filter == EVFILT_MARKER) {
1031 /* Marker reached, we are done */
1032 if (kn == marker)
1033 break;
1035 			/* Move local marker past some other thread's marker */
1036 kn = TAILQ_NEXT(kn, kn_tqe);
1037 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1038 TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
1039 continue;
1043 * We can't skip a knote undergoing processing, otherwise
1044 * we risk not returning it when the user process expects
1045 * it to be returned. Sleep and retry.
1047 if (knote_acquire(kn) == 0)
1048 continue;
1051 * Remove the event for processing.
1053 * WARNING! We must leave KN_QUEUED set to prevent the
1054 * event from being KNOTE_ACTIVATE()d while
1055 * the queue state is in limbo, in case we
1056 * block.
1058 * WARNING! We must set KN_PROCESSING to avoid races
1059 * against deletion or another thread's
1060 * processing.
1062 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1063 kq->kq_count--;
1066 * We have to deal with an extremely important race against
1067 * file descriptor close()s here. The file descriptor can
1068 * disappear MPSAFE, and there is a small window of
1069 * opportunity between that and the call to knote_fdclose().
1071 * If we hit that window here while doselect or dopoll is
1072 * trying to delete a spurious event they will not be able
1073 * to match up the event against a knote and will go haywire.
1075 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
1076 checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
1077 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1080 if (kn->kn_status & KN_DISABLED) {
1082 * If disabled we ensure the event is not queued
1083 * but leave its active bit set. On re-enablement
1084 * the event may be immediately triggered.
1086 kn->kn_status &= ~KN_QUEUED;
1087 } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
1088 (kn->kn_status & KN_DELETING) == 0 &&
1089 filter_event(kn, 0) == 0) {
1091 * If not running in one-shot mode and the event
1092 * is no longer present we ensure it is removed
1093 * from the queue and ignore it.
1095 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1096 } else {
1098 * Post the event
1100 *kevp++ = kn->kn_kevent;
1101 ++total;
1102 --count;
1104 if (kn->kn_flags & EV_ONESHOT) {
1105 kn->kn_status &= ~KN_QUEUED;
1106 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1107 } else if (kn->kn_flags & EV_CLEAR) {
1108 kn->kn_data = 0;
1109 kn->kn_fflags = 0;
1110 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1111 } else {
1112 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1113 kq->kq_count++;
1118 * Handle any post-processing states
1120 knote_release(kn);
1122 TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
1124 return (total);
1128 * XXX
1129 * This could be expanded to call kqueue_scan, if desired.
1131 * MPSAFE
1133 static int
1134 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1136 return (ENXIO);
1140 * MPSAFE
1142 static int
1143 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
1145 return (ENXIO);
1149 * MPALMOSTSAFE
1151 static int
1152 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
1153 struct ucred *cred, struct sysmsg *msg)
1155 struct kqueue *kq;
1156 int error;
1158 lwkt_gettoken(&kq_token);
1159 kq = (struct kqueue *)fp->f_data;
1161 switch(com) {
1162 case FIOASYNC:
1163 if (*(int *)data)
1164 kq->kq_state |= KQ_ASYNC;
1165 else
1166 kq->kq_state &= ~KQ_ASYNC;
1167 error = 0;
1168 break;
1169 case FIOSETOWN:
1170 error = fsetown(*(int *)data, &kq->kq_sigio);
1171 break;
1172 default:
1173 error = ENOTTY;
1174 break;
1176 lwkt_reltoken(&kq_token);
1177 return (error);
1181 * MPSAFE
1183 static int
1184 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
1186 struct kqueue *kq = (struct kqueue *)fp->f_data;
1188 bzero((void *)st, sizeof(*st));
1189 st->st_size = kq->kq_count;
1190 st->st_blksize = sizeof(struct kevent);
1191 st->st_mode = S_IFIFO;
1192 return (0);
1196 * MPSAFE
1198 static int
1199 kqueue_close(struct file *fp)
1201 struct kqueue *kq = (struct kqueue *)fp->f_data;
1203 kqueue_terminate(kq);
1205 fp->f_data = NULL;
1206 funsetown(&kq->kq_sigio);
1208 kfree(kq, M_KQUEUE);
1209 return (0);
1212 static void
1213 kqueue_wakeup(struct kqueue *kq)
1215 if (kq->kq_state & KQ_SLEEP) {
1216 kq->kq_state &= ~KQ_SLEEP;
1217 wakeup(kq);
1219 KNOTE(&kq->kq_kqinfo.ki_note, 0);
1223 * Calls filterops f_attach function, acquiring mplock if filter is not
1224 * marked as FILTEROP_MPSAFE.
1226 static int
1227 filter_attach(struct knote *kn)
1229 int ret;
1231 if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
1232 get_mplock();
1233 ret = kn->kn_fop->f_attach(kn);
1234 rel_mplock();
1235 } else {
1236 ret = kn->kn_fop->f_attach(kn);
1239 return (ret);
1243 * Detach the knote and drop it, destroying the knote.
1245 * Calls filterops f_detach function, acquiring mplock if filter is not
1246 * marked as FILTEROP_MPSAFE.
1248 static void
1249 knote_detach_and_drop(struct knote *kn)
1251 kn->kn_status |= KN_DELETING | KN_REPROCESS;
1252 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1253 kn->kn_fop->f_detach(kn);
1254 } else {
1255 get_mplock();
1256 kn->kn_fop->f_detach(kn);
1257 rel_mplock();
1259 knote_drop(kn);
1263 * Calls filterops f_event function, acquiring mplock if filter is not
1264 * marked as FILTEROP_MPSAFE.
1266 * If the knote is in the middle of being created or deleted we cannot
1267 * safely call the filter op.
1269 static int
1270 filter_event(struct knote *kn, long hint)
1272 int ret;
1274 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
1275 ret = kn->kn_fop->f_event(kn, hint);
1276 } else {
1277 get_mplock();
1278 ret = kn->kn_fop->f_event(kn, hint);
1279 rel_mplock();
1281 return (ret);
1285 * Walk down a list of knotes, activating them if their event has triggered.
1287 * If we encounter any knotes which are undergoing processing we just mark
1288 * them for reprocessing and do not try to [re]activate the knote. However,
1289 * if a hint is being passed we have to wait and that makes things a bit
1290 * sticky.
1292 void
1293 knote(struct klist *list, long hint)
1295 struct knote *kn;
1297 lwkt_gettoken(&kq_token);
1298 restart:
1299 SLIST_FOREACH(kn, list, kn_next) {
1300 if (kn->kn_status & KN_PROCESSING) {
1302 * Someone else is processing the knote, ask the
1303 * other thread to reprocess it and don't mess
1304 * with it otherwise.
1306 if (hint == 0) {
1307 kn->kn_status |= KN_REPROCESS;
1308 continue;
1312 * If the hint is non-zero we have to wait or risk
1313 * losing the state the caller is trying to update.
1315 * XXX This is a real problem, certain process
1316 * and signal filters will bump kn_data for
1317 * already-processed notes more than once if
1318 * we restart the list scan. FIXME.
1320 kn->kn_status |= KN_WAITING | KN_REPROCESS;
1321 tsleep(kn, 0, "knotec", hz);
1322 goto restart;
1326 * Become the reprocessing master ourselves.
1328 * If the hint is non-zero, running the event is mandatory
1329 * when not deleting so do it whether reprocessing is
1330 * set or not.
1332 kn->kn_status |= KN_PROCESSING;
1333 if ((kn->kn_status & KN_DELETING) == 0) {
1334 if (filter_event(kn, hint))
1335 KNOTE_ACTIVATE(kn);
1337 if (knote_release(kn))
1338 goto restart;
1340 lwkt_reltoken(&kq_token);
1344 * Insert knote at head of klist.
1346 * This function may only be called via a filter function and thus
1347 * kq_token should already be held and marked for processing.
1349 void
1350 knote_insert(struct klist *klist, struct knote *kn)
1352 KKASSERT(kn->kn_status & KN_PROCESSING);
1353 ASSERT_LWKT_TOKEN_HELD(&kq_token);
1354 SLIST_INSERT_HEAD(klist, kn, kn_next);
1358 * Remove knote from a klist
1360 * This function may only be called via a filter function and thus
1361 * kq_token should already be held and marked for processing.
1363 void
1364 knote_remove(struct klist *klist, struct knote *kn)
1366 KKASSERT(kn->kn_status & KN_PROCESSING);
1367 ASSERT_LWKT_TOKEN_HELD(&kq_token);
1368 SLIST_REMOVE(klist, kn, knote, kn_next);
1372 * Remove all knotes from a specified klist
1374 * Only called from aio.
1376 void
1377 knote_empty(struct klist *list)
1379 struct knote *kn;
1381 lwkt_gettoken(&kq_token);
1382 while ((kn = SLIST_FIRST(list)) != NULL) {
1383 if (knote_acquire(kn))
1384 knote_detach_and_drop(kn);
1386 lwkt_reltoken(&kq_token);
1389 void
1390 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
1391 struct filterops *ops, void *hook)
1393 struct knote *kn;
1395 lwkt_gettoken(&kq_token);
1396 while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
1397 if (knote_acquire(kn)) {
1398 knote_remove(&src->ki_note, kn);
1399 kn->kn_fop = ops;
1400 kn->kn_hook = hook;
1401 knote_insert(&dst->ki_note, kn);
1402 knote_release(kn);
1403 /* kn may be invalid now */
1406 lwkt_reltoken(&kq_token);
1410 * Remove all knotes referencing a specified fd
1412 void
1413 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
1415 struct knote *kn;
1417 lwkt_gettoken(&kq_token);
1418 restart:
1419 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
1420 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
1421 if (knote_acquire(kn))
1422 knote_detach_and_drop(kn);
1423 goto restart;
1426 lwkt_reltoken(&kq_token);
1430 * Low level attach function.
1432 * The knote should already be marked for processing.
1434 static void
1435 knote_attach(struct knote *kn)
1437 struct klist *list;
1438 struct kqueue *kq = kn->kn_kq;
1440 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1441 KKASSERT(kn->kn_fp);
1442 list = &kn->kn_fp->f_klist;
1443 } else {
1444 if (kq->kq_knhashmask == 0)
1445 kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1446 &kq->kq_knhashmask);
1447 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1449 SLIST_INSERT_HEAD(list, kn, kn_link);
1450 TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
1454 * Low level drop function.
1456 * The knote should already be marked for processing.
1458 static void
1459 knote_drop(struct knote *kn)
1461 struct kqueue *kq;
1462 struct klist *list;
1464 kq = kn->kn_kq;
1466 if (kn->kn_fop->f_flags & FILTEROP_ISFD)
1467 list = &kn->kn_fp->f_klist;
1468 else
1469 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
1471 SLIST_REMOVE(list, kn, knote, kn_link);
1472 TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
1473 if (kn->kn_status & KN_QUEUED)
1474 knote_dequeue(kn);
1475 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
1476 fdrop(kn->kn_fp);
1477 kn->kn_fp = NULL;
1479 knote_free(kn);
1483 * Low level enqueue function.
1485 * The knote should already be marked for processing.
1487 static void
1488 knote_enqueue(struct knote *kn)
1490 struct kqueue *kq = kn->kn_kq;
1492 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
1493 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
1494 kn->kn_status |= KN_QUEUED;
1495 ++kq->kq_count;
1498 * Send SIGIO on request (typically set up as a mailbox signal)
1500 if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
1501 pgsigio(kq->kq_sigio, SIGIO, 0);
1503 kqueue_wakeup(kq);
1507 * Low level dequeue function.
1509 * The knote should already be marked for processing.
1511 static void
1512 knote_dequeue(struct knote *kn)
1514 struct kqueue *kq = kn->kn_kq;
1516 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
1517 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
1518 kn->kn_status &= ~KN_QUEUED;
1519 kq->kq_count--;
1522 static struct knote *
1523 knote_alloc(void)
1525 return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
1528 static void
1529 knote_free(struct knote *kn)
1531 kfree(kn, M_KQUEUE);