[dragonfly.git] / sys/kern/kern_event.c
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

/*
 * Global token for kqueue subsystem
 */
#if 0
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
    CTLFLAG_RW, &kq_token.t_collisions, 0,
    "Collision counter of kq_token");
#endif

MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
struct kevent_copyin_args {
        struct kevent_args      *ka;
        int                     pchanges;
};

static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
                    struct knote *marker);
static int      kqueue_read(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_write(struct file *fp, struct uio *uio,
                    struct ucred *cred, int flags);
static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct ucred *cred, struct sysmsg *msg);
static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
static int      kqueue_stat(struct file *fp, struct stat *st,
                    struct ucred *cred);
static int      kqueue_close(struct file *fp);
static void     kqueue_wakeup(struct kqueue *kq);
static int      filter_attach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);

/*
 * MPSAFE
 */
static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_ioctl = kqueue_ioctl,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_shutdown = nofo_shutdown
};

static void     knote_attach(struct knote *kn);
static void     knote_drop(struct knote *kn);
static void     knote_detach_and_drop(struct knote *kn);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static struct   knote *knote_alloc(void);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);

static struct filterops file_filtops =
        { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
        { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
        { 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
        { 0, filt_timerattach, filt_timerdetach, filt_timer };
static int              kq_ncallouts = 0;
static int              kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
static int              kq_checkloop = 1000000;
SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
    &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
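
/*
 * Both limits above are runtime-tunable.  As an illustrative sketch (not
 * part of this file), an administrator could raise the callout ceiling
 * from a shell with:
 *
 *      sysctl kern.kq_calloutmax=8192
 */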
#define KNOTE_ACTIVATE(kn) do {                                         \
        kn->kn_status |= KN_ACTIVE;                                     \
        if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
                knote_enqueue(kn);                                      \
} while(0)

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
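
/*
 * Illustrative worked example (not part of the original file): with the
 * default 64-bucket table the mask is 63, so an ident of 0x1234 hashes to
 * (0x1234 ^ 0x12) & 63 = 0x1226 & 63 = bucket 38.
 */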
extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        &aio_filtops,                   /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_EXCEPT */
};

static int
filt_fileattach(struct knote *kn)
{
        return (fo_kqfilter(kn->kn_fp, kn));
}

/*
 * MPSAFE
 */
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EOPNOTSUPP);

        kn->kn_fop = &kqread_filtops;
        knote_insert(&kq->kq_kqinfo.ki_note, kn);
        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        knote_remove(&kq->kq_kqinfo.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        }
        if (p == NULL) {
                return (ESRCH);
        }
        if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                if (p)
                        PRELE(p);
                return (EACCES);
        }

        lwkt_gettoken(&p->p_token);
        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;     /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        knote_insert(&p->p_klist, kn);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn);
        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        if (kn->kn_status & KN_DETACHED)
                return;
        /* XXX locking?  take proc_token here? */
        p = kn->kn_ptr.p_proc;
        knote_remove(&p->p_klist, kn);
}

static int
filt_proc(struct knote *kn, long hint)
{
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * Process is gone, so flag the event as finished.  Detach the
         * knote from the process now because the process will be poof,
         * gone later on.
         */
        if (event == NOTE_EXIT) {
                struct proc *p = kn->kn_ptr.p_proc;
                if ((kn->kn_status & KN_DETACHED) == 0) {
                        PHOLD(p);
                        knote_remove(&p->p_klist, kn);
                        kn->kn_status |= KN_DETACHED;
                        kn->kn_data = p->p_xstat;
                        kn->kn_ptr.p_proc = NULL;
                        PRELE(p);
                }
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_kevent.udata;        /* preserve udata */
                error = kqueue_register(kn->kn_kq, &kev);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}
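
/*
 * Illustrative userland sketch (not part of this file): a parent that
 * wants exit notification for a child, following new processes across
 * fork via the NOTE_TRACK logic above, might register:
 *
 *      struct kevent kev;
 *      EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD,
 *             NOTE_EXIT | NOTE_TRACK, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A NOTE_TRACK registration failure is reported back via NOTE_TRACKERR
 * in the returned fflags, as implemented in filt_proc() above.
 */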
/*
 * The callout interlocks with callout_terminate() but can still
 * race a deletion so if KN_DELETING is set we just don't touch
 * the knote.
 */
static void
filt_timerexpire(void *knx)
{
        struct lwkt_token *tok;
        struct knote *kn = knx;
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        tok = lwkt_token_pool_lookup(kn->kn_kq);
        lwkt_gettoken(tok);
        if ((kn->kn_status & KN_DELETING) == 0) {
                kn->kn_data++;
                KNOTE_ACTIVATE(kn);

                if ((kn->kn_flags & EV_ONESHOT) == 0) {
                        tv.tv_sec = kn->kn_sdata / 1000;
                        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
                        tticks = tvtohz_high(&tv);
                        calloutp = (struct callout *)kn->kn_hook;
                        callout_reset(calloutp, tticks, filt_timerexpire, kn);
                }
        }
        lwkt_reltoken(tok);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        struct timeval tv;
        int tticks;

        if (kq_ncallouts >= kq_calloutmax) {
                kn->kn_hook = NULL;
                return (ENOMEM);
        }
        kq_ncallouts++;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz_high(&tv);

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init(calloutp);
        kn->kn_hook = (caddr_t)calloutp;
        callout_reset(calloutp, tticks, filt_timerexpire, kn);

        return (0);
}
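
/*
 * Illustrative userland sketch (not part of this file): a periodic
 * 500ms timer, matching the millisecond interpretation of kn_sdata
 * above:
 *
 *      struct kevent kev;
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * EV_CLEAR is set automatically, so kn_data accumulates the number of
 * expirations between kevent() calls.
 */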
/*
 * This function is called with the knote flagged locked but it is
 * still possible to race a callout event due to the callback blocking.
 * We must call callout_terminate() instead of callout_stop() to deal
 * with the race.
 */
static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;

        calloutp = (struct callout *)kn->kn_hook;
        callout_terminate(calloutp);
        kfree(calloutp, M_KQUEUE);
        kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
        return (kn->kn_data != 0);
}
/*
 * Acquire a knote, return non-zero on success, 0 on failure.
 *
 * If we cannot acquire the knote we sleep and return 0.  The knote
 * may be stale on return in this case and the caller must restart
 * whatever loop they are in.
 *
 * Related kq token must be held.
 */
static __inline
int
knote_acquire(struct knote *kn)
{
        if (kn->kn_status & KN_PROCESSING) {
                kn->kn_status |= KN_WAITING | KN_REPROCESS;
                tsleep(kn, 0, "kqepts", hz);
                /* knote may be stale now */
                return(0);
        }
        kn->kn_status |= KN_PROCESSING;
        return(1);
}

/*
 * Release an acquired knote, clearing KN_PROCESSING and handling any
 * KN_REPROCESS events.
 *
 * Caller must be holding the related kq token
 *
 * Non-zero is returned if the knote is destroyed or detached.
 */
static __inline
int
knote_release(struct knote *kn)
{
        while (kn->kn_status & KN_REPROCESS) {
                kn->kn_status &= ~KN_REPROCESS;
                if (kn->kn_status & KN_WAITING) {
                        kn->kn_status &= ~KN_WAITING;
                        wakeup(kn);
                }
                if (kn->kn_status & KN_DELETING) {
                        knote_detach_and_drop(kn);
                        return(1);
                        /* NOT REACHED */
                }
                if (filter_event(kn, 0))
                        KNOTE_ACTIVATE(kn);
        }
        if (kn->kn_status & KN_DETACHED) {
                kn->kn_status &= ~KN_PROCESSING;
                return(1);
        } else {
                kn->kn_status &= ~KN_PROCESSING;
                return(0);
        }
}
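
/*
 * Illustrative sketch (not part of this file) of the acquire/release
 * pattern the scan and registration code below relies on, assuming the
 * related kq token is held:
 *
 *      restart:
 *      SLIST_FOREACH(kn, list, kn_link) {
 *              if (knote_acquire(kn) == 0)
 *                      goto restart;   /* slept; kn may be stale */
 *              ... operate on kn ...
 *              if (knote_release(kn))
 *                      goto restart;   /* kn was destroyed/detached */
 *      }
 */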
/*
 * Initialize a kqueue.
 *
 * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
 *
 * MPSAFE
 */
void
kqueue_init(struct kqueue *kq, struct filedesc *fdp)
{
        TAILQ_INIT(&kq->kq_knpend);
        TAILQ_INIT(&kq->kq_knlist);
        kq->kq_count = 0;
        kq->kq_fdp = fdp;
        SLIST_INIT(&kq->kq_kqinfo.ki_note);
}

/*
 * Terminate a kqueue.  Freeing the actual kq itself is left up to the
 * caller (it might be embedded in a lwp so we don't do it here).
 *
 * The kq's knlist must be completely eradicated so block on any
 * processing races.
 */
void
kqueue_terminate(struct kqueue *kq)
{
        struct lwkt_token *tok;
        struct knote *kn;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        if (kq->kq_knhash) {
                kfree(kq->kq_knhash, M_KQUEUE);
                kq->kq_knhash = NULL;
                kq->kq_knhashmask = 0;
        }
        lwkt_reltoken(tok);
}

/*
 * MPSAFE
 */
int
sys_kqueue(struct kqueue_args *uap)
{
        struct thread *td = curthread;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        error = falloc(td->td_lwp, &fp, &fd);
        if (error)
                return (error);
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;

        kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
        kqueue_init(kq, td->td_proc->p_fd);
        fp->f_data = kq;

        fsetfd(kq->kq_fdp, fp, fd);
        uap->sysmsg_result = fd;
        fdrop(fp);
        return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
{
        struct kevent_copyin_args *kap;
        int error;

        kap = (struct kevent_copyin_args *)arg;

        error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
        if (error == 0) {
                kap->ka->eventlist += count;
                *res += count;
        } else {
                *res = -1;
        }

        return (error);
}

/*
 * Copy at most 'max' items from the list pointed to by kap->changelist,
 * return number of items in 'events'.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
{
        struct kevent_copyin_args *kap;
        int error, count;

        kap = (struct kevent_copyin_args *)arg;

        count = min(kap->ka->nchanges - kap->pchanges, max);
        error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
        if (error == 0) {
                kap->ka->changelist += count;
                kap->pchanges += count;
                *events = count;
        }

        return (error);
}
/*
 * MPSAFE
 */
int
kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
            k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
            struct timespec *tsp_in)
{
        struct kevent *kevp;
        struct timespec *tsp;
        int i, n, total, error, nerrors = 0;
        int lres;
        int limit = kq_checkloop;
        struct kevent kev[KQ_NEVENTS];
        struct knote marker;
        struct lwkt_token *tok;

        tsp = tsp_in;
        *res = 0;

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        for ( ;; ) {
                n = 0;
                error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
                if (error)
                        goto done;
                if (n == 0)
                        break;
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp);

                        /*
                         * If a registration returns an error we
                         * immediately post the error.  The kevent()
                         * call itself will fail with the error if
                         * no space is available for posting.
                         *
                         * Such errors normally bypass the timeout/blocking
                         * code.  However, if the copyoutfn function refuses
                         * to post the error (see sys_poll()), then we
                         * ignore it too.
                         */
                        if (error) {
                                kevp->flags = EV_ERROR;
                                kevp->data = error;
                                lres = *res;
                                kevent_copyoutfn(uap, kevp, 1, res);
                                if (*res < 0) {
                                        goto done;
                                } else if (lres != *res) {
                                        nevents--;
                                        nerrors++;
                                }
                        }
                }
        }
        if (nerrors) {
                error = 0;
                goto done;
        }

        /*
         * Acquire/wait for events - setup timeout
         */
        if (tsp != NULL) {
                struct timespec ats;

                if (tsp->tv_sec || tsp->tv_nsec) {
                        nanouptime(&ats);
                        timespecadd(tsp, &ats);         /* tsp = target time */
                }
        }

        /*
         * Loop as required.
         *
         * Collect as many events as we can.  Sleeping on successive
         * loops is disabled if copyoutfn has incremented (*res).
         *
         * The loop stops if an error occurs, all events have been
         * scanned (the marker has been reached), or fewer than the
         * maximum number of events is found.
         *
         * The copyoutfn function does not have to increment (*res) in
         * order for the loop to continue.
         *
         * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
         */
        total = 0;
        error = 0;
        marker.kn_filter = EVFILT_MARKER;
        marker.kn_status = KN_PROCESSING;
        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
        while ((n = nevents - total) > 0) {
                if (n > KQ_NEVENTS)
                        n = KQ_NEVENTS;

                /*
                 * If no events are pending sleep until timeout (if any)
                 * or an event occurs.
                 *
                 * After the sleep completes the marker is moved to the
                 * end of the list, making any received events available
                 * to our scan.
                 */
                if (kq->kq_count == 0 && *res == 0) {
                        error = kqueue_sleep(kq, tsp);
                        if (error)
                                break;

                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }

                /*
                 * Process all received events
                 * Account for all non-spurious events in our total
                 */
                i = kqueue_scan(kq, kev, n, &marker);
                if (i) {
                        lres = *res;
                        error = kevent_copyoutfn(uap, kev, i, res);
                        total += *res - lres;
                        if (error)
                                break;
                }
                if (limit && --limit == 0)
                        panic("kqueue: checkloop failed i=%d", i);

                /*
                 * Normally when fewer events are returned than requested
                 * we can stop.  However, if only spurious events were
                 * collected the copyout will not bump (*res) and we have
                 * to continue.
                 */
                if (i < n && *res)
                        break;

                /*
                 * Deal with an edge case where spurious events can cause
                 * a loop to occur without moving the marker.  This can
                 * prevent kqueue_scan() from picking up new events which
                 * race us.  We must be sure to move the marker for this
                 * case.
                 *
                 * NOTE: We do not want to move the marker if events
                 *       were scanned because normal kqueue operations
                 *       may reactivate events.  Moving the marker in
                 *       that case could result in duplicates for the
                 *       same event.
                 */
                if (i == 0) {
                        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
                        TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
                }
        }
        TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);

        /* Timeouts do not return EWOULDBLOCK. */
        if (error == EWOULDBLOCK)
                error = 0;

done:
        lwkt_reltoken(tok);
        return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_kevent(struct kevent_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct timespec ts, *tsp;
        struct kqueue *kq;
        struct file *fp = NULL;
        struct kevent_copyin_args *kap, ka;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        fp = holdfp(p->p_fd, uap->fd, -1);
        if (fp == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_KQUEUE) {
                fdrop(fp);
                return (EBADF);
        }

        kq = (struct kqueue *)fp->f_data;

        kap = &ka;
        kap->ka = uap;
        kap->pchanges = 0;

        error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
                            kevent_copyin, kevent_copyout, tsp);

        fdrop(fp);

        return (error);
}
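
/*
 * Illustrative userland sketch (not part of this file) tying the above
 * together: create a kqueue, register a read filter on a descriptor fd,
 * and wait up to one second for events:
 *
 *      struct kevent chg, ev;
 *      struct timespec ts = { 1, 0 };
 *      int kq = kqueue();
 *      int n;
 *
 *      EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *      n = kevent(kq, &chg, 1, &ev, 1, &ts);
 *
 * A negative return with errno set indicates failure; n == 0 means the
 * timeout expired with no events pending.
 */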
/*
 * Caller must be holding the kq token
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev)
{
        struct lwkt_token *tok;
        struct filedesc *fdp = kq->kq_fdp;
        struct filterops *fops;
        struct file *fp = NULL;
        struct knote *kn = NULL;
        int error = 0;

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        } else {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                kprintf("unknown filter: %d\n", kev->filter);
                return (EINVAL);
        }

        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);
        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                fp = holdfp(fdp, kev->ident, -1);
                if (fp == NULL) {
                        lwkt_reltoken(tok);
                        return (EBADF);
                }
                lwkt_getpooltoken(&fp->f_klist);
again1:
                SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                        if (kn->kn_kq == kq &&
                            kn->kn_filter == kev->filter &&
                            kn->kn_id == kev->ident) {
                                if (knote_acquire(kn) == 0)
                                        goto again1;
                                break;
                        }
                }
                lwkt_relpooltoken(&fp->f_klist);
        } else {
                if (kq->kq_knhashmask) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        lwkt_getpooltoken(list);
again2:
                        SLIST_FOREACH(kn, list, kn_link) {
                                if (kn->kn_id == kev->ident &&
                                    kn->kn_filter == kev->filter) {
                                        if (knote_acquire(kn) == 0)
                                                goto again2;
                                        break;
                                }
                        }
                        lwkt_relpooltoken(list);
                }
        }

        /*
         * NOTE: At this point if kn is non-NULL we will have acquired
         *       it and set KN_PROCESSING.
         */
        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = knote_alloc();
                        if (kn == NULL) {
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;

                        /*
                         * KN_PROCESSING prevents the knote from getting
                         * ripped out from under us while we are trying
                         * to attach it, in case the attach blocks.
                         */
                        kn->kn_status = KN_PROCESSING;
                        knote_attach(kn);
                        if ((error = filter_attach(kn)) != 0) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                                knote_drop(kn);
                                goto done;
                        }

                        /*
                         * Interlock against close races which either tried
                         * to remove our knote while we were blocked or missed
                         * it entirely prior to our attachment.  We do not
                         * want to end up with a knote on a closed descriptor.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
                            checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        }
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        KKASSERT(kn->kn_status & KN_PROCESSING);
                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kn->kn_kevent.udata = kev->udata;
                }

                /*
                 * Execute the filter event to immediately activate the
                 * knote if necessary.  If reprocessing events are pending
                 * due to blocking above we do not run the filter here
                 * but instead let knote_release() do it.  Otherwise we
                 * might run the filter on a deleted event.
                 */
                if ((kn->kn_status & KN_REPROCESS) == 0) {
                        if (filter_event(kn, 0))
                                KNOTE_ACTIVATE(kn);
                }
        } else if (kev->flags & EV_DELETE) {
                /*
                 * Delete the existing knote
                 */
                knote_detach_and_drop(kn);
                goto done;
        }

        /*
         * Disablement does not deactivate a knote here.
         */
        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        /*
         * Re-enablement may have to immediately enqueue an active knote.
         */
        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0)) {
                        knote_enqueue(kn);
                }
        }

        /*
         * Handle any required reprocessing
         */
        knote_release(kn);
        /* kn may be invalid now */

done:
        lwkt_reltoken(tok);
        if (fp != NULL)
                fdrop(fp);
        return (error);
}
/*
 * Block as necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec/tv_nsec are both
 * 0 we do not block at all.
 *
 * Caller must be holding the kq token.
 */
static int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        int error = 0;

        if (tsp == NULL) {
                kq->kq_state |= KQ_SLEEP;
                error = tsleep(kq, PCATCH, "kqread", 0);
        } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
                error = EWOULDBLOCK;
        } else {
                struct timespec ats;
                struct timespec atx = *tsp;
                int timeout;

                nanouptime(&ats);
                timespecsub(&atx, &ats);
                if (atx.tv_sec < 0) {
                        /* target time has already passed */
                        error = EWOULDBLOCK;
                } else {
                        timeout = atx.tv_sec > 24 * 60 * 60 ?
                            24 * 60 * 60 * hz : tstohz_high(&atx);
                        kq->kq_state |= KQ_SLEEP;
                        error = tsleep(kq, PCATCH, "kqread", timeout);
                }
        }

        /* don't restart after signals... */
        if (error == ERESTART)
                return (EINTR);

        return (error);
}
/*
 * Scan the kqueue, return the number of active events placed in kevp up
 * to count.
 *
 * Continuous mode events may get recycled, do not continue scanning past
 * marker unless no events have been collected.
 *
 * Caller must be holding the kq token
 */
static int
kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
            struct knote *marker)
{
        struct knote *kn, local_marker;
        int total;

        total = 0;
        local_marker.kn_filter = EVFILT_MARKER;
        local_marker.kn_status = KN_PROCESSING;

        /*
         * Collect events.
         */
        TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
        while (count) {
                kn = TAILQ_NEXT(&local_marker, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        /* Marker reached, we are done */
                        if (kn == marker)
                                break;

                        /* Move local marker past some other threads marker */
                        kn = TAILQ_NEXT(kn, kn_tqe);
                        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
                        TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
                        continue;
                }

                /*
                 * We can't skip a knote undergoing processing, otherwise
                 * we risk not returning it when the user process expects
                 * it should be returned.  Sleep and retry.
                 */
                if (knote_acquire(kn) == 0)
                        continue;

                /*
                 * Remove the event for processing.
                 *
                 * WARNING!  We must leave KN_QUEUED set to prevent the
                 *           event from being KNOTE_ACTIVATE()d while
                 *           the queue state is in limbo, in case we
                 *           block.
                 *
                 * WARNING!  We must set KN_PROCESSING to avoid races
                 *           against deletion or another thread's
                 *           processing.
                 */
                TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
                kq->kq_count--;

                /*
                 * We have to deal with an extremely important race against
                 * file descriptor close()s here.  The file descriptor can
                 * disappear MPSAFE, and there is a small window of
                 * opportunity between that and the call to knote_fdclose().
                 *
                 * If we hit that window here while doselect or dopoll is
                 * trying to delete a spurious event they will not be able
                 * to match up the event against a knote and will go haywire.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
                        kn->kn_status |= KN_DELETING | KN_REPROCESS;
                }

                if (kn->kn_status & KN_DISABLED) {
                        /*
                         * If disabled we ensure the event is not queued
                         * but leave its active bit set.  On re-enablement
                         * the event may be immediately triggered.
                         */
                        kn->kn_status &= ~KN_QUEUED;
                } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
                           (kn->kn_status & KN_DELETING) == 0 &&
                           filter_event(kn, 0) == 0) {
                        /*
                         * If not running in one-shot mode and the event
                         * is no longer present we ensure it is removed
                         * from the queue and ignore it.
                         */
                        kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                } else {
                        /*
                         * Post the event
                         */
                        *kevp++ = kn->kn_kevent;
                        ++total;
                        --count;

                        if (kn->kn_flags & EV_ONESHOT) {
                                kn->kn_status &= ~KN_QUEUED;
                                kn->kn_status |= KN_DELETING | KN_REPROCESS;
                        } else if (kn->kn_flags & EV_CLEAR) {
                                kn->kn_data = 0;
                                kn->kn_fflags = 0;
                                kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
                        } else {
                                TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
                                kq->kq_count++;
                        }
                }

                /*
                 * Handle any post-processing states
                 */
                knote_release(kn);
        }
        TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);

        return (total);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 *
 * MPSAFE
 */
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPSAFE
 */
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        return (ENXIO);
}

/*
 * MPALMOSTSAFE
 */
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
             struct ucred *cred, struct sysmsg *msg)
{
        struct lwkt_token *tok;
        struct kqueue *kq;
        int error;

        kq = (struct kqueue *)fp->f_data;
        tok = lwkt_token_pool_lookup(kq);
        lwkt_gettoken(tok);

        switch(com) {
        case FIOASYNC:
                if (*(int *)data)
                        kq->kq_state |= KQ_ASYNC;
                else
                        kq->kq_state &= ~KQ_ASYNC;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &kq->kq_sigio);
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(tok);
        return (error);
}

/*
 * MPSAFE
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        bzero((void *)st, sizeof(*st));
        st->st_size = kq->kq_count;
        st->st_blksize = sizeof(struct kevent);
        st->st_mode = S_IFIFO;
        return (0);
}

/*
 * MPSAFE
 */
static int
kqueue_close(struct file *fp)
{
        struct kqueue *kq = (struct kqueue *)fp->f_data;

        kqueue_terminate(kq);

        fp->f_data = NULL;
        funsetown(&kq->kq_sigio);

        kfree(kq, M_KQUEUE);
        return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
        if (kq->kq_state & KQ_SLEEP) {
                kq->kq_state &= ~KQ_SLEEP;
                wakeup(kq);
        }
        KNOTE(&kq->kq_kqinfo.ki_note, 0);
}
/*
 * Calls filterops f_attach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static int
filter_attach(struct knote *kn)
{
        int ret;

        if (!(kn->kn_fop->f_flags & FILTEROP_MPSAFE)) {
                get_mplock();
                ret = kn->kn_fop->f_attach(kn);
                rel_mplock();
        } else {
                ret = kn->kn_fop->f_attach(kn);
        }

        return (ret);
}

/*
 * Detach the knote and drop it, destroying the knote.
 *
 * Calls filterops f_detach function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * Caller must be holding the related kq token
 */
static void
knote_detach_and_drop(struct knote *kn)
{
        kn->kn_status |= KN_DELETING | KN_REPROCESS;
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                get_mplock();
                kn->kn_fop->f_detach(kn);
                rel_mplock();
        }
        knote_drop(kn);
}

/*
 * Calls filterops f_event function, acquiring mplock if filter is not
 * marked as FILTEROP_MPSAFE.
 *
 * If the knote is in the middle of being created or deleted we cannot
 * safely call the filter op.
 *
 * Caller must be holding the related kq token
 */
static int
filter_event(struct knote *kn, long hint)
{
        int ret;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                ret = kn->kn_fop->f_event(kn, hint);
        } else {
                get_mplock();
                ret = kn->kn_fop->f_event(kn, hint);
                rel_mplock();
        }
        return (ret);
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * If we encounter any knotes which are undergoing processing we just mark
 * them for reprocessing and do not try to [re]activate the knote.  However,
 * if a hint is being passed we have to wait and that makes things a bit
 * sticky.
 */
void
knote(struct klist *list, long hint)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(list);
restart:
        SLIST_FOREACH(kn, list, kn_next) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);

                /* temporary verification hack */
                SLIST_FOREACH(kntmp, list, kn_next) {
                        if (kn == kntmp)
                                break;
                }
                if (kn != kntmp || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                if (kn->kn_status & KN_PROCESSING) {
                        /*
                         * Someone else is processing the knote, ask the
                         * other thread to reprocess it and don't mess
                         * with it otherwise.
                         */
                        if (hint == 0) {
                                kn->kn_status |= KN_REPROCESS;
                                lwkt_relpooltoken(kq);
                                continue;
                        }

                        /*
                         * If the hint is non-zero we have to wait or risk
                         * losing the state the caller is trying to update.
                         *
                         * XXX This is a real problem, certain process
                         *     and signal filters will bump kn_data for
                         *     already-processed notes more than once if
                         *     we restart the list scan.  FIXME.
                         */
                        kn->kn_status |= KN_WAITING | KN_REPROCESS;
                        tsleep(kn, 0, "knotec", hz);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }

                /*
                 * Become the reprocessing master ourselves.
                 *
                 * If hint is non-zero running the event is mandatory
                 * when not deleting so do it whether reprocessing is
                 * set or not.
                 */
                kn->kn_status |= KN_PROCESSING;
                if ((kn->kn_status & KN_DELETING) == 0) {
                        if (filter_event(kn, hint))
                                KNOTE_ACTIVATE(kn);
                }
                if (knote_release(kn)) {
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(list);
}
/*
 * Insert knote at head of klist.
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_insert(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_INSERT_HEAD(klist, kn, kn_next);
        lwkt_relpooltoken(klist);
}

/*
 * Remove knote from a klist
 *
 * This function may only be called via a filter function and thus
 * kq_token should already be held and marked for processing.
 */
void
knote_remove(struct klist *klist, struct knote *kn)
{
        lwkt_getpooltoken(klist);
        KKASSERT(kn->kn_status & KN_PROCESSING);
        SLIST_REMOVE(klist, kn, knote, kn_next);
        lwkt_relpooltoken(klist);
}

#if 0
/*
 * Remove all knotes from a specified klist
 *
 * Only called from aio.
 */
void
knote_empty(struct klist *list)
{
        struct knote *kn;

        lwkt_gettoken(&kq_token);
        while ((kn = SLIST_FIRST(list)) != NULL) {
                if (knote_acquire(kn))
                        knote_detach_and_drop(kn);
        }
        lwkt_reltoken(&kq_token);
}
#endif
void
knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
                    struct filterops *ops, void *hook)
{
        struct kqueue *kq;
        struct knote *kn;

        lwkt_getpooltoken(&src->ki_note);
        lwkt_getpooltoken(&dst->ki_note);
        while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
                kq = kn->kn_kq;
                lwkt_getpooltoken(kq);
                if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
                        lwkt_relpooltoken(kq);
                        continue;
                }
                if (knote_acquire(kn)) {
                        knote_remove(&src->ki_note, kn);
                        kn->kn_fop = ops;
                        kn->kn_hook = hook;
                        knote_insert(&dst->ki_note, kn);
                        knote_release(kn);
                        /* kn may be invalid now */
                }
                lwkt_relpooltoken(kq);
        }
        lwkt_relpooltoken(&dst->ki_note);
        lwkt_relpooltoken(&src->ki_note);
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
{
        struct kqueue *kq;
        struct knote *kn;
        struct knote *kntmp;

        lwkt_getpooltoken(&fp->f_klist);
restart:
        SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
                if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
                        kq = kn->kn_kq;
                        lwkt_getpooltoken(kq);

                        /* temporary verification hack */
                        SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
                                if (kn == kntmp)
                                        break;
                        }
                        if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
                            kn->kn_id != fd || kn->kn_kq != kq) {
                                lwkt_relpooltoken(kq);
                                goto restart;
                        }
                        if (knote_acquire(kn))
                                knote_detach_and_drop(kn);
                        lwkt_relpooltoken(kq);
                        goto restart;
                }
        }
        lwkt_relpooltoken(&fp->f_klist);
}
/*
 * Low level attach function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_attach(struct knote *kn)
{
        struct klist *list;
        struct kqueue *kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                KKASSERT(kn->kn_fp);
                list = &kn->kn_fp->f_klist;
        } else {
                if (kq->kq_knhashmask == 0)
                        kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                                                 &kq->kq_knhashmask);
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
        }
        lwkt_getpooltoken(list);
        SLIST_INSERT_HEAD(list, kn, kn_link);
        TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
        lwkt_relpooltoken(list);
}

/*
 * Low level drop function.
 *
 * The knote should already be marked for processing.
 * Caller must hold the related kq token.
 */
static void
knote_drop(struct knote *kn)
{
        struct kqueue *kq;
        struct klist *list;

        kq = kn->kn_kq;

        if (kn->kn_fop->f_flags & FILTEROP_ISFD)
                list = &kn->kn_fp->f_klist;
        else
                list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

        lwkt_getpooltoken(list);
        SLIST_REMOVE(list, kn, knote, kn_link);
        TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
                fdrop(kn->kn_fp);
                kn->kn_fp = NULL;
        }
        knote_free(kn);
        lwkt_relpooltoken(list);
}

/*
 * Low level enqueue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_enqueue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
        TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status |= KN_QUEUED;
        ++kq->kq_count;

        /*
         * Send SIGIO on request (typically set up as a mailbox signal)
         */
        if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
                pgsigio(kq->kq_sigio, SIGIO, 0);

        kqueue_wakeup(kq);
}

/*
 * Low level dequeue function.
 *
 * The knote should already be marked for processing.
 * Caller must be holding the kq token
 */
static void
knote_dequeue(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;

        KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
        TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
        kn->kn_status &= ~KN_QUEUED;
        kq->kq_count--;
}

static struct knote *
knote_alloc(void)
{
        return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
}

static void
knote_free(struct knote *kn)
{
        kfree(kn, M_KQUEUE);
}