/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>
/* local data struct */
static	dp_entry_t	**devpolltbl;	/* dev poll entries */
static	size_t		dptblsize;

static	kmutex_t	devpoll_lock;	/* lock protecting dev tbl */
int			devpoll_init;	/* is /dev/poll initialized already */
/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;
static struct cb_ops	dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	ddi_prop_op,		/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_MP,			/* flags */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};
static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};
static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"/dev/poll driver",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
/*
 * The /dev/poll driver shares most of its code with the poll(2) system
 * call, whose code is in common/syscall/poll.c. In the poll(2) design, the
 * pollcache structure is per lwp. An implicit assumption is made there
 * that some portion of the pollcache will never be touched by other lwps.
 * E.g., in the poll(2) design, no lwp will ever need to grow the bitmap of
 * another lwp. This assumption is not true for /dev/poll; hence the need
 * for extra locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock. Since read (dpioctl) is a much more
 * frequent operation than write, we want to allow multiple reads on the
 * same /dev/poll fd. However, we prevent writes from being starved by
 * giving priority to write operations. Theoretically writes can starve
 * reads as well, but in a practical sense this is not important because
 * (1) writes happen less often than reads, and (2) a write operation
 * defines the contents of the cached poll fd set. If writes happen so
 * often that they can starve reads, that means the cached set is very
 * unstable, and it may not make sense to read an unstable cache set
 * anyway. Therefore, the writers-starving-readers case is not handled in
 * this design.
 */
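
/*
 * For illustration only (not part of the driver): a minimal sketch of how a
 * user-level consumer typically drives this interface, assuming the usual
 * <sys/devpoll.h> definitions; "sock_fd" and "pipe_fd" are hypothetical
 * descriptors and all error handling is omitted.
 *
 *	int dpfd = open("/dev/poll", O_RDWR);
 *
 *	pollfd_t fds[2];
 *	fds[0].fd = sock_fd;			// hypothetical fd
 *	fds[0].events = POLLIN;
 *	fds[1].fd = pipe_fd;			// hypothetical fd
 *	fds[1].events = POLLOUT;
 *	(void) write(dpfd, fds, sizeof (fds));	// cache the fds (dpwrite)
 *
 *	pollfd_t ready[2];
 *	struct dvpoll dvp;
 *	dvp.dp_fds = ready;
 *	dvp.dp_nfds = 2;
 *	dvp.dp_timeout = 1000;			// relative timeout in ms
 *	int n = ioctl(dpfd, DP_POLL, &dvp);	// returns count of ready fds
 */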
int
_init()
{
	int	error;

	dptblsize = DEVPOLLSIZE;
	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
	devpoll_init = 1;
	if ((error = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&devpoll_lock);
		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	}
	return (error);
}

int
_fini()
{
	int	error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}
	mutex_destroy(&devpoll_lock);
	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*ARGSUSED*/
static int
dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
	    == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}
	dpdevi = devi;
	return (DDI_SUCCESS);
}
static int
dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_minor_node(devi, NULL);
	return (DDI_SUCCESS);
}
/* ARGSUSED */
static int
dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dpdevi;
		return (DDI_SUCCESS);
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}
/*
 * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
 * differences are: (1) /dev/poll requires scanning the bitmap starting at
 * where it was stopped last time, instead of always starting from 0;
 * (2) since the user may not have cleaned up the cached fds when they are
 * closed, some polldats in the cache may refer to closed or reused fds. We
 * need to check for those cases.
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries.
 */
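
/*
 * Illustrative sketch (simplified from the loop below) of the circular
 * bitmap scan: look for a set bit from the saved starting point to the end
 * of the map, then wrap around to the front so no fd is starved.
 *
 *	start = pcp->pc_mapstart;
 *	fd = bt_getlowbit(pcp->pc_bitmap, start, pcp->pc_mapend);
 *	if (fd < 0 && start != 0)
 *		fd = bt_getlowbit(pcp->pc_bitmap, 0, start - 1);
 */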
static int
dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done, no_wrap;
	file_t		*fp;
	short		revent;
	pollhead_t	*php;
	polldat_t	*pdp;
	int		error = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning; no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bit map in a circular fashion
		 * to avoid starvation. Always resume from
		 * last stop. Scan till end of the map. Then
		 * wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					end = start - 1;
				}
			}
			start = fd + 1;
			pdp = pcache_lookup_fd(pcp, fd);
repoll:
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVed. This fd is
				 * logically no longer cached. So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but the user has
				 * not done a POLLREMOVE on this fd yet.
				 * Instead of cleaning it up here implicitly,
				 * we return POLLNVAL. This is consistent
				 * with poll(2) polling a closed fd.
				 * Hopefully it will remind the user to do a
				 * POLLREMOVE.
				 */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].revents = POLLNVAL;
				fdcnt++;
				continue;
			}
			if (fp != pdp->pd_fp) {
				/*
				 * The user is polling on a cached fd which
				 * was closed and then reused. Unfortunately
				 * there is no good way to inform the user.
				 * If the file struct is also reused, we may
				 * not be able to detect the fd reuse at all.
				 * As long as this does not cause a system
				 * failure and/or a memory leak, we will play
				 * along. The man page states that if the
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 *	 reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it using the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface will
			 * require every driver/file system poll routine to
			 * change. May want to revisit the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}
			/*
			 * Layered devices (e.g. the console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto repoll;
			}
			if (revent != 0) {
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].events = pdp->pd_events;
				pfdp[fdcnt].revents = revent;
				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy drivers may return
				 * a NULL php pointer with 0 revents. In
				 * this case, we just treat the driver as
				 * "noncachable" and do not clear the bit
				 * in the bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
					/*
					 * An event of interest may have
					 * arrived between the VOP_POLL() and
					 * the pollhead_insert(); check again.
					 */
					goto repoll;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}
/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * Allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * needed size yet.
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	pcp->pc_pid = curproc->p_pid;
	*devp = makedevice(getmajor(*devp), minordev);	/* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}
/*
 * Write to /dev/poll to add/remove fds to/from a cached poll fd set,
 * or to change the poll events for a watched fd.
 */
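
/*
 * For example (user level, illustrative only): a watched descriptor is
 * dropped from the cached set by writing a pollfd whose events field is
 * POLLREMOVE; "dpfd" and "sock_fd" are hypothetical descriptors.
 *
 *	pollfd_t pfd;
 *	pfd.fd = sock_fd;
 *	pfd.events = POLLREMOVE;
 *	(void) write(dpfd, &pfd, sizeof (pfd));
 */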
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	polldat_t	*pdp;
	pollfd_t	*pollfdp, *pfdp;
	int		error = 0;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	int		fd;
	file_t		*fp;
	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid) {
		return (set_errno(EACCES));
	}
	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / sizeof (pollfd_t);
	mutex_enter(&curproc->p_lock);
	if (pollfdnum > (uint_t)rctl_enforced_value(
	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc, RCA_SAFE);
		mutex_exit(&curproc->p_lock);
		return (set_errno(EINVAL));
	}
	mutex_exit(&curproc->p_lock);
	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent the
	 * offset from growing and eventually exceeding the maximum, reset
	 * the offset here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (set_errno(error));
	}
	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code and no other readers in dpioctl.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while (dpep->dpe_refcnt != 0) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);
	mutex_enter(&pcp->pc_lock);
	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {
			if (pdp == NULL) {
				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			}
			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in bitmap to force DP_POLL ioctl
				 * to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}
			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * the same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp != NULL)) {
				/*
				 * the events are already cached
				 */
				releasef(fd);
				continue;
			}

			/*
			 * do VOP_POLL and cache this poll fd.
			 */
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it using the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface will
			 * require every driver/file system poll routine to
			 * change. May want to revisit the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached;
			 * this forces the first DP_POLL to poll this fd.
			 * Real performance gain comes from subsequent
			 * DP_POLL. We also attempt a pollhead_insert();
			 * if it's not possible, we'll do it in dpioctl().
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}
			}
			releasef(fd);
		} else {
			if (pdp == NULL) {
				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_events = 0;
			pdp->pd_fp = NULL;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	mutex_exit(&pcp->pc_lock);
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	ASSERT(dpep->dpe_refcnt == 1);
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}
/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	hrtime_t	now;
	int		error = 0;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT */
		now = gethrtime();
	}
	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;
	if (curproc->p_pid != pcp->pc_pid)
		return (set_errno(EACCES));

	mutex_enter(&dpep->dpe_lock);
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case	DP_POLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		hrtime_t	deadline = 0;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
		    STRUCT_SIZE(dvpoll));
		if (error) {
			DP_REFRELE(dpep);
			return (set_errno(EFAULT));
		}

		deadline = STRUCT_FGET(dvpoll, dp_timeout);
		if (deadline > 0) {
			/*
			 * Convert the deadline from relative milliseconds
			 * to absolute nanoseconds. The caller must wait
			 * for at least a tick.
			 */
			deadline = MSEC2NSEC(deadline);
			deadline = MAX(deadline, nsec_per_tick);
			deadline += now;
		}
		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so
			 * we don't need any of the devpoll apparatus.
			 * Do not check for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (deadline == 0)
				return (0);
			mutex_enter(&curthread->t_delay_lock);
			while ((error =
			    cv_timedwait_sig_hrtime(&curthread->t_delay_cv,
			    &curthread->t_delay_lock, deadline)) > 0)
				continue;
			mutex_exit(&curthread->t_delay_lock);
			return (error == 0 ? EINTR : 0);
		}
		/*
		 * XXX It would be nice not to have to alloc each time, but it
		 * requires another per thread structure hook. This can be
		 * implemented later if data suggests that it's necessary.
		 */
		if ((ps = curthread->t_pollstate) == NULL) {
			curthread->t_pollstate = pollstate_create();
			ps = curthread->t_pollstate;
		}
		if (ps->ps_dpbufsize < nfds) {
			struct proc *p = ttoproc(curthread);
			/*
			 * The maximum size should be no larger than
			 * the current maximum open file count.
			 */
			mutex_enter(&p->p_lock);
			if (nfds > p->p_fno_ctl) {
				mutex_exit(&p->p_lock);
				DP_REFRELE(dpep);
				return (set_errno(EINVAL));
			}
			mutex_exit(&p->p_lock);
			kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) *
			    ps->ps_dpbufsize);
			ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) *
			    nfds, KM_SLEEP);
			ps->ps_dpbufsize = nfds;
		}
		mutex_enter(&pcp->pc_lock);
		for (;;) {
			pcp->pc_flag = 0;
			error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/*
			 * A pollwake has happened since we polled the cache.
			 */
			if (pcp->pc_flag & T_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signaled, or timed out.
			 */
			if (deadline == 0) {
				/* immediate timeout; do not check signals */
				break;
			}
			error = cv_timedwait_sig_hrtime(&pcp->pc_cv,
			    &pcp->pc_lock, deadline);
			/*
			 * If we were awakened by a signal or timeout
			 * then break the loop, else poll again.
			 */
			if (error <= 0) {
				error = (error == 0) ? EINTR : 0;
				break;
			} else {
				error = 0;
			}
		}
		mutex_exit(&pcp->pc_lock);
		if (error == 0 && fdcnt > 0) {
			if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll,
			    dp_fds), sizeof (pollfd_t) * fdcnt)) {
				DP_REFRELE(dpep);
				return (set_errno(EFAULT));
			}
			*rvalp = fdcnt;
		}
		break;
	}
	case	DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (set_errno(EFAULT));
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (set_errno(EFAULT));
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (set_errno(EINVAL));
	}
	DP_REFRELE(dpep);
	return (error);
}
/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	/*
	 * Polling on a /dev/poll fd is not fully supported yet.
	 */
	*reventsp = POLLERR;
	return (0);
}
/*
 * devpoll close should do enough clean up before the pollcache is deleted,
 * i.e., it should ensure that no one still references the pollcache later.
 * There is no "permission" check in here. Any process holding the last
 * reference to this /dev/poll fd can close it.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd. This pollcache is going away, so do the clean
	 * up without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache. Wait until
	 * it is done.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}