2 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: head/sys/dev/sound/clone.c 193640 2009-06-07 19:12:08Z ariff $
29 #include <sys/param.h>
30 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
35 #include <sys/devfs.h>
37 #ifdef HAVE_KERNEL_OPTION_HEADERS
41 #include <dev/sound/pcm/sound.h>
42 #include <dev/sound/clone.h>
/* Unit-number bitmap used by devfs to hand out /dev/dsp clone subunits. */
DEVFS_DECLARE_CLONE_BITMAP(dsp);
47 * So here we go again, another clonedevs manager. Unlike default clonedevs,
48 * this clone manager is designed to withstand various abusive behavior
49 * (such as 'while : ; do ls /dev/whatever ; done', etc.), reusable object
50 * after reaching certain expiration threshold, aggressive garbage collector,
51 * transparent device allocator and concurrency handling across multiple
52 * thread/proc. Due to limited information given by dev_clone EVENTHANDLER,
 * we don't have many clues whether the caller wants a real open() or simply
 * making fun of us with things like stat(), mtime() etc. Assuming that:
55 * 1) Time window between dev_clone EH <-> real open() should be small
56 * enough and 2) mtime()/stat() etc. always looks like a half way / stalled
57 * operation, we can decide whether a new cdev must be created, old
58 * (expired) cdev can be reused or an existing cdev can be shared.
60 * Most of the operations and logics are generic enough and can be applied
61 * on other places (such as if_tap, snp, etc). Perhaps this can be
62 * rearranged to complement clone_*(). However, due to this still being
63 * specific to the sound driver (and as a proof of concept on how it can be
64 * done), si_drv2 is used to keep the pointer of the clone list entry to
65 * avoid expensive lookup.
69 struct snd_clone_entry
{
70 TAILQ_ENTRY(snd_clone_entry
) link
;
71 struct snd_clone
*parent
;
81 TAILQ_HEAD(link_head
, snd_clone_entry
) head
;
/*
 * SND_CLONE_ASSERT() : Diagnostic builds panic outright on a failed
 * assertion; regular builds degrade to a plain KASSERT().
 */
#ifdef SND_DIAGNOSTIC
#define SND_CLONE_ASSERT(x, y)	do {			\
	if (!(x))					\
		panic y;				\
} while (0)
#else
#define SND_CLONE_ASSERT(...)	KASSERT(__VA_ARGS__)
#endif
101 * Shamelessly ripped off from vfs_subr.c
102 * We need at least 1/HZ precision as default timestamping.
104 enum { SND_TSP_SEC
, SND_TSP_HZ
, SND_TSP_USEC
, SND_TSP_NSEC
};
106 static int snd_timestamp_precision
= SND_TSP_HZ
;
107 TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision
);
110 snd_timestamp(struct timespec
*tsp
)
114 switch (snd_timestamp_precision
) {
116 tsp
->tv_sec
= time_second
;
124 TIMEVAL_TO_TIMESPEC(&tv
, tsp
);
130 snd_timestamp_precision
= SND_TSP_HZ
;
#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
/*
 * Sysctl handler: validate and apply a new timestamp precision. Only the
 * four known SND_TSP_* values are accepted; anything else is silently
 * ignored.
 */
static int
sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = snd_timestamp_precision;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err == 0 && req->newptr != NULL) {
		switch (val) {
		case SND_TSP_SEC:
		case SND_TSP_HZ:
		case SND_TSP_USEC:
		case SND_TSP_NSEC:
			snd_timestamp_precision = val;
			break;
		default:
			break;
		}
	}

	return (err);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
    "timestamp precision (0=s 1=hz 2=us 3=ns)");
#endif
165 * snd_clone_create() : Return opaque allocated clone manager.
168 snd_clone_create(int typemask
, int maxunit
, int deadline
, uint32_t flags
)
172 SND_CLONE_ASSERT(!(typemask
& ~SND_CLONE_MAXUNIT
),
173 ("invalid typemask: 0x%08x", typemask
));
174 SND_CLONE_ASSERT(!(flags
& ~SND_CLONE_MASK
),
175 ("invalid clone flags=0x%08x", flags
));
177 c
= kmalloc(sizeof(*c
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
180 c
->typemask
= typemask
;
181 c
->maxunit
= (maxunit
== -1) ? (~typemask
& SND_CLONE_MAXUNIT
) :
183 c
->deadline
= deadline
;
185 snd_timestamp(&c
->tsp
);
186 TAILQ_INIT(&c
->head
);
192 snd_clone_busy(struct snd_clone
*c
)
194 struct snd_clone_entry
*ce
;
196 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
201 TAILQ_FOREACH(ce
, &c
->head
, link
) {
202 if (ce
->flags
& SND_CLONE_BUSY
)
210 * snd_clone_enable()/disable() : Suspend/resume clone allocation through
211 * snd_clone_alloc(). Everything else will not be affected by this.
214 snd_clone_enable(struct snd_clone
*c
)
216 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
218 if (c
->flags
& SND_CLONE_ENABLE
)
221 c
->flags
|= SND_CLONE_ENABLE
;
227 snd_clone_disable(struct snd_clone
*c
)
229 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
231 if (!(c
->flags
& SND_CLONE_ENABLE
))
234 c
->flags
&= ~SND_CLONE_ENABLE
;
240 * Getters / Setters. Not worth explaining :)
243 snd_clone_getsize(struct snd_clone
*c
)
245 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
251 snd_clone_getmaxunit(struct snd_clone
*c
)
253 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
259 snd_clone_setmaxunit(struct snd_clone
*c
, int maxunit
)
261 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
263 c
->maxunit
= (maxunit
== -1) ? (~c
->typemask
& SND_CLONE_MAXUNIT
) :
270 snd_clone_getdeadline(struct snd_clone
*c
)
272 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
274 return (c
->deadline
);
278 snd_clone_setdeadline(struct snd_clone
*c
, int deadline
)
280 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
282 c
->deadline
= deadline
;
284 return (c
->deadline
);
288 snd_clone_gettime(struct snd_clone
*c
, struct timespec
*tsp
)
290 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
291 SND_CLONE_ASSERT(tsp
!= NULL
, ("NULL timespec"));
299 snd_clone_getflags(struct snd_clone
*c
)
301 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
307 snd_clone_setflags(struct snd_clone
*c
, uint32_t flags
)
309 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
310 SND_CLONE_ASSERT(!(flags
& ~SND_CLONE_MASK
),
311 ("invalid clone flags=0x%08x", flags
));
319 snd_clone_getdevtime(struct cdev
*dev
, struct timespec
*tsp
)
321 struct snd_clone_entry
*ce
;
323 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
324 SND_CLONE_ASSERT(tsp
!= NULL
, ("NULL timespec"));
330 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
338 snd_clone_getdevflags(struct cdev
*dev
)
340 struct snd_clone_entry
*ce
;
342 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
348 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
354 snd_clone_setdevflags(struct cdev
*dev
, uint32_t flags
)
356 struct snd_clone_entry
*ce
;
358 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
359 SND_CLONE_ASSERT(!(flags
& ~SND_CLONE_DEVMASK
),
360 ("invalid clone dev flags=0x%08x", flags
));
366 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
/* Elapsed time conversion to ms */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

/*
 * An entry is expired when the manager's deadline is disabled (< 1) or
 * more than (x)->deadline ms elapsed between timestamps z and y.
 */
#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
387 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
388 * clone.h for explanations on GC settings.
391 snd_clone_gc(struct snd_clone
*c
)
393 struct snd_clone_entry
*ce
, *tce
;
398 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
400 if (!(c
->flags
& SND_CLONE_GC_ENABLE
) || c
->size
== 0)
406 * Bail out if the last clone handler was invoked below the deadline
409 if ((c
->flags
& SND_CLONE_GC_EXPIRED
) &&
410 !SND_CLONE_EXPIRED(c
, &now
, &c
->tsp
))
416 * Visit each object in reverse order. If the object is still being
417 * referenced by a valid open(), skip it. Look for expired objects
418 * and either revoke its clone invocation status or mercilessly
421 TAILQ_FOREACH_REVERSE_MUTABLE(ce
, &c
->head
, link_head
, link
, tce
) {
422 if (!(ce
->flags
& SND_CLONE_BUSY
) &&
423 (!(ce
->flags
& SND_CLONE_INVOKE
) ||
424 SND_CLONE_EXPIRED(c
, &now
, &ce
->tsp
))) {
425 if (c
->flags
& SND_CLONE_GC_REVOKE
) {
426 ce
->flags
&= ~SND_CLONE_INVOKE
;
429 TAILQ_REMOVE(&c
->head
, ce
, link
);
430 subunit
= PCMSUBUNIT(ce
->devt
);
431 destroy_dev(ce
->devt
);
432 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(dsp
), subunit
);
440 /* return total pruned objects */
445 snd_clone_destroy(struct snd_clone
*c
)
447 struct snd_clone_entry
*ce
, *tmp
;
450 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
452 ce
= TAILQ_FIRST(&c
->head
);
454 tmp
= TAILQ_NEXT(ce
, link
);
455 if (ce
->devt
!= NULL
) {
456 subunit
= PCMSUBUNIT(ce
->devt
);
457 destroy_dev(ce
->devt
);
458 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(dsp
), subunit
);
468 * snd_clone_acquire() : The vital part of concurrency management. Must be
469 * called somewhere at the beginning of open() handler. ENODEV is not really
470 * fatal since it just tell the caller that this is not cloned stuff.
471 * EBUSY is *real*, don't forget that!
474 snd_clone_acquire(struct cdev
*dev
)
476 struct snd_clone_entry
*ce
;
478 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
484 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
486 ce
->flags
&= ~SND_CLONE_INVOKE
;
488 if (ce
->flags
& SND_CLONE_BUSY
)
491 ce
->flags
|= SND_CLONE_BUSY
;
497 * snd_clone_release() : Release busy status. Must be called somewhere at
498 * the end of close() handler, or somewhere after fail open().
501 snd_clone_release(struct cdev
*dev
)
503 struct snd_clone_entry
*ce
;
505 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
511 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
513 ce
->flags
&= ~SND_CLONE_INVOKE
;
515 if (!(ce
->flags
& SND_CLONE_BUSY
))
518 ce
->flags
&= ~SND_CLONE_BUSY
;
525 * snd_clone_ref/unref() : Garbage collector reference counter. To make
526 * garbage collector run automatically, the sequence must be something like
527 * this (both in open() and close() handlers):
529 * open() - 1) snd_clone_acquire()
530 * 2) .... check check ... if failed, snd_clone_release()
531 * 3) Success. Call snd_clone_ref()
533 * close() - 1) .... check check check ....
534 * 2) Success. snd_clone_release()
535 * 3) snd_clone_unref() . Garbage collector will run at this point
536 * if this is the last referenced object.
539 snd_clone_ref(struct cdev
*dev
)
541 struct snd_clone_entry
*ce
;
544 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
551 SND_CLONE_ASSERT(c
!= NULL
, ("NULL parent"));
552 SND_CLONE_ASSERT(c
->refcount
>= 0, ("refcount < 0"));
554 return (++c
->refcount
);
558 snd_clone_unref(struct cdev
*dev
)
560 struct snd_clone_entry
*ce
;
563 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
570 SND_CLONE_ASSERT(c
!= NULL
, ("NULL parent"));
571 SND_CLONE_ASSERT(c
->refcount
> 0, ("refcount <= 0"));
576 * Run automatic garbage collector, if needed.
578 if ((c
->flags
& SND_CLONE_GC_UNREF
) &&
579 (!(c
->flags
& SND_CLONE_GC_LASTREF
) ||
580 (c
->refcount
== 0 && (c
->flags
& SND_CLONE_GC_LASTREF
))))
581 (void)snd_clone_gc(c
);
583 return (c
->refcount
);
587 snd_clone_register(struct snd_clone_entry
*ce
, struct cdev
*dev
)
589 SND_CLONE_ASSERT(ce
!= NULL
, ("NULL snd_clone_entry"));
590 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev"));
591 SND_CLONE_ASSERT(dev
->si_drv2
== NULL
, ("dev->si_drv2 not NULL"));
592 SND_CLONE_ASSERT((ce
->flags
& SND_CLONE_ALLOC
) == SND_CLONE_ALLOC
,
593 ("invalid clone alloc flags=0x%08x", ce
->flags
));
594 SND_CLONE_ASSERT(ce
->devt
== NULL
, ("ce->devt not NULL"));
595 #if 0 /* dev2unit doesn't make any sense on DragonFly */
596 SND_CLONE_ASSERT(ce
->unit
== dev2unit(dev
),
597 ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
598 ce
->unit
, dev2unit(dev
)));
601 SND_CLONE_ASSERT(ce
->parent
!= NULL
, ("NULL parent"));
605 ce
->flags
&= ~SND_CLONE_ALLOC
;
606 ce
->flags
|= SND_CLONE_INVOKE
;
609 struct snd_clone_entry
*
610 snd_clone_alloc(struct snd_clone
*c
, struct cdev
**dev
, int *unit
, int tmask
)
612 struct snd_clone_entry
*ce
, *after
, *bce
, *cce
, *nce
, *tce
;
614 int cunit
, allocunit
;
617 SND_CLONE_ASSERT(c
!= NULL
, ("NULL snd_clone"));
618 SND_CLONE_ASSERT(dev
!= NULL
, ("NULL dev pointer"));
619 SND_CLONE_ASSERT((c
->typemask
& tmask
) == tmask
,
620 ("invalid tmask: typemask=0x%08x tmask=0x%08x",
621 c
->typemask
, tmask
));
622 SND_CLONE_ASSERT(unit
!= NULL
, ("NULL unit pointer"));
623 SND_CLONE_ASSERT(*unit
== -1 || !(*unit
& (c
->typemask
| tmask
)),
624 ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
625 c
->typemask
, tmask
, *unit
));
627 if (!(c
->flags
& SND_CLONE_ENABLE
) ||
628 (*unit
!= -1 && *unit
> c
->maxunit
))
633 bce
= NULL
; /* "b"usy candidate */
634 cce
= NULL
; /* "c"urthread/proc candidate */
635 nce
= NULL
; /* "n"ull, totally unbusy candidate */
636 tce
= NULL
; /* Last "t"ry candidate */
638 allocunit
= (*unit
== -1) ? 0 : *unit
;
639 curpid
= curthread
->td_proc
->p_pid
;
643 TAILQ_FOREACH(ce
, &c
->head
, link
) {
645 * Sort incrementally according to device type.
647 if (tmask
> (ce
->unit
& c
->typemask
)) {
651 } else if (tmask
< (ce
->unit
& c
->typemask
))
655 * Shoot.. this is where the grumpiness begin. Just
656 * return immediately.
658 if (*unit
!= -1 && *unit
== (ce
->unit
& ~tmask
))
659 goto snd_clone_alloc_out
;
663 * Simmilar device type. Sort incrementally according
664 * to allocation unit. While here, look for free slot
665 * and possible collision for new / future allocation.
667 if (*unit
== -1 && (ce
->unit
& ~tmask
) == allocunit
)
669 if ((ce
->unit
& ~tmask
) < allocunit
)
673 * 1. Look for non busy, but keep track of the best
674 * possible busy cdev.
675 * 2. Look for the best (oldest referenced) entry that is
676 * in a same process / thread.
677 * 3. Look for the best (oldest referenced), absolute free
679 * 4. Lastly, look for the best (oldest referenced)
680 * any entries that doesn't fit with anything above.
682 if (ce
->flags
& SND_CLONE_BUSY
) {
683 if (ce
->devt
!= NULL
&& (bce
== NULL
||
684 timespeccmp(&ce
->tsp
, &bce
->tsp
, <)))
688 if (ce
->pid
== curpid
&&
689 (cce
== NULL
|| timespeccmp(&ce
->tsp
, &cce
->tsp
, <)))
691 else if (!(ce
->flags
& SND_CLONE_INVOKE
) &&
692 (nce
== NULL
|| timespeccmp(&ce
->tsp
, &nce
->tsp
, <)))
694 else if (tce
== NULL
|| timespeccmp(&ce
->tsp
, &tce
->tsp
, <))
698 goto snd_clone_alloc_new
;
699 else if (cce
!= NULL
) {
700 /* Same proc entry found, go for it */
702 goto snd_clone_alloc_out
;
703 } else if (nce
!= NULL
) {
705 * Next, try absolute free entry. If the calculated
706 * allocunit is smaller, create new entry instead.
708 if (allocunit
< (nce
->unit
& ~tmask
))
709 goto snd_clone_alloc_new
;
711 goto snd_clone_alloc_out
;
712 } else if (allocunit
> c
->maxunit
) {
714 * Maximum allowable unit reached. Try returning any
715 * available cdev and hope for the best. If the lookup is
716 * done for things like stat(), mtime() etc. , things should
717 * be ok. Otherwise, open() handler should do further checks
718 * and decide whether to return correct error code or not.
722 goto snd_clone_alloc_out
;
723 } else if (bce
!= NULL
) {
725 goto snd_clone_alloc_out
;
732 * No free entries found, and we still haven't reached maximum
733 * allowable units. Allocate, setup a minimal unique entry with busy
734 * status so nobody will monkey on this new entry. Unit magic is set
735 * right here to avoid collision with other contesting handler.
736 * The caller must be carefull here to maintain its own
737 * synchronization, as long as it will not conflict with malloc(9)
740 * That said, go figure.
742 ce
= kmalloc(sizeof(*ce
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
747 * We're being dense, ignorance is bliss,
748 * Super Regulatory Measure (TM).. TRY AGAIN!
752 goto snd_clone_alloc_out
;
753 } else if (tce
!= NULL
) {
755 goto snd_clone_alloc_out
;
756 } else if (bce
!= NULL
) {
758 goto snd_clone_alloc_out
;
762 /* Setup new entry */
764 ce
->unit
= tmask
| allocunit
;
767 ce
->flags
|= SND_CLONE_ALLOC
;
769 TAILQ_INSERT_AFTER(&c
->head
, after
, ce
, link
);
771 TAILQ_INSERT_HEAD(&c
->head
, ce
, link
);
776 * Save new allocation unit for caller which will be used
785 * Set, mark, timestamp the entry if this is a truly free entry.
786 * Leave busy entry alone.
788 if (!(ce
->flags
& SND_CLONE_BUSY
)) {
791 ce
->flags
|= SND_CLONE_INVOKE
;