/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
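/*
 * Illustrative sketch (not part of the original file): a bus driver
 * typically embeds a struct rman, initializes it once at attach time,
 * and then carves allocations out of it for its children.  The field
 * and function names follow the rman(9) interface implemented below;
 * the 16-line IRQ range is a hypothetical example.
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "Interrupt request lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 *
 * Children then obtain ranges with rman_reserve_resource() and hand
 * them back with rman_release_resource().
 */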
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct device *r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params
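/*
 * Illustrative note (not part of the original file): since DPRINTF
 * expands to a bare if-statement, callers supply the whole printf
 * argument list in doubled parentheses and add the semicolon
 * themselves, as the calls below do:
 *
 *	DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
 *
 * Because the expansion is an unbraced if, the macro should not be
 * used as the body of an if/else without surrounding braces.
 */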
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);
static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL)
		r->r_r.__r_i = r;
	return (r);
}
int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
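/*
 * Illustrative note (not part of the original file): because exactly
 * adjacent free regions are coalesced above, managing [0x0, 0xfff]
 * and later [0x1000, 0x1fff] leaves a single free region
 * [0x0, 0x1fff], while managing [0x800, 0x17ff] instead would fail
 * with EBUSY since it overlaps the first region.
 */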
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}
int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}
int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing range.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL &&
	    !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
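/*
 * Illustrative sketch (not part of the original file): growing a
 * reserved range in place.  If a driver holds [0x1000, 0x1fff] in
 * "res" and the neighboring region [0x2000, 0x2fff] is still free in
 * the same rman, the window can be widened without releasing it:
 *
 *	if (rman_adjust_resource(res, 0x1000, 0x2fff) != 0)
 *		printf("cannot grow resource\n");
 *
 * The call returns EBUSY when a needed neighbor is allocated, and
 * EINVAL for shared resources or for a new range that does not
 * overlap the old one.
 */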
#define SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
    u_long count, u_long bound, u_int flags,
    struct device *dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	u_long rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= ULONG_MAX - amask,
	    ("start (%#lx) + amask (%#lx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > ULONG_MAX - amask) {
			DPRINTF(("s->r_start (%#lx) + amask (%#lx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */
out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
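/*
 * Illustrative sketch (not part of the original file): reserving
 * 0x100 ports aligned to 0x100 that must not cross a 0x400 boundary,
 * anywhere in [0, 0xffff].  The alignment is encoded in the flags by
 * rman_make_alignment_flags() (defined below); "io_rman" and "dev"
 * are assumed to exist:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource_bound(&io_rman, 0, 0xffff, 0x100,
 *	    0x400, rman_make_alignment_flags(0x100), dev);
 *	if (res == NULL)
 *		return (ENXIO);
 *
 * With amask = 0xff and bmask = ~0x3ff, a candidate start of 0x3c0
 * is first rounded up to 0x400 by the alignment step, and the
 * boundary test then passes because 0x400 and 0x4ff agree in all
 * bits covered by bmask.
 */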
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
    u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}
int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}
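/*
 * Illustrative note (not part of the original file): a matched
 * reserve/release pair leaves the free list exactly as it was,
 * because int_rman_release_resource() coalesces the freed range back
 * into its unallocated neighbors:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&io_rman, 0, 0xffff, 0x10, 0, dev);
 *	if (res != NULL)
 *		rman_release_resource(res);
 */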
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
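/*
 * Illustrative note (not part of the original file): for a power of
 * two the encoding is exact, e.g. rman_make_alignment_flags(0x1000)
 * encodes log2 = 12 (4 KB alignment).  For other sizes the log is
 * rounded up: size 0x180 has its highest set bit at 8 but a lower
 * bit set as well, so i is bumped to 9 and the encoded alignment is
 * 0x200.
 */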
void
rman_set_start(struct resource *r, u_long start)
{

	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{

	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{

	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
#ifdef DDB
#include <ddb/ddb.h>

static void
dump_rman_header(struct rman *rm)
{

	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif