/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
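
/*
 * A minimal usage sketch, in the style of the rman(9) manual page; the
 * rman name and the region values below are illustrative, not taken from
 * a real driver.  A bus driver describes the index range it owns once,
 * and then carves pieces out of it on behalf of its children:
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "Interrupt request lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 */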

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */

struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct device *r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");
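
/*
 * The debug printfs below can be enabled either at boot, by setting
 * debug.rman_debug=1 in loader.conf(5) (via the tunable above), or at
 * run time with `sysctl debug.rman_debug=1', since the OID is CTLFLAG_RW.
 */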

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx;	/* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
		       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound, u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
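	/*
	 * A worked example (illustrative values, not from the original):
	 * for a request encoding RF_ALIGNMENT_LOG2(12) (4KB alignment)
	 * with bound = 0x10000 (a 64KB boundary), we get amask = 0x0fff
	 * and bmask = ~0xffff.  The search loop below rounds each
	 * candidate start up to a multiple of 4KB and then, if the span
	 * [rstart, rstart + count - 1] straddles a 64KB boundary,
	 * advances rstart to the next such boundary.
	 */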
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
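
/*
 * For example (hedged: a hypothetical bus driver, with names in the style
 * of rman(9); `irq_rman' and `child' are illustrative): to reserve IRQ 5
 * from the irq_rman set up earlier, allowing other devices to share it
 * and activating it immediately:
 *
 *	struct resource *r;
 *
 *	r = rman_reserve_resource(&irq_rman, 5, 5, 1,
 *	    RF_SHAREABLE | RF_ACTIVE, child);
 *	if (r == NULL)
 *		return (ENXIO);
 */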

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	for (;;) {
		mtx_lock(rm->rm_mtx);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
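
/*
 * For instance (illustrative values): rman_make_alignment_flags(0x1000)
 * finds bit 12 set and no lower bits, so it yields RF_ALIGNMENT_LOG2(12),
 * i.e. 4KB alignment.  A non-power-of-two size such as 0x1800 rounds up:
 * the highest set bit is 12, but a lower bit is also set, so the result
 * is RF_ALIGNMENT_LOG2(13).
 */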

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters (preceded by the bus data generation):
 * the index into the list of resource managers, and the resource offset
 * into that manager's list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager itself.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	db_printf("rman: %s\n", rm->rm_descr);
	db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(allrman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link)
		dump_rman(rm);
}
#endif