/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
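
/*
 * Illustrative sketch (not part of the original file): roughly how a bus
 * driver is expected to drive this interface, modeled on nexus-style code.
 * The rman, function name, and address range below are hypothetical, and
 * the whole block is compiled out with #if 0.
 */
#if 0
static struct rman example_rman;

static struct resource *
example_setup(struct device *dev)
{

	example_rman.rm_start = 0x80000000ul;
	example_rman.rm_end = 0x8ffffffful;
	example_rman.rm_type = RMAN_ARRAY;
	example_rman.rm_descr = "example memory window";
	if (rman_init(&example_rman) != 0 ||
	    rman_manage_region(&example_rman, example_rman.rm_start,
	    example_rman.rm_end) != 0)
		panic("example_setup: rman setup failed");

	/*
	 * Reserve a 4KB, 4KB-aligned chunk anywhere in the window on
	 * behalf of `dev'; RF_ACTIVE asks for atomic activation.
	 */
	return (rman_reserve_resource_bound(&example_rman,
	    example_rman.rm_start, example_rman.rm_end, 0x1000, 0,
	    rman_make_alignment_flags(0x1000) | RF_ACTIVE, dev));
}
#endif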
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

int	rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/*
		 * Check for any overlap with the current region.  (Drop
		 * the mutex and free the new entry before failing, so we
		 * neither leak r nor return with the rman lock held.)
		 */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
			    u_long count, u_long bound, u_int flags,
			    struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
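		 *
		 * (Worked example, not in the original comment: with 2^4
		 * alignment, amask is 0xf; with bound 0x100, bmask is ~0xff.
		 * For rstart 0x1f9 and count 0x10, the first line rounds
		 * rstart up to 0x200; [0x200, 0x20f] crosses no 0x100
		 * boundary, so the loop exits with rstart aligned.)
		 */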
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
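			 *
			 * (Illustration, not in the original comment:
			 * carving [0x120, 0x12f] out of the free region
			 * [0x100, 0x1ff] leaves [0x100, 0x11f] and
			 * [0x130, 0x1ff] as the two new free neighbors.)
			 */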
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
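	 *
	 * (Example, not in the original comment: if [0x100, 0x1ff] is
	 * already allocated RF_SHAREABLE, a second RF_SHAREABLE request
	 * for exactly [0x100, 0x1ff] can be satisfied from it, but a
	 * request for the sub-range [0x100, 0x17f] cannot, per the
	 * assumption stated at the top of this file.)
	 */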
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */
		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}
	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
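
/*
 * (Usage note, not in the original source: rman_make_alignment_flags(0x1000)
 * yields RF_ALIGNMENT_LOG2(12), i.e. a request for 4KB alignment, while a
 * non-power-of-two size such as 0x1800 rounds up to RF_ALIGNMENT_LOG2(13).)
 */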
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{
	return (r->__r_i->r_rm == rm);
}
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];
	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
#ifdef DDB
static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	db_printf("rman: %s\n", rm->rm_descr);
	db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr)
		dump_rman((struct rman *)addr);
}

DB_SHOW_COMMAND(allrman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link)
		dump_rman(rm);
}
#endif