/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_rman.c,v 1.10.2.1 2001/06/05 08:06:08 imp Exp $
 */
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
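/*
 * A minimal usage sketch of the array type (hypothetical, for
 * illustration only): a bus driver managing a 16-entry "foo" register
 * window might drive this API as follows.  The names foo_rman and dev
 * are assumptions, not part of this file.
 */
#if 0
	static struct rman foo_rman;
	struct resource *r;

	foo_rman.rm_type = RMAN_ARRAY;
	foo_rman.rm_descr = "foo bus registers";
	rman_init(&foo_rman, 0);			/* attach to cpu 0 */
	rman_manage_region(&foo_rman, 0x0, 0xf);	/* indices 0..15 */

	/* carve out four consecutive entries, activated atomically */
	r = rman_reserve_resource(&foo_rman, 0x0, 0xf, 4, RF_ACTIVE, dev);
	...
	rman_release_resource(r);
	rman_fini(&foo_rman);
#endif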
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/bus.h>		/* XXX debugging */
#include <sys/rman.h>
#include <sys/sysctl.h>

static	int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) kprintf params
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

TAILQ_HEAD(rman_head, rman);
static struct rman_head rman_head;
static struct lwkt_token rman_tok;	/* mutex to protect rman_head */

static int int_rman_activate_resource(struct rman *rm, struct resource *r,
				      struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
static int int_rman_release_resource(struct rman *rm, struct resource *r);
int
rman_init(struct rman *rm, int cpuid)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		lwkt_token_init(&rman_tok, "rman");
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_slock = kmalloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == NULL)
		return ENOMEM;
	lwkt_token_init(rm->rm_slock, "rmanslock");

	rm->rm_cpuid = cpuid;
	rm->rm_hold = 0;

	lwkt_gettoken(&rman_tok);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);

	return 0;
}
/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = kmalloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == NULL)
		return ENOMEM;
	r->r_sharehead = 0;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = 0;
	r->r_rm = rm;

	lwkt_gettoken(rm->rm_slock);

	/* keep rm_list sorted: find the first region ending at or after us */
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL)
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	else
		TAILQ_INSERT_BEFORE(s, r, r_link);

	lwkt_reltoken(rm->rm_slock);
	return 0;
}
int
rman_fini(struct rman *rm)
{
	struct resource *r;

	/*
	 * All resources must already have been deallocated.
	 */
	lwkt_gettoken(rm->rm_slock);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			lwkt_reltoken(rm->rm_slock);
			return EBUSY;
		}
	}

	/*
	 * Protected list removal.  Once removed, wait for any temporary
	 * holds to be dropped before actually destroying the resource.
	 */
	lwkt_gettoken(&rman_tok);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);

	if (rm->rm_hold) {
		kprintf("debug: rman_fini(): rm_hold race fixed on %s\n",
			rm->rm_descr);
		while (rm->rm_hold)
			tsleep(rm, 0, "rmfree", 2);
	}

	/*
	 * Destroy all elements remaining on rm_list.
	 */
	while ((r = TAILQ_FIRST(&rm->rm_list)) != NULL) {
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		kfree(r, M_RMAN);
	}
	lwkt_reltoken(rm->rm_slock);

	/*
	 * Final cleanup.
	 */
	lwkt_token_uninit(rm->rm_slock);
	kfree(rm->rm_slock, M_RMAN);
	rm->rm_slock = NULL;

	return 0;
}
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, device_t dev)
{
	u_int want_activate;
	struct resource *r, *s, *rv;
	u_long rstart, rend;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	    "%#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	lwkt_gettoken(rm->rm_slock);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		rstart = rounddown2(rstart + (1ul << RF_ALIGNMENT(flags)) - 1,
		    1ul << RF_ALIGNMENT(flags));
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
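			/*
			 * Pictorially, the three-way split of s around
			 * the chosen window [rstart, rstart + count - 1]:
			 *
			 *   before: s = [s->r_start .............. s->r_end]
			 *   after:  s = [s->r_start .. rstart-1]
			 *           rv =           [rstart .. rstart+count-1]
			 *           r =                        [.. s->r_end]
			 */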
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_sharehead = 0;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = kmalloc(sizeof *r, M_RMAN,
				    M_NOWAIT | M_ZERO);
				if (r == NULL) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = 0;
				r->r_sharehead = 0;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
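	/*
	 * For instance, two drivers may share an interrupt line only if
	 * both request exactly the same single-entry range with matching
	 * RF_SHAREABLE/RF_TIMESHARE flags; a request that overlaps it
	 * with a different size will not match.
	 */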
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = kmalloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
	DPRINTF(("no region found\n"));
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;

		DPRINTF(("activating region\n"));
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	lwkt_reltoken(rm->rm_slock);
	return (rv);
}
static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	/* otherwise activate only if no other sharer currently has it active */
	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}
int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_activate_resource(rm, r, &whohas);
	lwkt_reltoken(rm->rm_slock);
	return rv;
}
#if 0

/* XXX */
int
rman_await_resource(struct resource *r, int slpflags, int timo)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	for (;;) {
		lwkt_gettoken(rm->rm_slock);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with ilock held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * A critical section will hopefully prevent a race
		 * between lwkt_reltoken and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it. YYY
		 */
		crit_enter();
		whohas->r_flags |= RF_WANTED;
		rv = tsleep(r->r_sharehead, slpflags, "rmwait", timo);
		if (rv) {
			lwkt_reltoken(rm->rm_slock);
			crit_exit();
			return rv;
		}
		crit_exit();
	}
}

#endif
static int
int_rman_deactivate_resource(struct resource *r)
{
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}
int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	int_rman_deactivate_resource(r);
	lwkt_reltoken(rm->rm_slock);
	return 0;
}
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			kfree(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
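	/*
	 * Three outcomes are possible here: both neighbors are free and
	 * s, r, and t collapse into a single segment; exactly one neighbor
	 * is free and it is extended to cover r; or neither is free and r
	 * simply reverts to an unallocated segment on the list.
	 */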
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		kfree(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	kfree(r, M_RMAN);
	return 0;
}
int
rman_release_resource(struct resource *r)
{
	struct rman *rm = r->r_rm;
	int rv;

	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_release_resource(rm, r);
	lwkt_reltoken(rm->rm_slock);
	return (rv);
}
/*
 * Find the highest bit set, and add one if more than one bit is
 * set.  We're effectively computing the ceil(log2(size)) here.
 *
 * This function cannot compute alignments above (1LU<<63)+1
 * as this would require returning '64', which will not fit in
 * the flags field and doesn't work well for calculations either.
 */
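/*
 * For example: a size of 0x1000 has only bit 12 set, so the encoded
 * alignment is 2^12; a size of 0x1001 still has bit 12 as its highest
 * bit but has other bits set as well, so the result rounds up to 2^13.
 */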
uint32_t
rman_make_alignment_flags(size_t size)
{
	int i;

	for (i = 63; i; --i) {
		if ((1LU << i) & size)
			break;
	}
	if (~(1LU << i) & size)
		++i;
	if (i == 64)
		i = 63;
	return(RF_ALIGNMENT_LOG2(i));
}
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters: the index into the list of resource
 * managers, and the resource offset into that manager's list.
 */
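/*
 * Concretely (as implemented below): name[0] is a bus-data generation
 * count that must still be current, name[1] selects the resource
 * manager, and name[2] selects the resource within it, with -1
 * requesting a summary of the manager itself.
 */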
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int rman_idx, res_idx;
	struct rman *rm;
	struct resource *res;
	struct u_rman urm;
	struct u_resource ures;
	int error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	error = ENOENT;
	lwkt_gettoken(&rman_tok);

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	if (rm == NULL)
		goto done;

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		goto done;
	}

	/*
	 * Find the indexed resource and return it.
	 */
	atomic_add_int(&rm->rm_hold, 1);	/* temp prevent destruction */
	lwkt_gettoken(rm->rm_slock);

	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					ksnprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			break;
		}
	}
	lwkt_reltoken(rm->rm_slock);
	atomic_add_int(&rm->rm_hold, -1);
done:
	lwkt_reltoken(&rman_tok);

	/* report ENOENT only if nothing above set a more specific result */
	return (error);
}
SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");