/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>

/**
 * Round a number up to an alignment.
 *
 * @param val The starting value.
 * @param pow Alignment as a power of two.
 * @return Rounded up number.
 */
static resource_t round(resource_t val, unsigned long pow)
{
	return ALIGN_UP(val, POWER_OF_2(pow));
}
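
/*
 * Illustrative example: with pow = 12 (i.e. 4 KiB alignment), round(0x1234, 12)
 * returns 0x2000, and a value that is already aligned is returned unchanged.
 */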

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)
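
/*
 * res_printk() indents the debug message by `depth` characters so that the log output
 * mirrors the depth of the device tree node being processed.
 */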

/*
 * During pass 1, once all the requirements for downstream devices of a bridge are gathered,
 * this function calculates the overall resource requirement for the bridge. It starts by
 * picking the largest resource requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices into consideration and
 * ensures that they get propagated to the bridge resource. This is required to guarantee that
 * the upstream bridge/domain honors the limit and alignment requirements for this bridge based
 * on the tightest constraints downstream.
 */
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res = NULL;
	resource_t base;
	bool first_child_res = true;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	/*
	 * `base` keeps track of where the next allocation for a child resource can take place
	 * from within the bridge resource window. Since the bridge resource window allocation
	 * is not performed yet, it can start at 0. Base gets updated every time a resource
	 * requirement is accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge resource
	 * window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource if this is the first
		 * child resource with non-zero size being considered. For all other children
		 * resources, alignment is taken care of by updating the base to round up as per
		 * the child resource alignment. It is guaranteed that pass 2 follows the exact
		 * same method of picking the resource for allocation using largest_resource().
		 * Thus, as long as the alignment for the first child resource is propagated up
		 * to the bridge resource, it can be guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (first_child_res && (child_res->align > bridge_res->align))
			bridge_res->align = child_res->align;

		first_child_res = false;

		/*
		 * Propagate the resource limit to the bridge resource only if the child
		 * resource limit is non-zero. If a downstream device has stricter requirements
		 * w.r.t. limits for any resource, that constraint needs to be propagated back
		 * up to the downstream bridges of the domain. This guarantees that the resource
		 * allocation which starts at the domain level takes into account all these
		 * constraints, thus working on a global view.
		 */
		if (child_res->limit && (child_res->limit < bridge_res->limit))
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate the downstream resource request to allocate above the 4G boundary
		 * to the upstream bridge resource. This ensures that during pass 2, the
		 * resource allocator at domain level has a global view of all the downstream
		 * device requirements and thus address space is allocated as per updated flags
		 * in the bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream resources of
		 * this bridge resource will be allocated space above the 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * An alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = round(base, child_res->align);

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents the total size
	 * requirement for the current bridge resource window. This size needs to be rounded up
	 * to the granularity requirement of the bridge to ensure that the upstream
	 * bridge/domain allocates a big enough window.
	 */
	bridge_res->size = round(base, bridge_res->gran);

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}
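
/*
 * Illustrative example with made-up numbers: for a bridge mem window with gran 20 (1 MiB)
 * and two children picked in descending size order, 0x100000 with align 20 and 0x4000 with
 * align 14, `base` ends up at 0x100000 after the first child and 0x104000 after the second
 * (0x100000 is already 2^14 aligned). Rounding base up to the bridge granularity then yields
 * a total window size requirement of 0x200000.
 */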

/*
 * During pass 1, the resource allocator at the bridge level gathers requirements from
 * downstream devices and updates its own resource windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all downstream
		 * requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree of a domain. It gathers
 * resource requirements for every downstream bridge by looking at the resource requests of its
 * children. Thus, the requirement gathering begins at the leaf devices and is propagated back
 * up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks down that bridge to
 * gather requirements for each resource type, i.e. i/o, mem and prefmem. Since bridges have
 * separate windows for mem and prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement gathering does not result in
 * any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const struct resource *res)
{
	if (res->flags & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (res->flags & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%d)!\n", res->flags);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it cannot be used for
 * allocation for downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						      const struct resource *res,
						      unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G boundary. The
	 * memrange type is set differently to ensure that memrange does not merge these two
	 * ranges. For the range above the 4G boundary, the given memrange type is ORed with
	 * IORESOURCE_ABOVE_4G.
	 */
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at the 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more needs to
		 * be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base to add
		 * another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If the resource lies completely above the 4G boundary or if the resource was clipped
	 * to add two separate ranges, the range above the 4G boundary has the resource flag
	 * IORESOURCE_ABOVE_4G set. This allows the domain to handle any downstream requests
	 * for resource allocation above 4G differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}
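
/*
 * Illustrative example (hypothetical window): a domain mem resource spanning
 * [0xc0000000 - 0x13fffffff] is inserted as two memranges, one for
 * [0xc0000000 - 0xffffffff] tagged with memrange_type and one for
 * [0x100000000 - 0x13fffffff] tagged with memrange_type | IORESOURCE_ABOVE_4G.
 */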

/*
 * This function initializes memranges for a domain device. If the resource crosses the 4G
 * boundary, then this function splits it into two ranges -- one for the window below 4G and
 * the other for the window above 4G. The latter range has the IORESOURCE_ABOVE_4G flag set to
 * satisfy resource requests from downstream devices for allocations above 4G.
 */
static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	if (res->flags & IORESOURCE_IO)
		initialize_domain_io_resource_memranges(ranges, res, memrange_type);
	else
		initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
}

/*
 * This function initializes memranges for a bridge device. Unlike a domain, a bridge does not
 * need to care about the resource window crossing the 4G boundary. This is handled by the
 * resource allocator at the domain level to ensure that all downstream bridges are allocated
 * space either above or below the 4G boundary as per the state of IORESOURCE_ABOVE_4G for the
 * respective bridge resource.
 *
 * So, this function creates a single range of the entire resource window available for the
 * bridge resource. Thus all downstream resources of the bridge for the given resource type get
 * allocated space from the same window. If there is any downstream resource of the bridge
 * which requests allocation above 4G, then all other downstream resources of the same type
 * under the bridge get allocated above 4G.
 */
static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during pass 2. Given the list of
 * memory ranges corresponding to the resource of a given type, it finds the biggest
 * unallocated resource using the type mask on the downstream bus. This continues in descending
 * order until all resources of the given type are allocated address space within the current
 * resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}
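
/*
 * memranges_steal() above carves the requested window out of `ranges`: as used here, it is
 * expected to find `size` bytes aligned to 2^align at or below `limit` among the entries
 * tagged `type_match`, and to report the chosen base through &resource->base.
 */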

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by any device to ensure
 * that the address map for domain resources is appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space. So, this function
 * punches holes in the address space for all fixed resources that are already defined. Both
 * I/O and normal memory resources are added as fixed. Both need to be removed from the
 * address space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special cases for
		 * that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where I/O
		 * resource allocation is guaranteed to avoid aliases over legacy PCI expansion
		 * card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}
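
/*
 * Note on the VGA hole above: ports 0x3b0 through 0x3df are the legacy MDA/CGA/VGA register
 * window, which is why constrain_domain_resources() always removes them from the I/O ranges
 * available for dynamic allocation.
 */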

/*
 * This function creates a list of memranges of a given type using the resource that is
 * provided. If the given resource is NULL or if the resource window size is 0, then it creates
 * an empty list. This results in resource allocation for that resource type failing for all
 * downstream devices since there is nothing to allocate from.
 *
 * In case of a domain, it applies additional constraints to ensure that the memranges do not
 * overlap any of the fixed resources under that domain. A domain typically provides a memrange
 * for the entire address space. Thus, it is up to the chipset to add DRAM and all other
 * windows which cannot be used for resource allocation as fixed resources.
 */
static void setup_resource_ranges(const struct device *dev, const struct resource *res,
				  unsigned long type, struct memranges *ranges)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);

	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(ranges, res, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(ranges, res, type);
	}

	print_resource_ranges(dev, ranges);
}

static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
				    const struct resource *res)
{
	memranges_teardown(ranges);
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through all the resources for the
 * bridge and generates a list of memory ranges similar to that at the domain level. However,
 * there is no need to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at the domain level.
 *
 * Allocation at the bridge level works the same as at the domain level (starts with the
 * biggest resource requirement from downstream devices and continues in descending order). One
 * major difference at the bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, the resource allocator continues walking
 * down the downstream bridges until it hits the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, res, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		cleanup_resource_ranges(bridge, &ranges, res);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

static const struct resource *find_domain_resource(const struct device *domain,
						   unsigned long type)
{
	const struct resource *res;

	for (res = domain->resource_list; res; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;

		if ((res->flags & IORESOURCE_TYPE_MASK) == type)
			return res;
	}

	return NULL;
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every domain has two types of
 * resources - io and mem. For each of these resources, this function creates a list of memory
 * ranges that can be used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given resource type. It then uses
 * the memory ranges to apply best fit on the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain, it walks down each
 * downstream bridge to continue the same process until resources are allocated to all devices
 * under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources. Thus, the
	 * resource allocation at the domain level considers mem and prefmem together when
	 * finding the best fit based on the biggest resource requirement.
	 *
	 * However, resource requests for allocation above the 4G boundary need to be handled
	 * separately if the domain resource window crosses this boundary. There is a single
	 * window for resources of type IORESOURCE_MEM. When creating memranges, this resource
	 * is split into two separate ranges -- one for the window below the 4G boundary and
	 * the other for the window above the 4G boundary (with the IORESOURCE_ABOVE_4G flag
	 * set). Thus, when allocating child resources, requests for below and above the 4G
	 * boundary are handled separately by setting the type_mask and type_match to
	 * allocate_child_resources() accordingly.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks through the entire device
 * tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be relaxed based on the
 * requirements of the downstream devices. They represent the available windows from which
 * resources can be allocated to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the resource allocator walks
 * in a DFS fashion. It gathers the requirements from leaf devices and propagates those back
 * up to their upstream bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource allocator.
 *
 * Once the requirements for all the devices under the domain are gathered, the resource
 * allocator walks a second time to allocate resources to downstream devices as per the
 * requirements. It always picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream device of the domain. In
 * order to accomplish best fit for the resources, a list of ranges is maintained by each
 * resource type (i/o and mem). The domain does not differentiate between mem and prefmem.
 * Since they are allocated space from the same window, the resource allocator at the domain
 * level ensures that the biggest requirement is selected independent of the prefetch type.
 * Once the resource allocation for all immediate downstream devices is complete at the domain
 * level, the resource allocator walks down the subtree for each downstream bridge to continue
 * the allocation process at the bridge level. Since bridges have separate windows for i/o,
 * mem and prefmem, the best fit algorithm at the bridge level looks for the biggest
 * requirement considering prefmem resources separately from non-prefmem resources. This
 * continues until resource allocation is performed for all downstream bridges in the domain
 * sub-tree. This is referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as the requirements can be
 *    satisfied.
 *  - If a resource cannot be allocated any address space, then that resource needs to be
 *    properly updated to ensure that it does not incorrectly overlap some address space
 *    reserved for a different purpose.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream devices of bridges should
 *    use parts of the address space allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}