soc/intel/common/acpi: Fix ACPI Namespace lookup failure, AE_ALREADY_EXISTS issue
[coreboot.git] / src / device / device.c
blobdcbaef1279e31d12d2ad2ca36cffe6885d3b6de1
1 /*
2 * This file is part of the coreboot project.
4 * It was originally based on the Linux kernel (arch/i386/kernel/pci-pc.c).
6 * Modifications are:
7 * Copyright (C) 2003 Eric Biederman <ebiederm@xmission.com>
8 * Copyright (C) 2003-2004 Linux Networx
9 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
10 * Copyright (C) 2003 Ronald G. Minnich <rminnich@gmail.com>
11 * Copyright (C) 2004-2005 Li-Ta Lo <ollie@lanl.gov>
12 * Copyright (C) 2005-2006 Tyan
13 * (Written by Yinghai Lu <yhlu@tyan.com> for Tyan)
14 * Copyright (C) 2005-2006 Stefan Reinauer <stepan@openbios.org>
15 * Copyright (C) 2009 Myles Watson <mylesgw@gmail.com>
16 * Copyright (c) 1999--2000 Martin Mares <mj@suse.cz>
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; version 2 of the License.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
29 * Lots of mods by Ron Minnich <rminnich@lanl.gov>, with
30 * the final architecture guidance from Tom Merritt <tjm@codegen.com>.
32 * In particular, we changed from the one-pass original version to
33 * Tom's recommended multiple-pass version. I wasn't sure about doing
34 * it with multiple passes, until I actually started doing it and saw
35 * the wisdom of Tom's recommendations...
37 * Lots of cleanups by Eric Biederman to handle bridges, and to
38 * handle resource allocation for non-PCI devices.
41 #include <console/console.h>
42 #include <arch/io.h>
43 #include <device/device.h>
44 #include <device/pci_def.h>
45 #include <device/pci_ids.h>
46 #include <stdlib.h>
47 #include <string.h>
48 #include <smp/spinlock.h>
49 #if IS_ENABLED(CONFIG_ARCH_X86)
50 #include <arch/ebda.h>
51 #endif
52 #include <timer.h>
54 /** Pointer to the last device */
/* Tail of the global all_devices list; __alloc_dev() appends new devices here. */
55 extern struct device *last_dev;
56 /** Linked list of free resources */
57 struct resource *free_resources = NULL;
59 /**
60 * Initialize all chips of statically known devices.
62 * Will be called before bus enumeration to initialize chips stated in the
63 * device tree.
65 void dev_initialize_chips(void)
67 const struct device *dev;
69 for (dev = all_devices; dev; dev = dev->next) {
70 /* Initialize chip if we haven't yet. */
71 if (dev->chip_ops && dev->chip_ops->init &&
72 !dev->chip_ops->initialized) {
73 post_log_path(dev);
74 dev->chip_ops->init(dev->chip_info);
75 dev->chip_ops->initialized = 1;
78 post_log_clear();
81 /**
82 * Finalize all chips of statically known devices.
84 * This is the last call before calling the payload. This is a good place
85 * to lock registers or other final cleanup.
87 void dev_finalize_chips(void)
89 const struct device *dev;
91 for (dev = all_devices; dev; dev = dev->next) {
92 /* Initialize chip if we haven't yet. */
93 if (dev->chip_ops && dev->chip_ops->final &&
94 !dev->chip_ops->finalized) {
95 dev->chip_ops->final(dev->chip_info);
96 dev->chip_ops->finalized = 1;
/* Serializes device-tree mutation in alloc_dev()/alloc_find_dev(). */
101 DECLARE_SPIN_LOCK(dev_lock)
103 #if IS_ENABLED(CONFIG_GFXUMA)
104 /* IGD UMA memory */
/* Base/size of the UMA region carved out for integrated graphics. */
105 uint64_t uma_memory_base = 0;
106 uint64_t uma_memory_size = 0;
107 #endif
110 * Allocate a new device structure.
112 * Allocate a new device structure and attach it to the device tree as a
113 * child of the parent bus.
115 * @param parent Parent bus the newly created device should be attached to.
116 * @param path Path to the device to be created.
117 * @return Pointer to the newly created device structure.
119 * @see device_path
121 static struct device *__alloc_dev(struct bus *parent, struct device_path *path)
123 struct device *dev, *child;
125 /* Find the last child of our parent. */
126 for (child = parent->children; child && child->sibling; /* */ )
127 child = child->sibling;
129 dev = malloc(sizeof(*dev));
130 if (dev == 0)
131 die("alloc_dev(): out of memory.\n");
133 memset(dev, 0, sizeof(*dev));
134 memcpy(&dev->path, path, sizeof(*path));
136 /* By default devices are enabled. */
137 dev->enabled = 1;
139 /* Add the new device to the list of children of the bus. */
140 dev->bus = parent;
141 if (child)
142 child->sibling = dev;
143 else
144 parent->children = dev;
146 /* Append a new device to the global device list.
147 * The list is used to find devices once everything is set up.
149 last_dev->next = dev;
150 last_dev = dev;
152 return dev;
155 struct device *alloc_dev(struct bus *parent, struct device_path *path)
157 struct device *dev;
158 spin_lock(&dev_lock);
159 dev = __alloc_dev(parent, path);
160 spin_unlock(&dev_lock);
161 return dev;
165 * See if a device structure already exists and if not allocate it.
167 * @param parent The bus to find the device on.
168 * @param path The relative path from the bus to the appropriate device.
169 * @return Pointer to a device structure for the device on bus at path.
171 struct device *alloc_find_dev(struct bus *parent, struct device_path *path)
173 struct device *child;
174 spin_lock(&dev_lock);
175 child = find_dev_path(parent, path);
176 if (!child)
177 child = __alloc_dev(parent, path);
178 spin_unlock(&dev_lock);
179 return child;
183 * Round a number up to an alignment.
185 * @param val The starting value.
186 * @param pow Alignment as a power of two.
187 * @return Rounded up number.
189 static resource_t round(resource_t val, unsigned long pow)
191 resource_t mask;
192 mask = (1ULL << pow) - 1ULL;
193 val += mask;
194 val &= ~mask;
195 return val;
198 static const char *resource2str(struct resource *res)
200 if (res->flags & IORESOURCE_IO)
201 return "io";
202 if (res->flags & IORESOURCE_PREFETCH)
203 return "prefmem";
204 if (res->flags & IORESOURCE_MEM)
205 return "mem";
206 return "undefined";
210 * Read the resources on all devices of a given bus.
212 * @param bus Bus to read the resources on.
214 static void read_resources(struct bus *bus)
216 struct device *curdev;
218 printk(BIOS_SPEW, "%s %s bus %x link: %d\n", dev_path(bus->dev),
219 __func__, bus->secondary, bus->link_num);
221 /* Walk through all devices and find which resources they need. */
222 for (curdev = bus->children; curdev; curdev = curdev->sibling) {
223 struct bus *link;
225 if (!curdev->enabled)
226 continue;
228 if (!curdev->ops || !curdev->ops->read_resources) {
229 if (curdev->path.type != DEVICE_PATH_APIC)
230 printk(BIOS_ERR, "%s missing read_resources\n",
231 dev_path(curdev));
232 continue;
234 post_log_path(curdev);
235 curdev->ops->read_resources(curdev);
237 /* Read in the resources behind the current device's links. */
238 for (link = curdev->link_list; link; link = link->next)
239 read_resources(link);
241 post_log_clear();
242 printk(BIOS_SPEW, "%s read_resources bus %d link: %d done\n",
243 dev_path(bus->dev), bus->secondary, bus->link_num);
/* Iteration state shared by largest_resource() and pick_largest_resource(). */
246 struct pick_largest_state {
/* Resource returned on the previous pass; the search resumes after it. */
247 struct resource *last;
/* Device owning the current best candidate. */
248 const struct device *result_dev;
/* Current best (largest align, then size) candidate resource. */
249 struct resource *result;
/* Set once the walk has passed 'last'; disambiguates equal-sized entries. */
250 int seen_last;
/*
 * search_bus_resources() visitor: remember the largest non-FIXED resource
 * that sorts strictly after state->last in (align desc, size desc) order,
 * so repeated calls enumerate resources from largest to smallest.
 */
253 static void pick_largest_resource(void *gp, struct device *dev,
254 struct resource *resource)
256 struct pick_largest_state *state = gp;
257 struct resource *last;
259 last = state->last;
261 /* Be certain to pick the successor to last. */
262 if (resource == last) {
263 state->seen_last = 1;
264 return;
266 if (resource->flags & IORESOURCE_FIXED)
267 return; /* Skip it. */
/* Reject anything that sorts before (or ties ahead of) 'last'. */
268 if (last && ((last->align < resource->align) ||
269 ((last->align == resource->align) &&
270 (last->size < resource->size)) ||
271 ((last->align == resource->align) &&
272 (last->size == resource->size) && (!state->seen_last)))) {
273 return;
/* Keep the best remaining candidate seen so far. */
275 if (!state->result ||
276 (state->result->align < resource->align) ||
277 ((state->result->align == resource->align) &&
278 (state->result->size < resource->size))) {
279 state->result_dev = dev;
280 state->result = resource;
284 static const struct device *largest_resource(struct bus *bus,
285 struct resource **result_res,
286 unsigned long type_mask,
287 unsigned long type)
289 struct pick_largest_state state;
291 state.last = *result_res;
292 state.result_dev = NULL;
293 state.result = NULL;
294 state.seen_last = 0;
296 search_bus_resources(bus, type_mask, type, pick_largest_resource,
297 &state);
299 *result_res = state.result;
300 return state.result_dev;
304 * This function is the guts of the resource allocator.
306 * The problem.
307 * - Allocate resource locations for every device.
308 * - Don't overlap, and follow the rules of bridges.
309 * - Don't overlap with resources in fixed locations.
310 * - Be efficient so we don't have ugly strategies.
312 * The strategy.
313 * - Devices that have fixed addresses are the minority so don't
314 * worry about them too much. Instead only use part of the address
315 * space for devices with programmable addresses. This easily handles
316 * everything except bridges.
318 * - PCI devices are required to have their sizes and their alignments
319 * equal. In this case an optimal solution to the packing problem
320 * exists. Allocate all devices from highest alignment to least
321 * alignment or vice versa. Use this.
323 * - So we can handle more than PCI run two allocation passes on bridges. The
324 * first to see how large the resources are behind the bridge, and what
325 * their alignment requirements are. The second to assign a safe address to
326 * the devices behind the bridge. This allows us to treat a bridge as just
327 * a device with a couple of resources, and not need to special case it in
328 * the allocator. Also this allows handling of other types of bridges.
330 * @param bus The bus we are traversing.
331 * @param bridge The bridge resource which must contain the bus' resources.
332 * @param type_mask This value gets ANDed with the resource type.
333 * @param type This value must match the result of the AND.
334 * @return TODO
336 static void compute_resources(struct bus *bus, struct resource *bridge,
337 unsigned long type_mask, unsigned long type)
339 const struct device *dev;
340 struct resource *resource;
341 resource_t base;
342 base = round(bridge->base, bridge->align);
344 printk(BIOS_SPEW, "%s %s: base: %llx size: %llx align: %d gran: %d"
345 " limit: %llx\n", dev_path(bus->dev), resource2str(bridge),
346 base, bridge->size, bridge->align,
347 bridge->gran, bridge->limit);
349 /* For each child which is a bridge, compute the resource needs. */
350 for (dev = bus->children; dev; dev = dev->sibling) {
351 struct resource *child_bridge;
353 if (!dev->link_list)
354 continue;
356 /* Find the resources with matching type flags. */
357 for (child_bridge = dev->resource_list; child_bridge;
358 child_bridge = child_bridge->next) {
359 struct bus* link;
361 if (!(child_bridge->flags & IORESOURCE_BRIDGE)
362 || (child_bridge->flags & type_mask) != type)
363 continue;
366 * Split prefetchable memory if combined. Many domains
367 * use the same address space for prefetchable memory
368 * and non-prefetchable memory. Bridges below them need
369 * it separated. Add the PREFETCH flag to the type_mask
370 * and type.
372 link = dev->link_list;
373 while (link && link->link_num !=
374 IOINDEX_LINK(child_bridge->index))
375 link = link->next;
377 if (link == NULL) {
378 printk(BIOS_ERR, "link %ld not found on %s\n",
379 IOINDEX_LINK(child_bridge->index),
380 dev_path(dev));
383 compute_resources(link, child_bridge,
384 type_mask | IORESOURCE_PREFETCH,
385 type | (child_bridge->flags &
386 IORESOURCE_PREFETCH));
390 /* Remember we haven't found anything yet. */
391 resource = NULL;
394 * Walk through all the resources on the current bus and compute the
395 * amount of address space taken by them. Take granularity and
396 * alignment into account.
398 while ((dev = largest_resource(bus, &resource, type_mask, type))) {
400 /* Size 0 resources can be skipped. */
401 if (!resource->size)
402 continue;
404 /* Propagate the resource alignment to the bridge resource. */
405 if (resource->align > bridge->align)
406 bridge->align = resource->align;
408 /* Propagate the resource limit to the bridge register. */
409 if (bridge->limit > resource->limit)
410 bridge->limit = resource->limit;
412 /* Warn if it looks like APICs aren't declared. */
413 if ((resource->limit == 0xffffffff) &&
414 (resource->flags & IORESOURCE_ASSIGNED)) {
415 printk(BIOS_ERR,
416 "Resource limit looks wrong! (no APIC?)\n");
417 printk(BIOS_ERR, "%s %02lx limit %08llx\n",
418 dev_path(dev), resource->index, resource->limit);
421 if (resource->flags & IORESOURCE_IO) {
423 * Don't allow potential aliases over the legacy PCI
424 * expansion card addresses. The legacy PCI decodes
425 * only 10 bits, uses 0x100 - 0x3ff. Therefore, only
426 * 0x00 - 0xff can be used out of each 0x400 block of
427 * I/O space.
429 if ((base & 0x300) != 0) {
430 base = (base & ~0x3ff) + 0x400;
433 * Don't allow allocations in the VGA I/O range.
434 * PCI has special cases for that.
436 else if ((base >= 0x3b0) && (base <= 0x3df)) {
437 base = 0x3e0;
440 /* Base must be aligned. */
441 base = round(base, resource->align);
442 resource->base = base;
443 base += resource->size;
445 printk(BIOS_SPEW, "%s %02lx * [0x%llx - 0x%llx] %s\n",
446 dev_path(dev), resource->index, resource->base,
447 resource->base + resource->size - 1,
448 resource2str(resource));
452 * A PCI bridge resource does not need to be a power of two size, but
453 * it does have a minimum granularity. Round the size up to that
454 * minimum granularity so we know not to place something else at an
455 * address positively decoded by the bridge.
457 bridge->size = round(base, bridge->gran) -
458 round(bridge->base, bridge->align);
460 printk(BIOS_SPEW, "%s %s: base: %llx size: %llx align: %d gran: %d"
461 " limit: %llx done\n", dev_path(bus->dev),
462 resource2str(bridge),
463 base, bridge->size, bridge->align, bridge->gran, bridge->limit);
467 * This function is the second part of the resource allocator.
469 * See the compute_resources function for a more detailed explanation.
471 * This function assigns the resources a value.
473 * @param bus The bus we are traversing.
474 * @param bridge The bridge resource which must contain the bus' resources.
475 * @param type_mask This value gets ANDed with the resource type.
476 * @param type This value must match the result of the AND.
478 * @see compute_resources
480 static void allocate_resources(struct bus *bus, struct resource *bridge,
481 unsigned long type_mask, unsigned long type)
483 const struct device *dev;
484 struct resource *resource;
485 resource_t base;
486 base = bridge->base;
488 printk(BIOS_SPEW, "%s %s: base:%llx size:%llx align:%d gran:%d "
489 "limit:%llx\n", dev_path(bus->dev),
490 resource2str(bridge),
491 base, bridge->size, bridge->align, bridge->gran, bridge->limit);
493 /* Remember we haven't found anything yet. */
494 resource = NULL;
497 * Walk through all the resources on the current bus and allocate them
498 * address space.
500 while ((dev = largest_resource(bus, &resource, type_mask, type))) {
502 /* Propagate the bridge limit to the resource register. */
503 if (resource->limit > bridge->limit)
504 resource->limit = bridge->limit;
506 /* Size 0 resources can be skipped. */
507 if (!resource->size) {
508 /* Set the base to limit so it doesn't confuse tolm. */
509 resource->base = resource->limit;
510 resource->flags |= IORESOURCE_ASSIGNED;
511 continue;
514 if (resource->flags & IORESOURCE_IO) {
516 * Don't allow potential aliases over the legacy PCI
517 * expansion card addresses. The legacy PCI decodes
518 * only 10 bits, uses 0x100 - 0x3ff. Therefore, only
519 * 0x00 - 0xff can be used out of each 0x400 block of
520 * I/O space.
522 if ((base & 0x300) != 0) {
523 base = (base & ~0x3ff) + 0x400;
526 * Don't allow allocations in the VGA I/O range.
527 * PCI has special cases for that.
529 else if ((base >= 0x3b0) && (base <= 0x3df)) {
530 base = 0x3e0;
534 if ((round(base, resource->align) + resource->size - 1) <=
535 resource->limit) {
536 /* Base must be aligned. */
537 base = round(base, resource->align);
538 resource->base = base;
539 resource->limit = resource->base + resource->size - 1;
540 resource->flags |= IORESOURCE_ASSIGNED;
541 resource->flags &= ~IORESOURCE_STORED;
542 base += resource->size;
543 } else {
544 printk(BIOS_ERR, "!! Resource didn't fit !!\n");
545 printk(BIOS_ERR, " aligned base %llx size %llx "
546 "limit %llx\n", round(base, resource->align),
547 resource->size, resource->limit);
548 printk(BIOS_ERR, " %llx needs to be <= %llx "
549 "(limit)\n", (round(base, resource->align) +
550 resource->size) - 1, resource->limit);
551 printk(BIOS_ERR, " %s%s %02lx * [0x%llx - 0x%llx]"
552 " %s\n", (resource->flags & IORESOURCE_ASSIGNED)
553 ? "Assigned: " : "", dev_path(dev),
554 resource->index, resource->base,
555 resource->base + resource->size - 1,
556 resource2str(resource));
559 printk(BIOS_SPEW, "%s %02lx * [0x%llx - 0x%llx] %s\n",
560 dev_path(dev), resource->index, resource->base,
561 resource->size ? resource->base + resource->size - 1 :
562 resource->base, resource2str(resource));
566 * A PCI bridge resource does not need to be a power of two size, but
567 * it does have a minimum granularity. Round the size up to that
568 * minimum granularity so we know not to place something else at an
569 * address positively decoded by the bridge.
572 bridge->flags |= IORESOURCE_ASSIGNED;
574 printk(BIOS_SPEW, "%s %s: next_base: %llx size: %llx align: %d "
575 "gran: %d done\n", dev_path(bus->dev),
576 resource2str(bridge), base, bridge->size, bridge->align,
577 bridge->gran);
579 /* For each child which is a bridge, allocate_resources. */
580 for (dev = bus->children; dev; dev = dev->sibling) {
581 struct resource *child_bridge;
583 if (!dev->link_list)
584 continue;
586 /* Find the resources with matching type flags. */
587 for (child_bridge = dev->resource_list; child_bridge;
588 child_bridge = child_bridge->next) {
589 struct bus* link;
591 if (!(child_bridge->flags & IORESOURCE_BRIDGE) ||
592 (child_bridge->flags & type_mask) != type)
593 continue;
596 * Split prefetchable memory if combined. Many domains
597 * use the same address space for prefetchable memory
598 * and non-prefetchable memory. Bridges below them need
599 * it separated. Add the PREFETCH flag to the type_mask
600 * and type.
602 link = dev->link_list;
603 while (link && link->link_num !=
604 IOINDEX_LINK(child_bridge->index))
605 link = link->next;
606 if (link == NULL)
607 printk(BIOS_ERR, "link %ld not found on %s\n",
608 IOINDEX_LINK(child_bridge->index),
609 dev_path(dev));
611 allocate_resources(link, child_bridge,
612 type_mask | IORESOURCE_PREFETCH,
613 type | (child_bridge->flags &
614 IORESOURCE_PREFETCH));
619 static int resource_is(struct resource *res, u32 type)
621 return (res->flags & IORESOURCE_TYPE_MASK) == type;
/* Running [base, limit] windows for the I/O and memory address spaces. */
624 struct constraints {
625 struct resource io, mem;
628 static struct resource *resource_limit(struct constraints *limits,
629 struct resource *res)
631 struct resource *lim = NULL;
633 /* MEM, or I/O - skip any others. */
634 if (resource_is(res, IORESOURCE_MEM))
635 lim = &limits->mem;
636 else if (resource_is(res, IORESOURCE_IO))
637 lim = &limits->io;
639 return lim;
/*
 * Recursively shrink the I/O and MEM constraint windows so they exclude
 * every FIXED resource found on @dev and its enabled descendants.
 */
642 static void constrain_resources(const struct device *dev,
643 struct constraints* limits)
645 const struct device *child;
646 struct resource *res;
647 struct resource *lim;
648 struct bus *link;
650 /* Constrain limits based on the fixed resources of this device. */
651 for (res = dev->resource_list; res; res = res->next) {
652 if (!(res->flags & IORESOURCE_FIXED))
653 continue;
654 if (!res->size) {
655 /* It makes no sense to have 0-sized, fixed resources.*/
656 printk(BIOS_ERR, "skipping %s@%lx fixed resource, "
657 "size=0!\n", dev_path(dev), res->index);
658 continue;
661 lim = resource_limit(limits, res);
662 if (!lim)
663 continue;
666 * Is it a fixed resource outside the current known region?
667 * If so, we don't have to consider it - it will be handled
668 * correctly and doesn't affect current region's limits.
670 if (((res->base + res->size -1) < lim->base)
671 || (res->base > lim->limit))
672 continue;
674 printk(BIOS_SPEW, "%s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
675 __func__, dev_path(dev), res->index, res->base,
676 res->base + res->size - 1, resource2str(res));
679 * Choose to be above or below fixed resources. This check is
680 * signed so that "negative" amounts of space are handled
681 * correctly.
683 if ((signed long long)(lim->limit - (res->base + res->size -1))
684 > (signed long long)(res->base - lim->base))
685 lim->base = res->base + res->size;
686 else
687 lim->limit = res->base -1;
690 /* Descend into every enabled child and look for fixed resources. */
691 for (link = dev->link_list; link; link = link->next) {
692 for (child = link->children; child; child = child->sibling) {
693 if (child->enabled)
694 constrain_resources(child, limits);
/*
 * Clamp @dev's (domain's) movable resource windows so they avoid all FIXED
 * resources found anywhere in the subtree below it.
 */
699 static void avoid_fixed_resources(const struct device *dev)
701 struct constraints limits;
702 struct resource *res;
703 struct resource *lim;
705 printk(BIOS_SPEW, "%s: %s\n", __func__, dev_path(dev));
707 /* Initialize constraints to maximum size. */
708 limits.io.base = 0;
709 limits.io.limit = 0xffffffffffffffffULL;
710 limits.mem.base = 0;
711 limits.mem.limit = 0xffffffffffffffffULL;
713 /* Constrain the limits to dev's initial resources. */
714 for (res = dev->resource_list; res; res = res->next) {
715 if ((res->flags & IORESOURCE_FIXED))
716 continue;
717 printk(BIOS_SPEW, "%s:@%s %02lx limit %08llx\n", __func__,
718 dev_path(dev), res->index, res->limit);
720 lim = resource_limit(&limits, res);
721 if (!lim)
722 continue;
/* Intersect the window with this resource's own [base, limit] range. */
724 if (res->base > lim->base)
725 lim->base = res->base;
726 if (res->limit < lim->limit)
727 lim->limit = res->limit;
730 /* Look through the tree for fixed resources and update the limits. */
731 constrain_resources(dev, &limits);
733 /* Update dev's resources with new limits. */
734 for (res = dev->resource_list; res; res = res->next) {
735 if ((res->flags & IORESOURCE_FIXED))
736 continue;
738 lim = resource_limit(&limits, res);
739 if (!lim)
740 continue;
742 /* Is the resource outside the limits? */
743 if (lim->base > res->base)
744 res->base = lim->base;
745 if (res->limit > lim->limit)
746 res->limit = lim->limit;
748 /* MEM resources need to start at the highest address manageable. */
749 if (res->flags & IORESOURCE_MEM)
750 res->base = resource_max(res);
752 printk(BIOS_SPEW, "%s:@%s %02lx base %08llx limit %08llx\n",
753 __func__, dev_path(dev), res->index, res->base, res->limit);
/* Primary VGA device chosen below; consumed elsewhere for legacy VGA I/O. */
757 struct device *vga_pri = NULL;
/*
 * Pick the primary VGA device (onboard vs. plug-in, per
 * CONFIG_ONBOARD_VGA_IS_PRIMARY), enable its MEM/IO decode, and set
 * PCI_BRIDGE_CTL_VGA on every bridge between it and the root.
 */
758 static void set_vga_bridge_bits(void)
761 * FIXME: Modify set_vga_bridge() so it is less PCI-centric!
762 * This function knows too much about PCI stuff, it should be just
763 * an iterator/visitor.
766 /* FIXME: Handle the VGA palette snooping. */
767 struct device *dev, *vga, *vga_onboard;
768 struct bus *bus;
770 bus = 0;
771 vga = 0;
772 vga_onboard = 0;
774 dev = NULL;
775 while ((dev = dev_find_class(PCI_CLASS_DISPLAY_VGA << 8, dev))) {
776 if (!dev->enabled)
777 continue;
779 printk(BIOS_DEBUG, "found VGA at %s\n", dev_path(dev));
/* Track the last onboard and last plug-in VGA device seen. */
781 if (dev->on_mainboard) {
782 vga_onboard = dev;
783 } else {
784 vga = dev;
787 /* It isn't safe to enable all VGA cards. */
788 dev->command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
/* Fall back to onboard VGA when no plug-in card was found. */
791 if (!vga)
792 vga = vga_onboard;
794 if (CONFIG_ONBOARD_VGA_IS_PRIMARY && vga_onboard)
795 vga = vga_onboard;
797 /* If we prefer plugin VGA over chipset VGA, the chipset might
798 want to know. */
799 if (!CONFIG_ONBOARD_VGA_IS_PRIMARY && (vga != vga_onboard) &&
800 vga_onboard && vga_onboard->ops && vga_onboard->ops->disable) {
801 printk(BIOS_DEBUG, "Use plugin graphics over integrated.\n");
802 vga_onboard->ops->disable(vga_onboard);
805 if (vga) {
806 /* VGA is first add-on card or the only onboard VGA. */
807 printk(BIOS_DEBUG, "Setting up VGA for %s\n", dev_path(vga));
808 /* All legacy VGA cards have MEM & I/O space registers. */
809 vga->command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
810 vga_pri = vga;
811 bus = vga->bus;
814 /* Now walk up the bridges setting the VGA enable. */
815 while (bus) {
816 printk(BIOS_DEBUG, "Setting PCI_BRIDGE_CTL_VGA for bridge %s\n",
817 dev_path(bus->dev));
818 bus->bridge_ctrl |= PCI_BRIDGE_CTL_VGA;
/* Stop once the walk reaches the root (bus that is its own parent). */
819 bus = (bus == bus->dev->bus) ? 0 : bus->dev->bus;
824 * Assign the computed resources to the devices on the bus.
826 * Use the device specific set_resources() method to store the computed
827 * resources to hardware. For bridge devices, the set_resources() method
828 * has to recurse into every down stream buses.
830 * Mutual recursion:
831 * assign_resources() -> device_operation::set_resources()
832 * device_operation::set_resources() -> assign_resources()
834 * @param bus Pointer to the structure for this bus.
836 void assign_resources(struct bus *bus)
838 struct device *curdev;
840 printk(BIOS_SPEW, "%s assign_resources, bus %d link: %d\n",
841 dev_path(bus->dev), bus->secondary, bus->link_num);
843 for (curdev = bus->children; curdev; curdev = curdev->sibling) {
844 if (!curdev->enabled || !curdev->resource_list)
845 continue;
847 if (!curdev->ops || !curdev->ops->set_resources) {
848 printk(BIOS_ERR, "%s missing set_resources\n",
849 dev_path(curdev));
850 continue;
852 post_log_path(curdev);
853 curdev->ops->set_resources(curdev);
855 post_log_clear();
856 printk(BIOS_SPEW, "%s assign_resources, bus %d link: %d\n",
857 dev_path(bus->dev), bus->secondary, bus->link_num);
861 * Enable the resources for devices on a link.
863 * Enable resources of the device by calling the device specific
864 * enable_resources() method.
866 * The parent's resources should be enabled first to avoid having enabling
867 * order problem. This is done by calling the parent's enable_resources()
868 * method before its children's enable_resources() methods.
870 * @param link The link whose devices' resources are to be enabled.
872 static void enable_resources(struct bus *link)
874 struct device *dev;
875 struct bus *c_link;
877 for (dev = link->children; dev; dev = dev->sibling) {
878 if (dev->enabled && dev->ops && dev->ops->enable_resources) {
879 post_log_path(dev);
880 dev->ops->enable_resources(dev);
884 for (dev = link->children; dev; dev = dev->sibling) {
885 for (c_link = dev->link_list; c_link; c_link = c_link->next)
886 enable_resources(c_link);
888 post_log_clear();
892 * Reset all of the devices on a bus and clear the bus's reset_needed flag.
894 * @param bus Pointer to the bus structure.
895 * @return 1 if the bus was successfully reset, 0 otherwise.
897 int reset_bus(struct bus *bus)
899 if (bus && bus->dev && bus->dev->ops && bus->dev->ops->reset_bus) {
900 bus->dev->ops->reset_bus(bus);
901 bus->reset_needed = 0;
902 return 1;
904 return 0;
/*
 * Scan for devices on a bus.
 *
 * If there are bridges on the bus, recursively scan the buses behind the
 * bridges. If the setting up and tuning of the bus causes a reset to be
 * required, reset the bus and scan it again.
 *
 * @param busdev Pointer to the bus device. Callers (scan_bridges,
 * dev_enumerate) guarantee busdev->ops->scan_bus is non-NULL.
 */
916 static void scan_bus(struct device *busdev)
918 int do_scan_bus;
919 struct stopwatch sw;
921 stopwatch_init(&sw);
923 if (!busdev->enabled)
924 return;
926 printk(BIOS_SPEW, "%s scanning...\n", dev_path(busdev));
928 post_log_path(busdev);
/* Rescan as long as any downstream link needed (and got) a reset. */
930 do_scan_bus = 1;
931 while (do_scan_bus) {
932 struct bus *link;
933 busdev->ops->scan_bus(busdev);
934 do_scan_bus = 0;
935 for (link = busdev->link_list; link; link = link->next) {
936 if (link->reset_needed) {
937 if (reset_bus(link))
938 do_scan_bus = 1;
/* Couldn't reset here: propagate the request to the parent bus. */
939 else
940 busdev->bus->reset_needed = 1;
945 printk(BIOS_DEBUG, "%s: scanning of bus %s took %ld usecs\n",
946 __func__, dev_path(busdev), stopwatch_duration_usecs(&sw));
949 void scan_bridges(struct bus *bus)
951 struct device *child;
953 for (child = bus->children; child; child = child->sibling) {
954 if (!child->ops || !child->ops->scan_bus)
955 continue;
956 scan_bus(child);
961 * Determine the existence of devices and extend the device tree.
963 * Most of the devices in the system are listed in the mainboard devicetree.cb
964 * file. The device structures for these devices are generated at compile
965 * time by the config tool and are organized into the device tree. This
966 * function determines if the devices created at compile time actually exist
967 * in the physical system.
969 * For devices in the physical system but not listed in devicetree.cb,
970 * the device structures have to be created at run time and attached to the
971 * device tree.
973 * This function starts from the root device 'dev_root', scans the buses in
974 * the system recursively, and modifies the device tree according to the
975 * result of the probe.
977 * This function has no idea how to scan and probe buses and devices at all.
978 * It depends on the bus/device specific scan_bus() method to do it. The
979 * scan_bus() method also has to create the device structure and attach
980 * it to the device tree.
982 void dev_enumerate(void)
984 struct device *root;
986 printk(BIOS_INFO, "Enumerating buses...\n");
988 root = &dev_root;
990 show_all_devs(BIOS_SPEW, "Before device enumeration.");
991 printk(BIOS_SPEW, "Compare with tree...\n");
992 show_devs_tree(root, BIOS_SPEW, 0);
994 if (root->chip_ops && root->chip_ops->enable_dev)
995 root->chip_ops->enable_dev(root);
997 if (!root->ops || !root->ops->scan_bus) {
998 printk(BIOS_ERR, "dev_root missing scan_bus operation");
999 return;
1001 scan_bus(root);
1002 post_log_clear();
1003 printk(BIOS_INFO, "done\n");
/*
 * Configure devices on the devices tree.
 *
 * Starting at the root of the device tree, travel it recursively in two
 * passes. In the first pass, we compute and allocate resources (ranges)
 * required by each device. In the second pass, the resources ranges are
 * relocated to their final position and stored to the hardware.
 *
 * I/O resources grow upward. MEM resources grow downward.
 *
 * Since the assignment is hierarchical we set the values into the dev_root
 * struct.
 */
1019 void dev_configure(void)
1021 struct resource *res;
1022 const struct device *root;
1023 const struct device *child;
1025 set_vga_bridge_bits();
1027 printk(BIOS_INFO, "Allocating resources...\n");
1029 root = &dev_root;
1032 * Each domain should create resources which contain the entire address
1033 * space for IO, MEM, and PREFMEM resources in the domain. The
1034 * allocation of device resources will be done from this address space.
1037 /* Read the resources for the entire tree. */
1039 printk(BIOS_INFO, "Reading resources...\n");
1040 read_resources(root->link_list);
1041 printk(BIOS_INFO, "Done reading resources.\n");
1043 print_resource_tree(root, BIOS_SPEW, "After reading.");
1045 /* Compute resources for all domains. */
1046 for (child = root->link_list->children; child; child = child->sibling) {
1047 if (!(child->path.type == DEVICE_PATH_DOMAIN))
1048 continue;
1049 post_log_path(child);
/* Pass 1: size every non-fixed MEM and IO window of the domain. */
1050 for (res = child->resource_list; res; res = res->next) {
1051 if (res->flags & IORESOURCE_FIXED)
1052 continue;
1053 if (res->flags & IORESOURCE_MEM) {
1054 compute_resources(child->link_list,
1055 res, IORESOURCE_TYPE_MASK, IORESOURCE_MEM);
1056 continue;
1058 if (res->flags & IORESOURCE_IO) {
1059 compute_resources(child->link_list,
1060 res, IORESOURCE_TYPE_MASK, IORESOURCE_IO);
1061 continue;
1066 /* For all domains. */
1067 for (child = root->link_list->children; child; child=child->sibling)
1068 if (child->path.type == DEVICE_PATH_DOMAIN)
1069 avoid_fixed_resources(child);
1071 /* Store the computed resource allocations into device registers ... */
1072 printk(BIOS_INFO, "Setting resources...\n");
1073 for (child = root->link_list->children; child; child = child->sibling) {
1074 if (!(child->path.type == DEVICE_PATH_DOMAIN))
1075 continue;
1076 post_log_path(child);
/* Pass 2: place the sized windows at their final addresses. */
1077 for (res = child->resource_list; res; res = res->next) {
1078 if (res->flags & IORESOURCE_FIXED)
1079 continue;
1080 if (res->flags & IORESOURCE_MEM) {
1081 allocate_resources(child->link_list,
1082 res, IORESOURCE_TYPE_MASK, IORESOURCE_MEM);
1083 continue;
1085 if (res->flags & IORESOURCE_IO) {
1086 allocate_resources(child->link_list,
1087 res, IORESOURCE_TYPE_MASK, IORESOURCE_IO);
1088 continue;
1092 assign_resources(root->link_list);
1093 printk(BIOS_INFO, "Done setting resources.\n");
1094 print_resource_tree(root, BIOS_SPEW, "After assigning values.");
1096 printk(BIOS_INFO, "Done allocating resources.\n");
1100 * Enable devices on the device tree.
1102 * Starting at the root, walk the tree and enable all devices/bridges by
1103 * calling the device's enable_resources() method.
1105 void dev_enable(void)
1107 struct bus *link;
1109 printk(BIOS_INFO, "Enabling resources...\n");
1111 /* Now enable everything. */
1112 for (link = dev_root.link_list; link; link = link->next)
1113 enable_resources(link);
1115 printk(BIOS_INFO, "done.\n");
/**
 * Initialize a specific device.
 *
 * The parent should be initialized first to avoid having an ordering problem.
 * This is done by calling the parent's init() method before its children's
 * init() methods.
 *
 * Disabled devices, already-initialized devices, and devices without an
 * init() hook are silently skipped.
 *
 * @param dev The device to be initialized.
 */
static void init_dev(struct device *dev)
{
	if (!dev->enabled)
		return;

	if (!dev->initialized && dev->ops && dev->ops->init) {
#if IS_ENABLED(CONFIG_HAVE_MONOTONIC_TIMER)
		/* Time the init() call so slow devices show up in the log. */
		struct stopwatch sw;
		stopwatch_init(&sw);
#endif
		if (dev->path.type == DEVICE_PATH_I2C) {
			/* Log the parent bus an I2C device hangs off of. */
			printk(BIOS_DEBUG, "smbus: %s[%d]->",
			       dev_path(dev->bus->dev), dev->bus->link_num);
		}

		printk(BIOS_DEBUG, "%s init ...\n", dev_path(dev));
		/*
		 * Mark initialized before calling init() — NOTE(review):
		 * ordering looks intentional so a nested tree walk will not
		 * re-enter this device; confirm before reordering.
		 */
		dev->initialized = 1;
		dev->ops->init(dev);
#if IS_ENABLED(CONFIG_HAVE_MONOTONIC_TIMER)
		printk(BIOS_DEBUG, "%s init finished in %ld usecs\n", dev_path(dev),
		       stopwatch_duration_usecs(&sw));
#endif
	}
}
1152 static void init_link(struct bus *link)
1154 struct device *dev;
1155 struct bus *c_link;
1157 for (dev = link->children; dev; dev = dev->sibling) {
1158 post_code(POST_BS_DEV_INIT);
1159 post_log_path(dev);
1160 init_dev(dev);
1163 for (dev = link->children; dev; dev = dev->sibling) {
1164 for (c_link = dev->link_list; c_link; c_link = c_link->next)
1165 init_link(c_link);
/**
 * Initialize all devices in the global device tree.
 *
 * Starting at the root device, call the device's init() method to do
 * device-specific setup, then call each child's init() method.
 */
void dev_initialize(void)
{
	struct bus *link;

	printk(BIOS_INFO, "Initializing devices...\n");

#if IS_ENABLED(CONFIG_ARCH_X86)
	/*
	 * Initialize EBDA area in ramstage if early
	 * initialization is not done.
	 */
	if (!IS_ENABLED(CONFIG_EARLY_EBDA_INIT))
		/* Ensure EBDA is prepared before Option ROMs. */
		setup_default_ebda();
#endif

	/* First call the mainboard init. */
	init_dev(&dev_root);

	/* Now initialize everything. */
	for (link = dev_root.link_list; link; link = link->next)
		init_link(link);
	post_log_clear();

	printk(BIOS_INFO, "Devices initialized\n");
	show_all_devs(BIOS_SPEW, "After init.");
}
1204 * Finalize a specific device.
1206 * The parent should be finalized first to avoid having an ordering problem.
1207 * This is done by calling the parent's final() method before its childrens'
1208 * final() methods.
1210 * @param dev The device to be initialized.
1212 static void final_dev(struct device *dev)
1214 if (!dev->enabled)
1215 return;
1217 if (dev->ops && dev->ops->final) {
1218 printk(BIOS_DEBUG, "%s final\n", dev_path(dev));
1219 dev->ops->final(dev);
1223 static void final_link(struct bus *link)
1225 struct device *dev;
1226 struct bus *c_link;
1228 for (dev = link->children; dev; dev = dev->sibling)
1229 final_dev(dev);
1231 for (dev = link->children; dev; dev = dev->sibling) {
1232 for (c_link = dev->link_list; c_link; c_link = c_link->next)
1233 final_link(c_link);
1237 * Finalize all devices in the global device tree.
1239 * Starting at the root device, call the device's final() method to do
1240 * device-specific cleanup, then call each child's final() method.
1242 void dev_finalize(void)
1244 struct bus *link;
1246 printk(BIOS_INFO, "Finalize devices...\n");
1248 /* First call the mainboard finalize. */
1249 final_dev(&dev_root);
1251 /* Now finalize everything. */
1252 for (link = dev_root.link_list; link; link = link->next)
1253 final_link(link);
1255 printk(BIOS_INFO, "Devices finalized\n");