usr/src/uts/common/io/busra.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2012 Milan Jurik. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
28 #if defined(DEBUG)
29 #define BUSRA_DEBUG
30 #endif
33 * This module provides a set of resource management interfaces
34 * to manage bus resources globally in the system.
36  * The bus nexus drivers are typically responsible for setting up resource
37  * maps for the bus resources available on a bus instance. However,
38 * this module also provides resource setup functions for PCI bus
39 * (used by both SPARC and X86 platforms) and ISA bus instances (used
40 * only for X86 platforms).
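 *
 * A minimal usage sketch (illustrative only; the dip and the addresses
 * below are hypothetical, not taken from this file): a nexus driver
 * typically creates a map, seeds it with the ranges it owns using
 * ndi_ra_free(), and later carves pieces back out with ndi_ra_alloc():
 *
 *	ndi_ra_request_t req;
 *	uint64_t base, len;
 *
 *	(void) ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM);
 *	(void) ndi_ra_free(dip, 0xe0000000, 0x10000000, NDI_RA_TYPE_MEM, 0);
 *
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x1000;
 *	if (ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_MEM, 0) ==
 *	    NDI_SUCCESS) {
 *		... the range [base, base + len) is now reserved ...
 *	}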
43 #include <sys/types.h>
44 #include <sys/systm.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/ndi_impldefs.h>
50 #include <sys/kmem.h>
51 #include <sys/pctypes.h>
52 #include <sys/modctl.h>
53 #include <sys/debug.h>
54 #include <sys/spl.h>
55 #include <sys/pci.h>
56 #include <sys/autoconf.h>
58 #if defined(BUSRA_DEBUG)
59 int busra_debug = 0;
60 #define DEBUGPRT \
61 if (busra_debug) cmn_err
63 #else
64 #define DEBUGPRT \
65 if (0) cmn_err
66 #endif
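/*
 * DEBUGPRT takes the same arguments as cmn_err(), e.g.
 * DEBUGPRT(CE_CONT, "mapp = %p\n", (void *)mapp). When built with DEBUG
 * the message is printed only while busra_debug is nonzero; in non-DEBUG
 * builds the "if (0)" form lets the compiler discard the call.
 */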
70 * global mutex that protects the global list of resource maps.
72 kmutex_t ra_lock;
75 * basic resource element
77 struct ra_resource {
78 struct ra_resource *ra_next;
79 uint64_t ra_base;
80 uint64_t ra_len;
84  * linked list element for the list of dips (and their resource ranges)
85 * for a particular resource type.
86 * ra_rangeset points to the list of resources available
87 * for this type and this dip.
89 struct ra_dip_type {
90 struct ra_dip_type *ra_next;
91 struct ra_resource *ra_rangeset;
92 dev_info_t *ra_dip;
97  * linked list element for the list of resource types. Each element
98  * holds all resources for a particular type.
100 struct ra_type_map {
101 struct ra_type_map *ra_next;
102 struct ra_dip_type *ra_dip_list;
103 char *type;
108  * placeholder that keeps the head of the whole global list;
109  * the address of the first typemap is stored in it.
111 static struct ra_type_map *ra_map_list_head = NULL;
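/*
 * The global map is a three-level structure, roughly:
 *
 *	ra_map_list_head -> ra_type_map ("io") -> ra_type_map ("mem") -> ...
 *	                        |
 *	                        v
 *	                    ra_dip_type (dip A) -> ra_dip_type (dip B) -> ...
 *	                        |
 *	                        v
 *	                    ra_resource (base, len) -> ra_resource -> ...
 *
 * i.e. one ra_type_map per resource type, one ra_dip_type per dip that
 * owns a map of that type, and a list of free ranges (kept in ascending
 * base order) per dip.
 */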
115 * This is the loadable module wrapper.
116  * It is essentially boilerplate, so it isn't documented.
118 extern struct mod_ops mod_miscops;
120 #ifdef BUSRA_DEBUG
121 void ra_dump_all(char *, dev_info_t *);
122 #endif
124 /* internal function prototypes */
125 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
126 struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
127 uint32_t flag);
128 static int isnot_pow2(uint64_t value);
129 static int claim_pci_busnum(dev_info_t *dip, void *arg);
130 static int ra_map_exist(dev_info_t *dip, char *type);
132 static int pci_get_available_prop(dev_info_t *dip, uint64_t base,
133 uint64_t len, char *busra_type);
134 static int pci_put_available_prop(dev_info_t *dip, uint64_t base,
135 uint64_t len, char *busra_type);
136 static uint32_t pci_type_ra2pci(char *type);
137 static boolean_t is_pcie_fabric(dev_info_t *dip);
139 #define PCI_ADDR_TYPE_MASK (PCI_REG_ADDR_M | PCI_REG_PF_M)
140 #define PCI_ADDR_TYPE_INVAL 0xffffffff
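/*
 * PCI_ADDR_TYPE_MASK selects the bits of pci_phys_hi that this module
 * cares about when matching "available" entries: the address space code
 * (PCI_REG_ADDR_M) plus the prefetchable bit (PCI_REG_PF_M).
 * PCI_ADDR_TYPE_INVAL is returned by pci_type_ra2pci() for busra types
 * that have no PCI equivalent.
 */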
142 #define RA_INSERT(prev, el) \
143 el->ra_next = *prev; \
144 *prev = el;
146 #define RA_REMOVE(prev, el) \
147 *prev = el->ra_next;
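/*
 * RA_INSERT and RA_REMOVE operate on singly linked lists through a
 * pointer-to-pointer "prev" (either the list head or the previous
 * element's ra_next), so the same code works at any position in the
 * list. Note that RA_INSERT expands to two statements, so it must not
 * be used as the unbraced body of an if/else.
 */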
150 static struct modlmisc modlmisc = {
151 &mod_miscops, /* Type of module. This one is a module */
152 "Bus Resource Allocator (BUSRA)", /* Name of the module. */
155 static struct modlinkage modlinkage = {
156 MODREV_1, (void *)&modlmisc, NULL
160 _init()
162 int ret;
164 mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
165 (void *)(intptr_t)__ipltospl(SPL7 - 1));
166 if ((ret = mod_install(&modlinkage)) != 0) {
167 mutex_destroy(&ra_lock);
169 return (ret);
173 _fini()
175 int ret;
177 mutex_enter(&ra_lock);
179 if (ra_map_list_head != NULL) {
180 mutex_exit(&ra_lock);
181 return (EBUSY);
184 ret = mod_remove(&modlinkage);
186 mutex_exit(&ra_lock);
188 if (ret == 0)
189 mutex_destroy(&ra_lock);
191 return (ret);
195 _info(struct modinfo *modinfop)
198 return (mod_info(&modlinkage, modinfop));
202 * set up an empty resource map for a given type and dip
205 ndi_ra_map_setup(dev_info_t *dip, char *type)
207 struct ra_type_map *typemapp;
208 struct ra_dip_type *dipmap;
209 struct ra_dip_type **backdip;
210 struct ra_type_map **backtype;
213 mutex_enter(&ra_lock);
215 dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
217 if (dipmap == NULL) {
218 if (backtype == NULL) {
219 typemapp = (struct ra_type_map *)
220 kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
221 typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
222 KM_SLEEP);
223 (void) strcpy(typemapp->type, type);
224 RA_INSERT(&ra_map_list_head, typemapp);
225 } else {
226 typemapp = *backtype;
228 if (backdip == NULL) {
229 /* allocate and insert in list of dips for this type */
230 dipmap = (struct ra_dip_type *)
231 kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
232 dipmap->ra_dip = dip;
233 RA_INSERT(&typemapp->ra_dip_list, dipmap);
237 mutex_exit(&ra_lock);
238 return (NDI_SUCCESS);
242 * destroys a resource map for a given dip and type
245 ndi_ra_map_destroy(dev_info_t *dip, char *type)
247 struct ra_dip_type *dipmap;
248 struct ra_dip_type **backdip;
249 struct ra_type_map **backtype, *typemap;
250 struct ra_resource *range;
252 mutex_enter(&ra_lock);
253 dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
255 if (dipmap == NULL) {
256 mutex_exit(&ra_lock);
257 return (NDI_FAILURE);
261 * destroy all resources for this dip
262 * remove dip from type list
264 ASSERT((backdip != NULL) && (backtype != NULL));
265 while (dipmap->ra_rangeset != NULL) {
266 range = dipmap->ra_rangeset;
267 RA_REMOVE(&dipmap->ra_rangeset, range);
268 kmem_free((caddr_t)range, sizeof (*range));
270 /* remove from dip list */
271 RA_REMOVE(backdip, dipmap);
272 kmem_free((caddr_t)dipmap, sizeof (*dipmap));
273 if ((*backtype)->ra_dip_list == NULL) {
275 * This was the last dip with this resource type.
276 * Remove the type from the global list.
278 typemap = *backtype;
279 RA_REMOVE(backtype, (*backtype));
280 kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
281 kmem_free((caddr_t)typemap, sizeof (*typemap));
284 mutex_exit(&ra_lock);
285 return (NDI_SUCCESS);
288 static int
289 ra_map_exist(dev_info_t *dip, char *type)
291 struct ra_dip_type **backdip;
292 struct ra_type_map **backtype;
294 mutex_enter(&ra_lock);
295 if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
296 mutex_exit(&ra_lock);
297 return (NDI_FAILURE);
300 mutex_exit(&ra_lock);
301 return (NDI_SUCCESS);
304  * Find a dip map for the specified type; if NDI_RA_PASS is set, walk up
305  * the dev tree. If found, backdip and backtype are updated to point to the
306  * previous dip in the list and the previous type for this dip in the list.
307  * If the type is not in the resource list at all, both backdip and backtype
308  * will be null. If the type is found but not the dip, backdip will be null.
311 static struct ra_dip_type *
312 find_dip_map_resources(dev_info_t *dip, char *type,
313 struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
314 uint32_t flag)
316 struct ra_type_map **prevmap;
317 struct ra_dip_type *dipmap, **prevdip;
319 ASSERT(mutex_owned(&ra_lock));
320 prevdip = NULL;
321 dipmap = NULL;
322 prevmap = &ra_map_list_head;
324 while (*prevmap) {
325 if (strcmp((*prevmap)->type, type) == 0)
326 break;
327 prevmap = &(*prevmap)->ra_next;
330 if (*prevmap) {
331 for (; dip != NULL; dip = ddi_get_parent(dip)) {
332 prevdip = &(*prevmap)->ra_dip_list;
333 dipmap = *prevdip;
335 while (dipmap) {
336 if (dipmap->ra_dip == dip)
337 break;
338 prevdip = &dipmap->ra_next;
339 dipmap = dipmap->ra_next;
342 if (dipmap != NULL) {
343 /* found it */
344 break;
347 if (!(flag & NDI_RA_PASS)) {
348 break;
353 *backtype = (*prevmap == NULL) ? NULL: prevmap;
354 *backdip = (dipmap == NULL) ? NULL: prevdip;
356 return (dipmap);
360 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
361 uint32_t flag)
363 struct ra_dip_type *dipmap;
364 struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
365 struct ra_resource *mapp, **backp;
366 uint64_t newend, mapend;
367 struct ra_dip_type **backdip;
368 struct ra_type_map **backtype;
370 if (len == 0) {
371 return (NDI_SUCCESS);
374 mutex_enter(&ra_lock);
376 if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
377 flag)) == NULL) {
378 mutex_exit(&ra_lock);
379 return (NDI_FAILURE);
382 mapp = dipmap->ra_rangeset;
383 backp = &dipmap->ra_rangeset;
385 /* now find where range lies and fix things up */
386 newend = base + len;
387 for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
388 mapend = mapp->ra_base + mapp->ra_len;
390 /* check for overlap first */
391 if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
392 (base > mapp->ra_base && base < mapend)) {
393 /* overlap with mapp */
394 overlapmap = mapp;
395 goto overlap;
396 } else if ((base == mapend && mapp->ra_next) &&
397 (newend > mapp->ra_next->ra_base)) {
398 /* overlap with mapp->ra_next */
399 overlapmap = mapp->ra_next;
400 goto overlap;
403 if (newend == mapp->ra_base) {
404 /* simple - on front */
405 mapp->ra_base = base;
406 mapp->ra_len += len;
408  * don't need to check if it also merges with the
409  * previous node; that would have matched the "on end" case earlier
411 break;
412 } else if (base == mapend) {
413 /* simple - on end */
414 mapp->ra_len += len;
415 if (mapp->ra_next &&
416 (newend == mapp->ra_next->ra_base)) {
417 /* merge with next node */
418 oldmap = mapp->ra_next;
419 mapp->ra_len += oldmap->ra_len;
420 RA_REMOVE(&mapp->ra_next, oldmap);
421 kmem_free((caddr_t)oldmap, sizeof (*oldmap));
423 break;
424 } else if (base < mapp->ra_base) {
425 /* somewhere in between so just an insert */
426 newmap = (struct ra_resource *)
427 kmem_zalloc(sizeof (*newmap), KM_SLEEP);
428 newmap->ra_base = base;
429 newmap->ra_len = len;
430 RA_INSERT(backp, newmap);
431 break;
434 if (mapp == NULL) {
435 /* stick on end */
436 newmap = (struct ra_resource *)
437 kmem_zalloc(sizeof (*newmap), KM_SLEEP);
438 newmap->ra_base = base;
439 newmap->ra_len = len;
440 RA_INSERT(backp, newmap);
443 mutex_exit(&ra_lock);
446 * Update dip's "available" property, adding this piece of
447 * resource to the pool.
449 (void) pci_put_available_prop(dipmap->ra_dip, base, len, type);
450 done:
451 return (NDI_SUCCESS);
453 overlap:
455 * Bad free may happen on some x86 platforms with BIOS exporting
456 * incorrect resource maps. The system is otherwise functioning
457 * normally. We send such messages to syslog only.
459 cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
460 (void *)dip, type);
461 cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
462 PRIX64 " overlaps with existing resource base 0x%" PRIx64
463 ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
464 overlapmap->ra_len);
466 mutex_exit(&ra_lock);
467 return (NDI_FAILURE);
470 /* check to see if value is power of 2 or not. */
471 static int
472 isnot_pow2(uint64_t value)
474 uint32_t low;
475 uint32_t hi;
477 low = value & 0xffffffff;
478 hi = value >> 32;
481  * ddi_ffs and ddi_fls take long arguments, so in a 32-bit environment
482  * they won't work correctly on 64-bit values
484 if ((ddi_ffs(low) == ddi_fls(low)) &&
485 (ddi_ffs(hi) == ddi_fls(hi)))
486 return (0);
487 return (1);
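/*
 * adjust_link() carves the allocated range [base, base + len) out of the
 * free element *mapp: it shrinks the element from the front or the back,
 * splits it in two when the range falls in the middle, or unlinks and
 * frees it when the whole element is consumed. backp is the address of
 * the pointer that refers to mapp (needed for the unlink case).
 */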
490 static void
491 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
492 uint64_t base, uint64_t len)
494 struct ra_resource *newmap;
495 uint64_t newlen;
497 if (base != mapp->ra_base) {
498 /* in the middle or end */
499 newlen = base - mapp->ra_base;
500 if ((mapp->ra_len - newlen) == len) {
501 /* on the end */
502 mapp->ra_len = newlen;
503 } else {
504 /* in the middle */
505 newmap = (struct ra_resource *)
506 kmem_zalloc(sizeof (*newmap), KM_SLEEP);
507 newmap->ra_base = base + len;
508 newmap->ra_len = mapp->ra_len - (len + newlen);
509 mapp->ra_len = newlen;
510 RA_INSERT(&(mapp->ra_next), newmap);
512 } else {
513 /* at the beginning */
514 mapp->ra_base += len;
515 mapp->ra_len -= len;
516 if (mapp->ra_len == 0) {
517 /* remove the whole node */
518 RA_REMOVE(backp, mapp);
519 kmem_free((caddr_t)mapp, sizeof (*mapp));
525 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
526 uint64_t *retlenp, char *type, uint32_t flag)
528 struct ra_dip_type *dipmap;
529 struct ra_resource *mapp, **backp, **backlargestp;
530 uint64_t mask = 0;
531 uint64_t len, remlen, largestbase, largestlen;
532 uint64_t base, oldbase, lower, upper;
533 struct ra_dip_type **backdip;
534 struct ra_type_map **backtype;
535 int rval = NDI_FAILURE;
538 len = req->ra_len;
540 if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
541 if (isnot_pow2(req->ra_len)) {
542 DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
543 PRIx64, req->ra_len);
544 *retbasep = 0;
545 *retlenp = 0;
546 return (NDI_FAILURE);
550 mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
551 req->ra_align_mask;
554 mutex_enter(&ra_lock);
555 dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
556 if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
557 mutex_exit(&ra_lock);
558 DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
559 return (NDI_FAILURE);
562 DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
563 PRIx64 "\n", (void *)mapp, len, mask);
565 backp = &(dipmap->ra_rangeset);
566 backlargestp = NULL;
567 largestbase = 0;
568 largestlen = 0;
570 lower = 0;
571 upper = ~(uint64_t)0;
573 if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
574 /* bounded so skip to first possible */
575 lower = req->ra_boundbase;
576 upper = req->ra_boundlen + lower;
577 if ((upper == 0) || (upper < req->ra_boundlen))
578 upper = ~(uint64_t)0;
579 DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
580 PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
581 "\n", mapp->ra_len, len, mapp->ra_base, mask);
582 for (; mapp != NULL && (mapp->ra_base + mapp->ra_len) < lower;
583 backp = &(mapp->ra_next), mapp = mapp->ra_next) {
584 if (((mapp->ra_len + mapp->ra_base) == 0) ||
585 ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
587  * This element's end wraps past the maximum uint64_t;
588  * it is a potential candidate, since checking its end
589  * against lower would not be precise.
591 break;
593 DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
594 PRIx64 "\n", mapp->ra_len, mapp->ra_base);
599 if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
600 /* first fit - not user specified */
601 DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
602 "lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
603 for (; mapp != NULL && mapp->ra_base <= upper;
604 backp = &(mapp->ra_next), mapp = mapp->ra_next) {
606 DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
607 ", len = %" PRIx64 "", mapp->ra_len, len);
608 base = mapp->ra_base;
609 if (base < lower) {
610 base = lower;
611 DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
612 ", ra_base=%" PRIx64 ", mask=%" PRIx64,
613 base, mapp->ra_base, mask);
616 if ((base & mask) != 0) {
617 oldbase = base;
619 * failed a critical constraint
620 * adjust and see if it still fits
622 base = base & ~mask;
623 base += (mask + 1);
624 DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
625 base);
628 * Check to see if the new base is past
629 * the end of the resource.
631 if (base >= (oldbase + mapp->ra_len + 1)) {
632 continue;
636 if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
637 if ((upper - mapp->ra_base) < mapp->ra_len)
638 remlen = upper - base;
639 else
640 remlen = mapp->ra_len -
641 (base - mapp->ra_base);
643 if ((backlargestp == NULL) ||
644 (largestlen < remlen)) {
646 backlargestp = backp;
647 largestbase = base;
648 largestlen = remlen;
652 if (mapp->ra_len >= len) {
653 /* a candidate -- apply constraints */
654 if ((len > (mapp->ra_len -
655 (base - mapp->ra_base))) ||
656 ((len - 1 + base) > upper)) {
657 continue;
660 /* we have a fit */
662 DEBUGPRT(CE_CONT, "\thave a fit\n");
664 adjust_link(backp, mapp, base, len);
665 rval = NDI_SUCCESS;
666 break;
670 } else {
671 /* want an exact value/fit */
672 base = req->ra_addr;
673 len = req->ra_len;
674 for (; mapp != NULL && mapp->ra_base <= upper;
675 backp = &(mapp->ra_next), mapp = mapp->ra_next) {
676 if (base >= mapp->ra_base &&
677 ((base - mapp->ra_base) < mapp->ra_len)) {
679 * This is the node with the requested base in
680 * its range
682 if ((len > mapp->ra_len) ||
683 (base - mapp->ra_base >
684 mapp->ra_len - len)) {
685 /* length requirement not satisfied */
686 if (req->ra_flags &
687 NDI_RA_ALLOC_PARTIAL_OK) {
688 if ((upper - mapp->ra_base)
689 < mapp->ra_len)
690 remlen = upper - base;
691 else
692 remlen =
693 mapp->ra_len -
694 (base -
695 mapp->ra_base);
697 backlargestp = backp;
698 largestbase = base;
699 largestlen = remlen;
700 base = 0;
701 } else {
702 /* We have a match */
703 adjust_link(backp, mapp, base, len);
704 rval = NDI_SUCCESS;
706 break;
711 if ((rval != NDI_SUCCESS) &&
712 (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
713 (backlargestp != NULL)) {
714 adjust_link(backlargestp, *backlargestp, largestbase,
715 largestlen);
717 base = largestbase;
718 len = largestlen;
719 rval = NDI_RA_PARTIAL_REQ;
722 mutex_exit(&ra_lock);
724 if (rval == NDI_FAILURE) {
725 *retbasep = 0;
726 *retlenp = 0;
727 } else {
728 *retbasep = base;
729 *retlenp = len;
733  * Update dip's "available" property, subtracting this piece of
734  * resource from the pool.
736 if ((rval == NDI_SUCCESS) || (rval == NDI_RA_PARTIAL_REQ))
737 (void) pci_get_available_prop(dipmap->ra_dip,
738 *retbasep, *retlenp, type);
740 return (rval);
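/*
 * Example of a constrained request (illustrative only; the values are
 * hypothetical): allocate 0x2000 bytes of I/O space, naturally aligned
 * to its size and bounded to the first 64KB:
 *
 *	ndi_ra_request_t req;
 *	uint64_t base, len;
 *
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x2000;
 *	req.ra_flags = NDI_RA_ALIGN_SIZE | NDI_RA_ALLOC_BOUNDED;
 *	req.ra_boundbase = 0;
 *	req.ra_boundlen = 0x10000;
 *	if (ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_IO, 0) !=
 *	    NDI_SUCCESS) {
 *		... no suitable range was available ...
 *	}
 *
 * With NDI_RA_ALLOC_SPECIFIED the caller sets req.ra_addr instead and the
 * allocation is attempted at exactly that address (or partially, when
 * NDI_RA_ALLOC_PARTIAL_OK is also set).
 */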
744 * isa_resource_setup
745 * check for /used-resources and initialize
746 * based on info there. If no /used-resources,
747 * fail.
750 isa_resource_setup()
752 dev_info_t *used, *usedpdip;
754 * note that at this time bootconf creates 32 bit properties for
755 * io-space and device-memory
757 struct iorange {
758 uint32_t base;
759 uint32_t len;
760 } *iorange;
761 struct memrange {
762 uint32_t base;
763 uint32_t len;
764 } *memrange;
765 uint32_t *irq;
766 int proplen;
767 int i, len;
768 int maxrange;
769 ndi_ra_request_t req;
770 uint64_t retbase;
771 uint64_t retlen;
773 used = ddi_find_devinfo("used-resources", -1, 0);
774 if (used == NULL) {
775 DEBUGPRT(CE_CONT,
776 "isa_resource_setup: used-resources not found");
777 return (NDI_FAILURE);
781 * initialize to all resources being present
782 * and then remove the ones in use.
785 usedpdip = ddi_root_node();
787 DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
788 (void *)used, (void *)usedpdip);
790 if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
791 return (NDI_FAILURE);
794 /* initialize io space, highest end base is 0xffff */
795 /* note that length is highest addr + 1 since starts from 0 */
797 (void) ndi_ra_free(usedpdip, 0, 0xffff + 1, NDI_RA_TYPE_IO, 0);
799 if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
800 "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
801 maxrange = proplen / sizeof (struct iorange);
802 /* remove the "used" I/O resources */
803 for (i = 0; i < maxrange; i++) {
804 bzero((caddr_t)&req, sizeof (req));
805 req.ra_addr = (uint64_t)iorange[i].base;
806 req.ra_len = (uint64_t)iorange[i].len;
807 req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
808 (void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
809 NDI_RA_TYPE_IO, 0);
812 kmem_free((caddr_t)iorange, proplen);
815 if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
816 return (NDI_FAILURE);
818 /* initialize memory space where highest end base is 0xffffffff */
819 /* note that length is highest addr + 1 since starts from 0 */
820 (void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
821 NDI_RA_TYPE_MEM, 0);
823 if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
824 "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
825 maxrange = proplen / sizeof (struct memrange);
826 /* remove the "used" memory resources */
827 for (i = 0; i < maxrange; i++) {
828 bzero((caddr_t)&req, sizeof (req));
829 req.ra_addr = (uint64_t)memrange[i].base;
830 req.ra_len = (uint64_t)memrange[i].len;
831 req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
832 (void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
833 NDI_RA_TYPE_MEM, 0);
836 kmem_free((caddr_t)memrange, proplen);
839 if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
840 return (NDI_FAILURE);
843 /* initialize the interrupt space */
844 (void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);
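/*
 * Reserve IRQ 2 up front: on PC/AT-style interrupt controllers the second
 * 8259 is cascaded through IRQ 2, so devices claiming IRQ 2 actually
 * appear on IRQ 9 (hence the "2 == 9" note below) and the line must never
 * be handed out.
 */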
846 #if defined(__i386) || defined(__amd64)
847 bzero(&req, sizeof (req));
848 req.ra_addr = 2; /* 2 == 9 so never allow */
849 req.ra_len = 1;
850 req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
851 (void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
852 NDI_RA_TYPE_INTR, 0);
853 #endif
855 if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
856 "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
857 /* Initialize available interrupts by negating the used */
858 len = (proplen / sizeof (uint32_t));
859 for (i = 0; i < len; i++) {
860 bzero((caddr_t)&req, sizeof (req));
861 req.ra_addr = (uint64_t)irq[i];
862 req.ra_len = 1;
863 req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
864 (void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
865 NDI_RA_TYPE_INTR, 0);
867 kmem_free((caddr_t)irq, proplen);
870 #ifdef BUSRA_DEBUG
871 if (busra_debug) {
872 (void) ra_dump_all(NULL, usedpdip);
874 #endif
875 return (NDI_SUCCESS);
879 #ifdef BUSRA_DEBUG
880 void
881 ra_dump_all(char *type, dev_info_t *dip)
884 struct ra_type_map *typemap;
885 struct ra_dip_type *dipmap;
886 struct ra_resource *res;
888 typemap = (struct ra_type_map *)ra_map_list_head;
890 for (; typemap != NULL; typemap = typemap->ra_next) {
891 if (type != NULL) {
892 if (strcmp(typemap->type, type) != 0)
893 continue;
895 cmn_err(CE_CONT, "type is %s\n", typemap->type);
896 for (dipmap = typemap->ra_dip_list; dipmap != NULL;
897 dipmap = dipmap->ra_next) {
898 if (dip != NULL) {
899 if ((dipmap->ra_dip) != dip)
900 continue;
902 cmn_err(CE_CONT, " dip is %p\n",
903 (void *)dipmap->ra_dip);
904 for (res = dipmap->ra_rangeset; res != NULL;
905 res = res->ra_next) {
906 cmn_err(CE_CONT, "\t range is %" PRIx64
907 " %" PRIx64 "\n", res->ra_base,
908 res->ra_len);
910 if (dip != NULL)
911 break;
913 if (type != NULL)
914 break;
917 #endif
919 struct bus_range { /* 1275 "bus-range" property definition */
920 uint32_t lo;
921 uint32_t hi;
922 } pci_bus_range;
924 struct busnum_ctrl {
925 int rv;
926 dev_info_t *dip;
927 struct bus_range *range;
932 * Setup resource map for the pci bus node based on the "available"
933 * property and "bus-range" property.
936 pci_resource_setup(dev_info_t *dip)
938 pci_regspec_t *regs;
939 int rlen, rcount, i;
940 char bus_type[16] = "(unknown)";
941 int len;
942 struct busnum_ctrl ctrl;
943 int circular_count;
944 int rval = NDI_SUCCESS;
947 * If this is a pci bus node then look for "available" property
948 * to find the available resources on this bus.
950 len = sizeof (bus_type);
951 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
952 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
953 (caddr_t)&bus_type, &len) != DDI_SUCCESS)
954 return (NDI_FAILURE);
956 /* it is not a pci/pci-ex bus type */
957 if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
958 return (NDI_FAILURE);
961 * The pci-hotplug project addresses adding the call
962 * to pci_resource_setup from pci nexus driver.
963 * However that project would initially be only for x86,
964 * so for sparc pcmcia-pci support we still need to call
965 * pci_resource_setup in pcic driver. Once all pci nexus drivers
966 * are updated to call pci_resource_setup this portion of the
967 * code would really become an assert to make sure this
968 * function is not called for the same dip twice.
971 * Another user for the check below is hotplug PCI/PCIe bridges.
973 * For PCI/PCIE devices under a PCIE hierarchy, ndi_ra_alloc/free
974 * will update the devinfo node's "available" property, to reflect
975 * the fact that a piece of resource has been removed/added to
976 * a devinfo node.
977 * During probe of a new PCI bridge in the hotplug case, PCI
978 * configurator firstly allocates maximum MEM/IO from its parent,
979 * then calls ndi_ra_free() to use these resources to setup busra
980 * pool for the new bridge, as well as adding these resources to
981 * the "available" property of the new devinfo node. Then configu-
982 * rator will attach driver for the bridge before probing its
983  * children, and the bridge driver will then initialize its hotplug
984  * controllers (if it supports hotplug) and the HPC driver will call
985  * this function to set up the busra pool, but the resource pool
986  * has already been set up at the beginning of pcicfg_probe_bridge(),
987  * thus we need the check below to return directly in this case.
988 * Otherwise the ndi_ra_free() below will see overlapping resources.
991 if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
992 return (NDI_FAILURE);
998 * Create empty resource maps first.
1000 * NOTE: If all the allocated resources are already assigned to
1001  * device(s) in the hot plug slot then the "available" property may not
1002  * be present. But a subsequent hot plug operation may unconfigure
1003  * the device in the slot and try to free up its resources. So,
1004 * at the minimum we should create empty maps here.
1006 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
1007 return (NDI_FAILURE);
1010 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
1011 return (NDI_FAILURE);
1014 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
1015 return (NDI_FAILURE);
1018 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
1019 NDI_FAILURE) {
1020 return (NDI_FAILURE);
1023 /* read the "available" property if it is available */
1024 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1025 "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
1027  * Remove the "available" property, as the entries will be
1028  * re-created in ndi_ra_free() below. Note that a PROM-based
1029  * property will not be removed, but in ndi_ra_free()
1030  * we'll be creating non-PROM property entries.
1032 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1034 * create the available resource list for both memory and
1035 * io space
1037 rcount = rlen / sizeof (pci_regspec_t);
1038 for (i = 0; i < rcount; i++) {
1039 switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
1040 case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1041 (void) ndi_ra_free(dip,
1042 (uint64_t)regs[i].pci_phys_low,
1043 (uint64_t)regs[i].pci_size_low,
1044 (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1045 NDI_RA_TYPE_PCI_PREFETCH_MEM :
1046 NDI_RA_TYPE_MEM,
1048 break;
1049 case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1050 (void) ndi_ra_free(dip,
1051 ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1052 ((uint64_t)(regs[i].pci_phys_low)),
1053 ((uint64_t)(regs[i].pci_size_hi) << 32) |
1054 ((uint64_t)(regs[i].pci_size_low)),
1055 (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1056 NDI_RA_TYPE_PCI_PREFETCH_MEM :
1057 NDI_RA_TYPE_MEM,
1059 break;
1060 case PCI_REG_ADDR_G(PCI_ADDR_IO):
1061 (void) ndi_ra_free(dip,
1062 (uint64_t)regs[i].pci_phys_low,
1063 (uint64_t)regs[i].pci_size_low,
1064 NDI_RA_TYPE_IO,
1066 break;
1067 case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
1068 break;
1069 default:
1070 cmn_err(CE_WARN,
1071 "pci_resource_setup: bad addr type: %x\n",
1072 PCI_REG_ADDR_G(regs[i].pci_phys_hi));
1073 break;
1076 kmem_free(regs, rlen);
1080 * update resource map for available bus numbers if the node
1081 * has available-bus-range or bus-range property.
1083 len = sizeof (struct bus_range);
1084 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1085 "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
1086 DDI_SUCCESS) {
1088 * Add bus numbers in the range to the free list.
1090 (void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
1091 (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
1092 1, NDI_RA_TYPE_PCI_BUSNUM, 0);
1093 } else {
1095 * We don't have an available-bus-range property. If, instead,
1096 * we have a bus-range property we add all the bus numbers
1097 * in that range to the free list but we must then scan
1098  * for pci-pci bridges on this bus to find out whether any of
1099  * those bus numbers are already in use and, if so, claim them
1100  * out of the free list.
1102 len = sizeof (struct bus_range);
1103 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
1104 DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
1105 &len) == DDI_SUCCESS) {
1106 if (pci_bus_range.lo != pci_bus_range.hi) {
1108 * Add bus numbers other than the secondary
1109 * bus number to the free list.
1111 (void) ndi_ra_free(dip,
1112 (uint64_t)pci_bus_range.lo + 1,
1113 (uint64_t)pci_bus_range.hi -
1114 (uint64_t)pci_bus_range.lo,
1115 NDI_RA_TYPE_PCI_BUSNUM, 0);
1117 /* scan for pci-pci bridges */
1118 ctrl.rv = DDI_SUCCESS;
1119 ctrl.dip = dip;
1120 ctrl.range = &pci_bus_range;
1121 ndi_devi_enter(dip, &circular_count);
1122 ddi_walk_devs(ddi_get_child(dip),
1123 claim_pci_busnum, (void *)&ctrl);
1124 ndi_devi_exit(dip, circular_count);
1125 if (ctrl.rv != DDI_SUCCESS) {
1126 /* failed to create the map */
1127 (void) ndi_ra_map_destroy(dip,
1128 NDI_RA_TYPE_PCI_BUSNUM);
1129 rval = NDI_FAILURE;
1135 #ifdef BUSRA_DEBUG
1136 if (busra_debug) {
1137 (void) ra_dump_all(NULL, dip);
1139 #endif
1141 return (rval);
1145  * If the device is a PCI bus device (i.e., a bus-range property exists) then
1146 * claim the bus numbers used by the device from the specified bus
1147 * resource map.
1149 static int
1150 claim_pci_busnum(dev_info_t *dip, void *arg)
1152 struct bus_range pci_bus_range;
1153 struct busnum_ctrl *ctrl;
1154 ndi_ra_request_t req;
1155 char bus_type[16] = "(unknown)";
1156 int len;
1157 uint64_t base;
1158 uint64_t retlen;
1160 ctrl = (struct busnum_ctrl *)arg;
1162 /* check if this is a PCI bus node */
1163 len = sizeof (bus_type);
1164 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1165 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1166 (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1167 return (DDI_WALK_PRUNECHILD);
1169 /* it is not a pci/pci-ex bus type */
1170 if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1171 return (DDI_WALK_PRUNECHILD);
1173 /* look for the bus-range property */
1174 len = sizeof (struct bus_range);
1175 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1176 "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1177 if ((pci_bus_range.lo >= ctrl->range->lo) &&
1178 (pci_bus_range.hi <= ctrl->range->hi)) {
1180 /* claim the bus range from the bus resource map */
1181 bzero((caddr_t)&req, sizeof (req));
1182 req.ra_addr = (uint64_t)pci_bus_range.lo;
1183 req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1184 req.ra_len = (uint64_t)pci_bus_range.hi -
1185 (uint64_t)pci_bus_range.lo + 1;
1186 if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1187 NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1188 return (DDI_WALK_PRUNECHILD);
1193 * Error return.
1195 ctrl->rv = DDI_FAILURE;
1196 return (DDI_WALK_TERMINATE);
1199 void
1200 pci_resource_destroy(dev_info_t *dip)
1202 (void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1204 (void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1206 (void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1208 (void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1213 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1215 int i;
1217 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1218 return (NDI_FAILURE);
1219 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1220 return (NDI_FAILURE);
1221 if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1222 return (NDI_FAILURE);
1224 /* for each entry in the PCI "available" property */
1225 for (i = 0; i < entries; i++, avail_p++) {
1226 if (avail_p->pci_phys_hi == -1u)
1227 goto err;
1229 switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1230 case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1231 (void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1232 (uint64_t)avail_p->pci_size_low,
1233 (avail_p->pci_phys_hi & PCI_REG_PF_M) ?
1234 NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
1237 break;
1238 case PCI_REG_ADDR_G(PCI_ADDR_IO):
1239 (void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1240 (uint64_t)avail_p->pci_size_low, NDI_RA_TYPE_IO, 0);
1241 break;
1242 default:
1243 goto err;
1246 #ifdef BUSRA_DEBUG
1247 if (busra_debug) {
1248 (void) ra_dump_all(NULL, dip);
1250 #endif
1251 return (NDI_SUCCESS);
1253 err:
1254 cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1255 i, avail_p->pci_phys_hi);
1256 return (NDI_FAILURE);
1260  * Return true if the devinfo node resides on a PCI or PCI Express bus
1261  * that sits in a PCI Express hierarchy.
1263 static boolean_t
1264 is_pcie_fabric(dev_info_t *dip)
1266 dev_info_t *root = ddi_root_node();
1267 dev_info_t *pdip;
1268 boolean_t found = B_FALSE;
1269 char *bus;
1272 * Is this pci/pcie ?
1274 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1275 DDI_PROP_DONTPASS, "device_type", &bus) !=
1276 DDI_PROP_SUCCESS) {
1277 DEBUGPRT(CE_WARN, "is_pcie_fabric: cannot find "
1278 "\"device_type\" property for dip %p\n", (void *)dip);
1279 return (B_FALSE);
1282 if (strcmp(bus, "pciex") == 0) {
1283 /* pcie bus, done */
1284 ddi_prop_free(bus);
1285 return (B_TRUE);
1286 } else if (strcmp(bus, "pci") == 0) {
1288 * pci bus, fall through to check if it resides in
1289 * a pcie hierarchy.
1291 ddi_prop_free(bus);
1292 } else {
1293 /* other bus, return failure */
1294 ddi_prop_free(bus);
1295 return (B_FALSE);
1299 * Does this device reside in a pcie fabric ?
1301 for (pdip = ddi_get_parent(dip); pdip && (pdip != root) &&
1302 !found; pdip = ddi_get_parent(pdip)) {
1303 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
1304 DDI_PROP_DONTPASS, "device_type", &bus) !=
1305 DDI_PROP_SUCCESS)
1306 break;
1308 if (strcmp(bus, "pciex") == 0)
1309 found = B_TRUE;
1311 ddi_prop_free(bus);
1314 return (found);
1318  * Remove a piece of IO/MEM resource from the "available" property of 'dip'.
1320 static int
1321 pci_get_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
1322 char *busra_type)
1324 pci_regspec_t *regs, *newregs;
1325 uint_t status;
1326 int rlen, rcount;
1327 int i, j, k;
1328 uint64_t dlen;
1329 boolean_t found = B_FALSE;
1330 uint32_t type;
1332 /* check if we're manipulating MEM/IO resource */
1333 if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
1334 return (DDI_SUCCESS);
1336  /* check if dip is a pci/pcie device that resides in a pcie fabric */
1337 if (!is_pcie_fabric(dip))
1338 return (DDI_SUCCESS);
1340 status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
1341 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1342 "available", (caddr_t)&regs, &rlen);
1344 ASSERT(status == DDI_SUCCESS);
1345 if (status != DDI_SUCCESS)
1346 return (status);
1349  * The updated "available" property will have at most one more entry
1350  * than the existing one (when the requested range is in the middle of
1351  * the matched property entry)
1353 newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);
1355 rcount = rlen / sizeof (pci_regspec_t);
1356 for (i = 0, j = 0; i < rcount; i++) {
1357 if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
1358 uint64_t range_base, range_len;
1360 range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1361 ((uint64_t)(regs[i].pci_phys_low));
1362 range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
1363 ((uint64_t)(regs[i].pci_size_low));
1365 if ((base < range_base) ||
1366 (base + len > range_base + range_len)) {
1368 * not a match, copy the entry
1370 goto copy_entry;
1374 * range_base base base+len range_base
1375 * +range_len
1376 * +------------+-----------+----------+
1377 * | |///////////| |
1378 * +------------+-----------+----------+
1381 * Found a match, remove the range out of this entry.
1383 found = B_TRUE;
1385 dlen = base - range_base;
1386 if (dlen != 0) {
1387 newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1388 newregs[j].pci_phys_mid =
1389 (uint32_t)(range_base >> 32);
1390 newregs[j].pci_phys_low =
1391 (uint32_t)(range_base);
1392 newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1393 newregs[j].pci_size_low = (uint32_t)dlen;
1394 j++;
1397 dlen = (range_base + range_len) - (base + len);
1398 if (dlen != 0) {
1399 newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1400 newregs[j].pci_phys_mid =
1401 (uint32_t)((base + len)>> 32);
1402 newregs[j].pci_phys_low =
1403 (uint32_t)(base + len);
1404 newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1405 newregs[j].pci_size_low = (uint32_t)dlen;
1406 j++;
1410  * We've allocated the resource from the matched
1411  * entry; almost finished, but we still need to copy
1412  * the remaining entries from the original property
1413  * array.
1415 for (k = i + 1; k < rcount; k++) {
1416 newregs[j] = regs[k];
1417 j++;
1420 goto done;
1422 } else {
1423 copy_entry:
1424 newregs[j] = regs[i];
1425 j++;
1429 done:
1431  * This should not fail, so assert it. For a non-debug kernel we don't
1432  * want to panic, thus we only log a warning message.
1434 ASSERT(found == B_TRUE);
1435 if (!found) {
1436 cmn_err(CE_WARN, "pci_get_available_prop: failed to remove "
1437 "resource from dip %p : base 0x%" PRIx64 ", len 0x%" PRIX64
1438 ", type 0x%x\n", (void *)dip, base, len, type);
1439 kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1440 kmem_free(regs, rlen);
1442 return (DDI_FAILURE);
1446 * Found the resources from parent, update the "available"
1447 * property.
1449 if (j == 0) {
1450 /* all the resources are consumed, remove the property */
1451 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1452 } else {
1454  * There are still resources available in the parent dip;
1455  * update the property with the remaining resources.
1457 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1458 "available", (int *)newregs,
1459 (j * sizeof (pci_regspec_t)) / sizeof (int));
1462 kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1463 kmem_free(regs, rlen);
1465 return (DDI_SUCCESS);
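/*
 * As the code above shows, each "available" entry is a pci_regspec_t in
 * the 1275 PCI binding format: pci_phys_hi carries the address space
 * code and the prefetchable bit, while the 64-bit base and size are
 * split across two 32-bit cells each:
 *
 *	base = ((uint64_t)pci_phys_mid << 32) | pci_phys_low;
 *	len  = ((uint64_t)pci_size_hi  << 32) | pci_size_low;
 */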
1469  * Add a piece of IO/MEM resource to the "available" property of 'dip'.
1471 static int
1472 pci_put_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
1473 char *busra_type)
1475 pci_regspec_t *regs, *newregs;
1476 uint_t status;
1477 int rlen, rcount;
1478 int i, j, k;
1479 int matched = 0;
1480 uint64_t orig_base = base;
1481 uint64_t orig_len = len;
1482 uint32_t type;
1484 /* check if we're manipulating MEM/IO resource */
1485 if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
1486 return (DDI_SUCCESS);
1488  /* check if dip is a pci/pcie device that resides in a pcie fabric */
1489 if (!is_pcie_fabric(dip))
1490 return (DDI_SUCCESS);
1492 status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
1493 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1494 "available", (caddr_t)&regs, &rlen);
1496 switch (status) {
1497 case DDI_PROP_NOT_FOUND:
1498 goto not_found;
1500 case DDI_PROP_SUCCESS:
1501 break;
1503 default:
1504 return (status);
1508  * The "available" property exists on the node; try to put this
1509  * resource back, merging with adjacent resources if there are any.
1511  * The updated "available" property will have at most one more entry
1512  * than the existing one (when there are no adjacent entries, the new
1513  * resource is appended at the end)
1515 newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);
1517 rcount = rlen / sizeof (pci_regspec_t);
1518 for (i = 0, j = 0; i < rcount; i++) {
1519 if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
1520 uint64_t range_base, range_len;
1522 range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1523 ((uint64_t)(regs[i].pci_phys_low));
1524 range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
1525 ((uint64_t)(regs[i].pci_size_low));
1527 if ((base + len < range_base) ||
1528 (base > range_base + range_len)) {
1530  * Not adjacent, copy the entry and continue
1532 goto copy_entry;
1536 * Adjacent or overlap?
1538  * We should not have overlapping resources, so assert it.
1539  * For a non-debug kernel we don't want to panic, thus
1540  * we only log a warning message.
1542 #if 0
1543 ASSERT((base + len == range_base) ||
1544 (base == range_base + range_len));
1545 #endif
1546 if ((base + len != range_base) &&
1547 (base != range_base + range_len)) {
1548 cmn_err(CE_WARN, "pci_put_available_prop: "
1549 "failed to add resource to dip %p : "
1550 "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
1551 "overlaps with existing resource "
1552 "base 0x%" PRIx64 ", len 0x%" PRIx64 "\n",
1553 (void *)dip, orig_base, orig_len,
1554 range_base, range_len);
1556 goto failure;
1560 * On the left:
1562 * base range_base
1563 * +-------------+-------------+
1564 * |/////////////| |
1565 * +-------------+-------------+
1566 * len range_len
1568 * On the right:
1570 * range_base base
1571 * +-------------+-------------+
1572 * | |/////////////|
1573 * +-------------+-------------+
1574 * range_len len
1577  * There are at most two pieces of resource adjacent
1578  * to this resource; assert it.
1580 ASSERT(matched < 2);
1582 if (!(matched < 2)) {
1583 cmn_err(CE_WARN, "pci_put_available_prop: "
1584 "failed to add resource to dip %p : "
1585 "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
1586 "found overlaps in existing resources\n",
1587 (void *)dip, orig_base, orig_len);
1589 goto failure;
1592 /* setup base & len to refer to the merged range */
1593 len += range_len;
1594 if (base == range_base + range_len)
1595 base = range_base;
1597 if (matched == 0) {
1599 * One adjacent entry, add this resource in
1601 newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1602 newregs[j].pci_phys_mid =
1603 (uint32_t)(base >> 32);
1604 newregs[j].pci_phys_low = (uint32_t)(base);
1605 newregs[j].pci_size_hi = (uint32_t)(len >> 32);
1606 newregs[j].pci_size_low = (uint32_t)len;
1608 matched = 1;
1609 k = j;
1610 j++;
1611 } else { /* matched == 1 */
1613 * Two adjacent entries, merge them together
1615 newregs[k].pci_phys_hi = regs[i].pci_phys_hi;
1616 newregs[k].pci_phys_mid =
1617 (uint32_t)(base >> 32);
1618 newregs[k].pci_phys_low = (uint32_t)(base);
1619 newregs[k].pci_size_hi = (uint32_t)(len >> 32);
1620 newregs[k].pci_size_low = (uint32_t)len;
1622 matched = 2;
1624 } else {
1625 copy_entry:
1626 newregs[j] = regs[i];
1627 j++;
1631 if (matched == 0) {
1632 /* No adjacent entries, append at end */
1633 ASSERT(j == rcount);
1636 * According to page 15 of 1275 spec, bit "n" of "available"
1637 * should be set to 1.
1639 newregs[j].pci_phys_hi = type;
1640 newregs[j].pci_phys_hi |= PCI_REG_REL_M;
1642 newregs[j].pci_phys_mid = (uint32_t)(base >> 32);
1643 newregs[j].pci_phys_low = (uint32_t)base;
1644 newregs[j].pci_size_hi = (uint32_t)(len >> 32);
1645 newregs[j].pci_size_low = (uint32_t)len;
1647 j++;
1650 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1651 "available", (int *)newregs,
1652 (j * sizeof (pci_regspec_t)) / sizeof (int));
1654 kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1655 kmem_free(regs, rlen);
1656 return (DDI_SUCCESS);
1658 not_found:
1660 * There is no "available" property on the parent node, create it.
1662 newregs = kmem_alloc(sizeof (pci_regspec_t), KM_SLEEP);
1665 * According to page 15 of 1275 spec, bit "n" of "available" should
1666 * be set to 1.
1668 newregs[0].pci_phys_hi = type;
1669 newregs[0].pci_phys_hi |= PCI_REG_REL_M;
1671 newregs[0].pci_phys_mid = (uint32_t)(base >> 32);
1672 newregs[0].pci_phys_low = (uint32_t)base;
1673 newregs[0].pci_size_hi = (uint32_t)(len >> 32);
1674 newregs[0].pci_size_low = (uint32_t)len;
1676 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1677 "available", (int *)newregs,
1678 sizeof (pci_regspec_t) / sizeof (int));
1679 kmem_free(newregs, sizeof (pci_regspec_t));
1680 return (DDI_SUCCESS);
1682 failure:
1683 kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1684 kmem_free(regs, rlen);
1685 return (DDI_FAILURE);
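/*
 * Map a busra type string onto the PCI address-type bits used in
 * pci_phys_hi. Both regular and prefetchable memory map onto MEM32
 * (there is no 64-bit memory support yet), with PCI_REG_PF_M added for
 * the prefetchable case; any other type yields PCI_ADDR_TYPE_INVAL so
 * the callers above treat it as "nothing to update".
 */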
1688 static uint32_t
1689 pci_type_ra2pci(char *type)
1691 uint32_t pci_type = PCI_ADDR_TYPE_INVAL;
1694 * No 64 bit mem support for now
1696 if (strcmp(type, NDI_RA_TYPE_IO) == 0) {
1697 pci_type = PCI_ADDR_IO;
1699 } else if (strcmp(type, NDI_RA_TYPE_MEM) == 0) {
1700 pci_type = PCI_ADDR_MEM32;
1702 } else if (strcmp(type, NDI_RA_TYPE_PCI_PREFETCH_MEM) == 0) {
1703 pci_type = PCI_ADDR_MEM32;
1704 pci_type |= PCI_REG_PF_M;
1707 return (pci_type);