/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/delay.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
30 /* ********** from siutils.c *********** */
32 #include <pcie_core.h>
/*
 * True when running on a BCM47162 rev 0 whose current core is the MIPS74K:
 * on that combination the MIPS DMP (wrapper) registers must not be accessed.
 * NOTE: relies on `sih` and `sii` being in scope at the expansion site.
 */
#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
			(sih->chiprev == 0) && \
			(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
45 get_erom_ent(si_t
*sih
, u32
**eromptr
, u32 mask
, u32 match
)
48 uint inv
= 0, nom
= 0;
51 ent
= R_REG(*eromptr
);
57 if ((ent
& ER_VALID
) == 0) {
62 if (ent
== (ER_END
| ER_VALID
))
65 if ((ent
& mask
) == match
)
71 SI_VMSG(("%s: Returning ent 0x%08x\n", __func__
, ent
));
73 SI_VMSG((" after %d invalid and %d non-matching entries\n",
80 get_asd(si_t
*sih
, u32
**eromptr
, uint sp
, uint ad
, uint st
,
81 u32
*addrl
, u32
*addrh
, u32
*sizel
, u32
*sizeh
)
85 asd
= get_erom_ent(sih
, eromptr
, ER_VALID
, ER_VALID
);
86 if (((asd
& ER_TAG1
) != ER_ADD
) ||
87 (((asd
& AD_SP_MASK
) >> AD_SP_SHIFT
) != sp
) ||
88 ((asd
& AD_ST_MASK
) != st
)) {
89 /* This is not what we want, "push" it back */
93 *addrl
= asd
& AD_ADDR_MASK
;
95 *addrh
= get_erom_ent(sih
, eromptr
, 0, 0);
99 sz
= asd
& AD_SZ_MASK
;
100 if (sz
== AD_SZ_SZD
) {
101 szd
= get_erom_ent(sih
, eromptr
, 0, 0);
102 *sizel
= szd
& SD_SZ_MASK
;
104 *sizeh
= get_erom_ent(sih
, eromptr
, 0, 0);
106 *sizel
= AD_SZ_BASE
<< (sz
>> AD_SZ_SHIFT
);
108 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
109 sp
, ad
, st
, *sizeh
, *sizel
, *addrh
, *addrl
));
114 static void ai_hwfixup(si_info_t
*sii
)
118 /* parse the enumeration rom to identify all cores */
119 void ai_scan(si_t
*sih
, void *regs
, uint devid
)
121 si_info_t
*sii
= SI_INFO(sih
);
122 chipcregs_t
*cc
= (chipcregs_t
*) regs
;
123 u32 erombase
, *eromptr
, *eromlim
;
125 erombase
= R_REG(&cc
->eromptr
);
127 switch (sih
->bustype
) {
129 eromptr
= (u32
*) REG_MAP(erombase
, SI_CORE_SIZE
);
133 /* Set wrappers address */
134 sii
->curwrap
= (void *)((unsigned long)regs
+ SI_CORE_SIZE
);
136 /* Now point the window at the erom */
137 pci_write_config_dword(sii
->pbus
, PCI_BAR0_WIN
, erombase
);
143 eromptr
= (u32
*)(unsigned long)erombase
;
147 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
151 eromlim
= eromptr
+ (ER_REMAPCONTROL
/ sizeof(u32
));
153 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs
, erombase
, eromptr
, eromlim
));
154 while (eromptr
< eromlim
) {
155 u32 cia
, cib
, cid
, mfg
, crev
, nmw
, nsw
, nmp
, nsp
;
156 u32 mpd
, asd
, addrl
, addrh
, sizel
, sizeh
;
163 /* Grok a component */
164 cia
= get_erom_ent(sih
, &eromptr
, ER_TAG
, ER_CI
);
165 if (cia
== (ER_END
| ER_VALID
)) {
166 SI_VMSG(("Found END of erom after %d cores\n",
172 cib
= get_erom_ent(sih
, &eromptr
, 0, 0);
174 if ((cib
& ER_TAG
) != ER_CI
) {
175 SI_ERROR(("CIA not followed by CIB\n"));
179 cid
= (cia
& CIA_CID_MASK
) >> CIA_CID_SHIFT
;
180 mfg
= (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
181 crev
= (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
182 nmw
= (cib
& CIB_NMW_MASK
) >> CIB_NMW_SHIFT
;
183 nsw
= (cib
& CIB_NSW_MASK
) >> CIB_NSW_SHIFT
;
184 nmp
= (cib
& CIB_NMP_MASK
) >> CIB_NMP_SHIFT
;
185 nsp
= (cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
;
187 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg
, cid
, crev
, base
, nmw
, nsw
, nmp
, nsp
));
189 if (((mfg
== MFGID_ARM
) && (cid
== DEF_AI_COMP
)) || (nsp
== 0))
191 if ((nmw
+ nsw
== 0)) {
192 /* A component which is not a core */
193 if (cid
== OOB_ROUTER_CORE_ID
) {
194 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
,
195 &addrl
, &addrh
, &sizel
, &sizeh
);
197 sii
->oob_router
= addrl
;
204 /* sii->eromptr[idx] = base; */
207 sii
->coreid
[idx
] = cid
;
209 for (i
= 0; i
< nmp
; i
++) {
210 mpd
= get_erom_ent(sih
, &eromptr
, ER_VALID
, ER_VALID
);
211 if ((mpd
& ER_TAG
) != ER_MP
) {
212 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid
));
215 SI_VMSG((" Master port %d, mp: %d id: %d\n", i
,
216 (mpd
& MPD_MP_MASK
) >> MPD_MP_SHIFT
,
217 (mpd
& MPD_MUI_MASK
) >> MPD_MUI_SHIFT
));
220 /* First Slave Address Descriptor should be port 0:
221 * the main register space for the core
224 get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
, &addrl
, &addrh
,
227 /* Try again to see if it is a bridge */
229 get_asd(sih
, &eromptr
, 0, 0, AD_ST_BRIDGE
, &addrl
,
230 &addrh
, &sizel
, &sizeh
);
233 else if ((addrh
!= 0) || (sizeh
!= 0)
234 || (sizel
!= SI_CORE_SIZE
)) {
235 SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid
, asd
));
239 sii
->coresba
[idx
] = addrl
;
240 sii
->coresba_size
[idx
] = sizel
;
241 /* Get any more ASDs in port 0 */
245 get_asd(sih
, &eromptr
, 0, j
, AD_ST_SLAVE
, &addrl
,
246 &addrh
, &sizel
, &sizeh
);
247 if ((asd
!= 0) && (j
== 1) && (sizel
== SI_CORE_SIZE
)) {
248 sii
->coresba2
[idx
] = addrl
;
249 sii
->coresba2_size
[idx
] = sizel
;
254 /* Go through the ASDs for other slave ports */
255 for (i
= 1; i
< nsp
; i
++) {
259 get_asd(sih
, &eromptr
, i
, j
++, AD_ST_SLAVE
,
260 &addrl
, &addrh
, &sizel
, &sizeh
);
263 SI_ERROR((" SP %d has no address descriptors\n",
269 /* Now get master wrappers */
270 for (i
= 0; i
< nmw
; i
++) {
272 get_asd(sih
, &eromptr
, i
, 0, AD_ST_MWRAP
, &addrl
,
273 &addrh
, &sizel
, &sizeh
);
275 SI_ERROR(("Missing descriptor for MW %d\n", i
));
278 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
279 SI_ERROR(("Master wrapper %d is not 4KB\n", i
));
283 sii
->wrapba
[idx
] = addrl
;
286 /* And finally slave wrappers */
287 for (i
= 0; i
< nsw
; i
++) {
288 uint fwp
= (nsp
== 1) ? 0 : 1;
290 get_asd(sih
, &eromptr
, fwp
+ i
, 0, AD_ST_SWRAP
,
291 &addrl
, &addrh
, &sizel
, &sizeh
);
293 SI_ERROR(("Missing descriptor for SW %d\n", i
));
296 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
297 SI_ERROR(("Slave wrapper %d is not 4KB\n", i
));
300 if ((nmw
== 0) && (i
== 0))
301 sii
->wrapba
[idx
] = addrl
;
304 /* Don't record bridges */
312 SI_ERROR(("Reached end of erom without finding END"));
319 /* This function changes the logical "focus" to the indicated core.
320 * Return the current core's virtual address.
322 void *ai_setcoreidx(si_t
*sih
, uint coreidx
)
324 si_info_t
*sii
= SI_INFO(sih
);
325 u32 addr
= sii
->coresba
[coreidx
];
326 u32 wrap
= sii
->wrapba
[coreidx
];
329 if (coreidx
>= sii
->numcores
)
332 switch (sih
->bustype
) {
335 if (!sii
->regs
[coreidx
]) {
336 sii
->regs
[coreidx
] = REG_MAP(addr
, SI_CORE_SIZE
);
338 sii
->curmap
= regs
= sii
->regs
[coreidx
];
339 if (!sii
->wrappers
[coreidx
]) {
340 sii
->wrappers
[coreidx
] = REG_MAP(wrap
, SI_CORE_SIZE
);
342 sii
->curwrap
= sii
->wrappers
[coreidx
];
346 /* point bar0 window */
347 pci_write_config_dword(sii
->pbus
, PCI_BAR0_WIN
, addr
);
349 /* point bar0 2nd 4KB window */
350 pci_write_config_dword(sii
->pbus
, PCI_BAR0_WIN2
, wrap
);
355 sii
->curmap
= regs
= (void *)(unsigned long)addr
;
356 sii
->curwrap
= (void *)(unsigned long)wrap
;
365 sii
->curidx
= coreidx
;
370 /* Return the number of address spaces in current core */
371 int ai_numaddrspaces(si_t
*sih
)
376 /* Return the address of the nth address space in the current core */
377 u32
ai_addrspace(si_t
*sih
, uint asidx
)
386 return sii
->coresba
[cidx
];
388 return sii
->coresba2
[cidx
];
390 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
395 /* Return the size of the nth address space in the current core */
396 u32
ai_addrspacesize(si_t
*sih
, uint asidx
)
405 return sii
->coresba_size
[cidx
];
407 return sii
->coresba2_size
[cidx
];
409 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
414 uint
ai_flag(si_t
*sih
)
420 if (BCM47162_DMP()) {
421 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__
));
426 return R_REG(&ai
->oobselouta30
) & 0x1f;
429 void ai_setint(si_t
*sih
, int siflag
)
433 uint
ai_corevendor(si_t
*sih
)
439 cia
= sii
->cia
[sii
->curidx
];
440 return (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
443 uint
ai_corerev(si_t
*sih
)
449 cib
= sii
->cib
[sii
->curidx
];
450 return (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
453 bool ai_iscoreup(si_t
*sih
)
461 return (((R_REG(&ai
->ioctrl
) & (SICF_FGC
| SICF_CLOCK_EN
)) ==
463 && ((R_REG(&ai
->resetctrl
) & AIRC_RESET
) == 0));
466 void ai_core_cflags_wo(si_t
*sih
, u32 mask
, u32 val
)
474 if (BCM47162_DMP()) {
475 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
483 w
= ((R_REG(&ai
->ioctrl
) & ~mask
) | val
);
484 W_REG(&ai
->ioctrl
, w
);
488 u32
ai_core_cflags(si_t
*sih
, u32 mask
, u32 val
)
495 if (BCM47162_DMP()) {
496 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
504 w
= ((R_REG(&ai
->ioctrl
) & ~mask
) | val
);
505 W_REG(&ai
->ioctrl
, w
);
508 return R_REG(&ai
->ioctrl
);
511 u32
ai_core_sflags(si_t
*sih
, u32 mask
, u32 val
)
518 if (BCM47162_DMP()) {
519 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__
));
526 w
= ((R_REG(&ai
->iostatus
) & ~mask
) | val
);
527 W_REG(&ai
->iostatus
, w
);
530 return R_REG(&ai
->iostatus
);
533 /* *************** from siutils.c ************** */
534 /* local prototypes */
535 static si_info_t
*ai_doattach(si_info_t
*sii
, uint devid
, void *regs
,
536 uint bustype
, void *sdh
, char **vars
,
538 static bool ai_buscore_prep(si_info_t
*sii
, uint bustype
, uint devid
,
540 static bool ai_buscore_setup(si_info_t
*sii
, chipcregs_t
*cc
, uint bustype
,
541 u32 savewin
, uint
*origidx
, void *regs
);
542 static void ai_nvram_process(si_info_t
*sii
, char *pvars
);
544 /* dev path concatenation util */
545 static char *ai_devpathvar(si_t
*sih
, char *var
, int len
, const char *name
);
546 static bool _ai_clkctl_cc(si_info_t
*sii
, uint mode
);
547 static bool ai_ispcie(si_info_t
*sii
);
549 /* global variable to indicate reservation/release of gpio's */
550 static u32 ai_gpioreservation
;
553 * Allocate a si handle.
554 * devid - pci device id (used to determine chip#)
555 * osh - opaque OS handle
556 * regs - virtual address of initial core registers
557 * bustype - pci/sb/sdio/etc
558 * vars - pointer to a pointer area for "environment" variables
559 * varsz - pointer to int to return the size of the vars
561 si_t
*ai_attach(uint devid
, void *regs
, uint bustype
,
562 void *sdh
, char **vars
, uint
*varsz
)
566 /* alloc si_info_t */
567 sii
= kmalloc(sizeof(si_info_t
), GFP_ATOMIC
);
569 SI_ERROR(("si_attach: malloc failed!\n"));
573 if (ai_doattach(sii
, devid
, regs
, bustype
, sdh
, vars
, varsz
) ==
578 sii
->vars
= vars
? *vars
: NULL
;
579 sii
->varsz
= varsz
? *varsz
: 0;
584 /* global kernel resource */
585 static si_info_t ksii
;
587 static bool ai_buscore_prep(si_info_t
*sii
, uint bustype
, uint devid
,
590 /* kludge to enable the clock on the 4306 which lacks a slowclock */
591 if (bustype
== PCI_BUS
&& !ai_ispcie(sii
))
592 ai_clkctl_xtal(&sii
->pub
, XTAL
| PLL
, ON
);
596 static bool ai_buscore_setup(si_info_t
*sii
, chipcregs_t
*cc
, uint bustype
,
597 u32 savewin
, uint
*origidx
, void *regs
)
601 uint pciidx
, pcieidx
, pcirev
, pcierev
;
603 cc
= ai_setcoreidx(&sii
->pub
, SI_CC_IDX
);
606 /* get chipcommon rev */
607 sii
->pub
.ccrev
= (int)ai_corerev(&sii
->pub
);
609 /* get chipcommon chipstatus */
610 if (sii
->pub
.ccrev
>= 11)
611 sii
->pub
.chipst
= R_REG(&cc
->chipstatus
);
613 /* get chipcommon capabilites */
614 sii
->pub
.cccaps
= R_REG(&cc
->capabilities
);
615 /* get chipcommon extended capabilities */
617 if (sii
->pub
.ccrev
>= 35)
618 sii
->pub
.cccaps_ext
= R_REG(&cc
->capabilities_ext
);
620 /* get pmu rev and caps */
621 if (sii
->pub
.cccaps
& CC_CAP_PMU
) {
622 sii
->pub
.pmucaps
= R_REG(&cc
->pmucapabilities
);
623 sii
->pub
.pmurev
= sii
->pub
.pmucaps
& PCAP_REV_MASK
;
626 /* figure out bus/orignal core idx */
627 sii
->pub
.buscoretype
= NODEV_CORE_ID
;
628 sii
->pub
.buscorerev
= NOREV
;
629 sii
->pub
.buscoreidx
= BADIDX
;
632 pcirev
= pcierev
= NOREV
;
633 pciidx
= pcieidx
= BADIDX
;
635 for (i
= 0; i
< sii
->numcores
; i
++) {
638 ai_setcoreidx(&sii
->pub
, i
);
639 cid
= ai_coreid(&sii
->pub
);
640 crev
= ai_corerev(&sii
->pub
);
642 /* Display cores found */
643 SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
644 i
, cid
, crev
, sii
->coresba
[i
], sii
->regs
[i
]));
646 if (bustype
== PCI_BUS
) {
647 if (cid
== PCI_CORE_ID
) {
651 } else if (cid
== PCIE_CORE_ID
) {
658 /* find the core idx before entering this func. */
659 if ((savewin
&& (savewin
== sii
->coresba
[i
])) ||
660 (regs
== sii
->regs
[i
]))
671 sii
->pub
.buscoretype
= PCI_CORE_ID
;
672 sii
->pub
.buscorerev
= pcirev
;
673 sii
->pub
.buscoreidx
= pciidx
;
675 sii
->pub
.buscoretype
= PCIE_CORE_ID
;
676 sii
->pub
.buscorerev
= pcierev
;
677 sii
->pub
.buscoreidx
= pcieidx
;
680 SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii
->pub
.buscoreidx
,
681 sii
->pub
.buscoretype
, sii
->pub
.buscorerev
));
683 /* fixup necessary chip/core configurations */
684 if (sii
->pub
.bustype
== PCI_BUS
) {
687 sii
->pch
= (void *)pcicore_init(
688 &sii
->pub
, sii
->pbus
,
689 (void *)PCIEREGS(sii
));
690 if (sii
->pch
== NULL
)
694 if (ai_pci_fixcfg(&sii
->pub
)) {
695 SI_ERROR(("si_doattach: si_pci_fixcfg failed\n"));
700 /* return to the original core */
701 ai_setcoreidx(&sii
->pub
, *origidx
);
706 static __used
void ai_nvram_process(si_info_t
*sii
, char *pvars
)
710 /* get boardtype and boardrev */
711 switch (sii
->pub
.bustype
) {
713 /* do a pci config read to get subsystem id and subvendor id */
714 pci_read_config_dword(sii
->pbus
, PCI_SUBSYSTEM_VENDOR_ID
, &w
);
715 /* Let nvram variables override subsystem Vend/ID */
716 sii
->pub
.boardvendor
= (u16
)ai_getdevpathintvar(&sii
->pub
,
718 if (sii
->pub
.boardvendor
== 0)
719 sii
->pub
.boardvendor
= w
& 0xffff;
721 SI_ERROR(("Overriding boardvendor: 0x%x instead of "
722 "0x%x\n", sii
->pub
.boardvendor
, w
& 0xffff));
723 sii
->pub
.boardtype
= (u16
)ai_getdevpathintvar(&sii
->pub
,
725 if (sii
->pub
.boardtype
== 0)
726 sii
->pub
.boardtype
= (w
>> 16) & 0xffff;
728 SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n"
729 , sii
->pub
.boardtype
, (w
>> 16) & 0xffff));
732 sii
->pub
.boardvendor
= getintvar(pvars
, "manfid");
733 sii
->pub
.boardtype
= getintvar(pvars
, "prodid");
738 sii
->pub
.boardvendor
= PCI_VENDOR_ID_BROADCOM
;
739 sii
->pub
.boardtype
= getintvar(pvars
, "prodid");
740 if (pvars
== NULL
|| (sii
->pub
.boardtype
== 0)) {
741 sii
->pub
.boardtype
= getintvar(NULL
, "boardtype");
742 if (sii
->pub
.boardtype
== 0)
743 sii
->pub
.boardtype
= 0xffff;
748 if (sii
->pub
.boardtype
== 0) {
749 SI_ERROR(("si_doattach: unknown board type\n"));
750 ASSERT(sii
->pub
.boardtype
);
753 sii
->pub
.boardflags
= getintvar(pvars
, "boardflags");
756 static si_info_t
*ai_doattach(si_info_t
*sii
, uint devid
,
757 void *regs
, uint bustype
, void *pbus
,
758 char **vars
, uint
*varsz
)
760 struct si_pub
*sih
= &sii
->pub
;
766 ASSERT(GOODREGS(regs
));
768 memset((unsigned char *) sii
, 0, sizeof(si_info_t
));
772 sih
->buscoreidx
= BADIDX
;
777 /* check to see if we are a si core mimic'ing a pci core */
778 if (bustype
== PCI_BUS
) {
779 pci_read_config_dword(sii
->pbus
, PCI_SPROM_CONTROL
, &w
);
780 if (w
== 0xffffffff) {
781 SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
782 " switching to SI devid:0x%x\n",
788 /* find Chipcommon address */
789 if (bustype
== PCI_BUS
) {
790 pci_read_config_dword(sii
->pbus
, PCI_BAR0_WIN
, &savewin
);
791 if (!GOODCOREADDR(savewin
, SI_ENUM_BASE
))
792 savewin
= SI_ENUM_BASE
;
793 pci_write_config_dword(sii
->pbus
, PCI_BAR0_WIN
,
795 cc
= (chipcregs_t
*) regs
;
797 cc
= (chipcregs_t
*) REG_MAP(SI_ENUM_BASE
, SI_CORE_SIZE
);
800 sih
->bustype
= bustype
;
802 /* bus/core/clk setup for register access */
803 if (!ai_buscore_prep(sii
, bustype
, devid
, pbus
)) {
804 SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
810 * ChipID recognition.
811 * We assume we can read chipid at offset 0 from the regs arg.
812 * If we add other chiptypes (or if we need to support old sdio
813 * hosts w/o chipcommon), some way of recognizing them needs to
816 w
= R_REG(&cc
->chipid
);
817 sih
->socitype
= (w
& CID_TYPE_MASK
) >> CID_TYPE_SHIFT
;
818 /* Might as wll fill in chip id rev & pkg */
819 sih
->chip
= w
& CID_ID_MASK
;
820 sih
->chiprev
= (w
& CID_REV_MASK
) >> CID_REV_SHIFT
;
821 sih
->chippkg
= (w
& CID_PKG_MASK
) >> CID_PKG_SHIFT
;
823 sih
->issim
= IS_SIM(sih
->chippkg
);
826 if (sii
->pub
.socitype
== SOCI_AI
) {
827 SI_MSG(("Found chip type AI (0x%08x)\n", w
));
828 /* pass chipc address instead of original core base */
829 ai_scan(&sii
->pub
, (void *)cc
, devid
);
831 SI_ERROR(("Found chip of unknown type (0x%08x)\n", w
));
834 /* no cores found, bail out */
835 if (sii
->numcores
== 0) {
836 SI_ERROR(("si_doattach: could not find any cores\n"));
839 /* bus/core/clk setup */
841 if (!ai_buscore_setup(sii
, cc
, bustype
, savewin
, &origidx
, regs
)) {
842 SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
846 /* assume current core is CC */
847 if ((sii
->pub
.ccrev
== 0x25)
849 ((sih
->chip
== BCM43236_CHIP_ID
850 || sih
->chip
== BCM43235_CHIP_ID
851 || sih
->chip
== BCM43238_CHIP_ID
)
852 && (sii
->pub
.chiprev
<= 2))) {
854 if ((cc
->chipstatus
& CST43236_BP_CLK
) != 0) {
856 clkdiv
= R_REG(&cc
->clkdiv
);
857 /* otp_clk_div is even number, 120/14 < 9mhz */
858 clkdiv
= (clkdiv
& ~CLKD_OTP
) | (14 << CLKD_OTP_SHIFT
);
859 W_REG(&cc
->clkdiv
, clkdiv
);
860 SI_ERROR(("%s: set clkdiv to %x\n", __func__
, clkdiv
));
865 /* Init nvram from flash if it exists */
866 nvram_init((void *)&(sii
->pub
));
868 /* Init nvram from sprom/otp if they exist */
870 (&sii
->pub
, bustype
, regs
, vars
, varsz
)) {
871 SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
874 pvars
= vars
? *vars
: NULL
;
875 ai_nvram_process(sii
, pvars
);
877 /* === NVRAM, clock is ready === */
878 cc
= (chipcregs_t
*) ai_setcore(sih
, CC_CORE_ID
, 0);
879 W_REG(&cc
->gpiopullup
, 0);
880 W_REG(&cc
->gpiopulldown
, 0);
881 ai_setcoreidx(sih
, origidx
);
883 /* PMU specific initializations */
884 if (PMUCTL_ENAB(sih
)) {
887 si_pmu_chip_init(sih
);
888 xtalfreq
= getintvar(pvars
, "xtalfreq");
889 /* If xtalfreq var not available, try to measure it */
891 xtalfreq
= si_pmu_measure_alpclk(sih
);
892 si_pmu_pll_init(sih
, xtalfreq
);
893 si_pmu_res_init(sih
);
894 si_pmu_swreg_init(sih
);
897 /* setup the GPIO based LED powersave register */
898 w
= getintvar(pvars
, "leddc");
900 w
= DEFAULT_GPIOTIMERVAL
;
901 ai_corereg(sih
, SI_CC_IDX
, offsetof(chipcregs_t
, gpiotimerval
), ~0, w
);
904 ASSERT(sii
->pch
!= NULL
);
905 pcicore_attach(sii
->pch
, pvars
, SI_DOATTACH
);
908 if ((sih
->chip
== BCM43224_CHIP_ID
) ||
909 (sih
->chip
== BCM43421_CHIP_ID
)) {
911 * enable 12 mA drive strenth for 43224 and
912 * set chipControl register bit 15
914 if (sih
->chiprev
== 0) {
915 SI_MSG(("Applying 43224A0 WARs\n"));
916 ai_corereg(sih
, SI_CC_IDX
,
917 offsetof(chipcregs_t
, chipcontrol
),
918 CCTRL43224_GPIO_TOGGLE
,
919 CCTRL43224_GPIO_TOGGLE
);
920 si_pmu_chipcontrol(sih
, 0, CCTRL_43224A0_12MA_LED_DRIVE
,
921 CCTRL_43224A0_12MA_LED_DRIVE
);
923 if (sih
->chiprev
>= 1) {
924 SI_MSG(("Applying 43224B0+ WARs\n"));
925 si_pmu_chipcontrol(sih
, 0, CCTRL_43224B0_12MA_LED_DRIVE
,
926 CCTRL_43224B0_12MA_LED_DRIVE
);
930 if (sih
->chip
== BCM4313_CHIP_ID
) {
932 * enable 12 mA drive strenth for 4313 and
933 * set chipControl register bit 1
935 SI_MSG(("Applying 4313 WARs\n"));
936 si_pmu_chipcontrol(sih
, 0, CCTRL_4313_12MA_LED_DRIVE
,
937 CCTRL_4313_12MA_LED_DRIVE
);
940 if (sih
->chip
== BCM4331_CHIP_ID
) {
941 /* Enable Ext PA lines depending on chip package option */
942 ai_chipcontrl_epa4331(sih
, true);
947 if (sih
->bustype
== PCI_BUS
) {
949 pcicore_deinit(sii
->pch
);
956 /* may be called with core in reset */
957 void ai_detach(si_t
*sih
)
962 struct si_pub
*si_local
= NULL
;
963 bcopy(&sih
, &si_local
, sizeof(si_t
**));
970 if (sih
->bustype
== SI_BUS
)
971 for (idx
= 0; idx
< SI_MAXCORES
; idx
++)
972 if (sii
->regs
[idx
]) {
973 iounmap(sii
->regs
[idx
]);
974 sii
->regs
[idx
] = NULL
;
977 nvram_exit((void *)si_local
); /* free up nvram buffers */
979 if (sih
->bustype
== PCI_BUS
) {
981 pcicore_deinit(sii
->pch
);
989 /* register driver interrupt disabling and restoring callback functions */
991 ai_register_intr_callback(si_t
*sih
, void *intrsoff_fn
, void *intrsrestore_fn
,
992 void *intrsenabled_fn
, void *intr_arg
)
997 sii
->intr_arg
= intr_arg
;
998 sii
->intrsoff_fn
= (si_intrsoff_t
) intrsoff_fn
;
999 sii
->intrsrestore_fn
= (si_intrsrestore_t
) intrsrestore_fn
;
1000 sii
->intrsenabled_fn
= (si_intrsenabled_t
) intrsenabled_fn
;
1001 /* save current core id. when this function called, the current core
1002 * must be the core which provides driver functions(il, et, wl, etc.)
1004 sii
->dev_coreid
= sii
->coreid
[sii
->curidx
];
1007 void ai_deregister_intr_callback(si_t
*sih
)
1012 sii
->intrsoff_fn
= NULL
;
1015 uint
ai_coreid(si_t
*sih
)
1020 return sii
->coreid
[sii
->curidx
];
1023 uint
ai_coreidx(si_t
*sih
)
1031 bool ai_backplane64(si_t
*sih
)
1033 return (sih
->cccaps
& CC_CAP_BKPLN64
) != 0;
1036 /* return index of coreid or BADIDX if not found */
1037 uint
ai_findcoreidx(si_t
*sih
, uint coreid
, uint coreunit
)
1047 for (i
= 0; i
< sii
->numcores
; i
++)
1048 if (sii
->coreid
[i
] == coreid
) {
1049 if (found
== coreunit
)
1058 * This function changes logical "focus" to the indicated core;
1059 * must be called with interrupts off.
1060 * Moreover, callers should keep interrupts off during switching
1061 * out of and back to d11 core.
1063 void *ai_setcore(si_t
*sih
, uint coreid
, uint coreunit
)
1067 idx
= ai_findcoreidx(sih
, coreid
, coreunit
);
1071 if (sih
->socitype
== SOCI_AI
)
1072 return ai_setcoreidx(sih
, idx
);
1079 /* Turn off interrupt as required by ai_setcore, before switch core */
1080 void *ai_switch_core(si_t
*sih
, uint coreid
, uint
*origidx
, uint
*intr_val
)
1088 /* Overloading the origidx variable to remember the coreid,
1089 * this works because the core ids cannot be confused with
1093 if (coreid
== CC_CORE_ID
)
1094 return (void *)CCREGS_FAST(sii
);
1095 else if (coreid
== sih
->buscoretype
)
1096 return (void *)PCIEREGS(sii
);
1098 INTR_OFF(sii
, *intr_val
);
1099 *origidx
= sii
->curidx
;
1100 cc
= ai_setcore(sih
, coreid
, 0);
1106 /* restore coreidx and restore interrupt */
1107 void ai_restore_core(si_t
*sih
, uint coreid
, uint intr_val
)
1113 && ((coreid
== CC_CORE_ID
) || (coreid
== sih
->buscoretype
)))
1116 ai_setcoreidx(sih
, coreid
);
1117 INTR_RESTORE(sii
, intr_val
);
1120 void ai_write_wrapperreg(si_t
*sih
, u32 offset
, u32 val
)
1122 si_info_t
*sii
= SI_INFO(sih
);
1123 u32
*w
= (u32
*) sii
->curwrap
;
1124 W_REG(w
+ (offset
/ 4), val
);
1129 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
1130 * operation, switch back to the original core, and return the new value.
1132 * When using the silicon backplane, no fiddling with interrupts or core
1133 * switches is needed.
1135 * Also, when using pci/pcie, we can optimize away the core switching for pci
1136 * registers and (on newer pci cores) chipcommon registers.
1138 uint
ai_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
1149 ASSERT(GOODIDX(coreidx
));
1150 ASSERT(regoff
< SI_CORE_SIZE
);
1151 ASSERT((val
& ~mask
) == 0);
1153 if (coreidx
>= SI_MAXCORES
)
1156 if (sih
->bustype
== SI_BUS
) {
1157 /* If internal bus, we can always get at everything */
1159 /* map if does not exist */
1160 if (!sii
->regs
[coreidx
]) {
1161 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
1163 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
1165 r
= (u32
*) ((unsigned char *) sii
->regs
[coreidx
] + regoff
);
1166 } else if (sih
->bustype
== PCI_BUS
) {
1168 * If pci/pcie, we can get at pci/pcie regs
1169 * and on newer cores to chipc
1171 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
1172 /* Chipc registers are mapped at 12KB */
1175 r
= (u32
*) ((char *)sii
->curmap
+
1176 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
1177 } else if (sii
->pub
.buscoreidx
== coreidx
) {
1179 * pci registers are at either in the last 2KB of
1180 * an 8KB window or, in pcie and pci rev 13 at 8KB
1184 r
= (u32
*) ((char *)sii
->curmap
+
1185 PCI_16KB0_PCIREGS_OFFSET
+
1188 r
= (u32
*) ((char *)sii
->curmap
+
1189 ((regoff
>= SBCONFIGOFF
) ?
1190 PCI_BAR0_PCISBR_OFFSET
:
1191 PCI_BAR0_PCIREGS_OFFSET
) +
1197 INTR_OFF(sii
, intr_val
);
1199 /* save current core index */
1200 origidx
= ai_coreidx(&sii
->pub
);
1203 r
= (u32
*) ((unsigned char *) ai_setcoreidx(&sii
->pub
, coreidx
)
1210 w
= (R_REG(r
) & ~mask
) | val
;
1218 /* restore core index */
1219 if (origidx
!= coreidx
)
1220 ai_setcoreidx(&sii
->pub
, origidx
);
1222 INTR_RESTORE(sii
, intr_val
);
1228 void ai_core_disable(si_t
*sih
, u32 bits
)
1236 ASSERT(GOODREGS(sii
->curwrap
));
1239 /* if core is already in reset, just return */
1240 if (R_REG(&ai
->resetctrl
) & AIRC_RESET
)
1243 W_REG(&ai
->ioctrl
, bits
);
1244 dummy
= R_REG(&ai
->ioctrl
);
1247 W_REG(&ai
->resetctrl
, AIRC_RESET
);
1251 /* reset and re-enable a core
1253 * bits - core specific bits that are set during and after reset sequence
1254 * resetbits - core specific bits that are set only during reset sequence
1256 void ai_core_reset(si_t
*sih
, u32 bits
, u32 resetbits
)
1263 ASSERT(GOODREGS(sii
->curwrap
));
1267 * Must do the disable sequence first to work
1268 * for arbitrary current core state.
1270 ai_core_disable(sih
, (bits
| resetbits
));
1273 * Now do the initialization sequence.
1275 W_REG(&ai
->ioctrl
, (bits
| SICF_FGC
| SICF_CLOCK_EN
));
1276 dummy
= R_REG(&ai
->ioctrl
);
1277 W_REG(&ai
->resetctrl
, 0);
1280 W_REG(&ai
->ioctrl
, (bits
| SICF_CLOCK_EN
));
1281 dummy
= R_REG(&ai
->ioctrl
);
1285 /* return the slow clock source - LPO, XTAL, or PCI */
1286 static uint
ai_slowclk_src(si_info_t
*sii
)
1291 ASSERT(SI_FAST(sii
) || ai_coreid(&sii
->pub
) == CC_CORE_ID
);
1293 if (sii
->pub
.ccrev
< 6) {
1294 if (sii
->pub
.bustype
== PCI_BUS
) {
1295 pci_read_config_dword(sii
->pbus
, PCI_GPIO_OUT
,
1297 if (val
& PCI_CFG_GPIO_SCS
)
1301 } else if (sii
->pub
.ccrev
< 10) {
1302 cc
= (chipcregs_t
*) ai_setcoreidx(&sii
->pub
, sii
->curidx
);
1303 return R_REG(&cc
->slow_clk_ctl
) & SCC_SS_MASK
;
1304 } else /* Insta-clock */
1308 /* return the ILP (slowclock) min or max frequency */
1309 static uint
ai_slowclk_freq(si_info_t
*sii
, bool max_freq
, chipcregs_t
*cc
)
1314 ASSERT(SI_FAST(sii
) || ai_coreid(&sii
->pub
) == CC_CORE_ID
);
1317 * shouldn't be here unless we've established
1318 * the chip has dynamic clk control
1320 ASSERT(R_REG(&cc
->capabilities
) & CC_CAP_PWR_CTL
);
1322 slowclk
= ai_slowclk_src(sii
);
1323 if (sii
->pub
.ccrev
< 6) {
1324 if (slowclk
== SCC_SS_PCI
)
1325 return max_freq
? (PCIMAXFREQ
/ 64)
1326 : (PCIMINFREQ
/ 64);
1328 return max_freq
? (XTALMAXFREQ
/ 32)
1329 : (XTALMINFREQ
/ 32);
1330 } else if (sii
->pub
.ccrev
< 10) {
1332 (((R_REG(&cc
->slow_clk_ctl
) & SCC_CD_MASK
) >>
1334 if (slowclk
== SCC_SS_LPO
)
1335 return max_freq
? LPOMAXFREQ
: LPOMINFREQ
;
1336 else if (slowclk
== SCC_SS_XTAL
)
1337 return max_freq
? (XTALMAXFREQ
/ div
)
1338 : (XTALMINFREQ
/ div
);
1339 else if (slowclk
== SCC_SS_PCI
)
1340 return max_freq
? (PCIMAXFREQ
/ div
)
1341 : (PCIMINFREQ
/ div
);
1345 /* Chipc rev 10 is InstaClock */
1346 div
= R_REG(&cc
->system_clk_ctl
) >> SYCC_CD_SHIFT
;
1347 div
= 4 * (div
+ 1);
1348 return max_freq
? XTALMAXFREQ
: (XTALMINFREQ
/ div
);
1353 static void ai_clkctl_setdelay(si_info_t
*sii
, void *chipcregs
)
1355 chipcregs_t
*cc
= (chipcregs_t
*) chipcregs
;
1356 uint slowmaxfreq
, pll_delay
, slowclk
;
1357 uint pll_on_delay
, fref_sel_delay
;
1359 pll_delay
= PLL_DELAY
;
1362 * If the slow clock is not sourced by the xtal then
1363 * add the xtal_on_delay since the xtal will also be
1364 * powered down by dynamic clk control logic.
1367 slowclk
= ai_slowclk_src(sii
);
1368 if (slowclk
!= SCC_SS_XTAL
)
1369 pll_delay
+= XTAL_ON_DELAY
;
1371 /* Starting with 4318 it is ILP that is used for the delays */
1373 ai_slowclk_freq(sii
, (sii
->pub
.ccrev
>= 10) ? false : true, cc
);
1375 pll_on_delay
= ((slowmaxfreq
* pll_delay
) + 999999) / 1000000;
1376 fref_sel_delay
= ((slowmaxfreq
* FREF_DELAY
) + 999999) / 1000000;
1378 W_REG(&cc
->pll_on_delay
, pll_on_delay
);
1379 W_REG(&cc
->fref_sel_delay
, fref_sel_delay
);
1382 /* initialize power control delay registers */
1383 void ai_clkctl_init(si_t
*sih
)
1390 if (!CCCTL_ENAB(sih
))
1394 fast
= SI_FAST(sii
);
1396 origidx
= sii
->curidx
;
1397 cc
= (chipcregs_t
*) ai_setcore(sih
, CC_CORE_ID
, 0);
1401 cc
= (chipcregs_t
*) CCREGS_FAST(sii
);
1407 /* set all Instaclk chip ILP to 1 MHz */
1408 if (sih
->ccrev
>= 10)
1409 SET_REG(&cc
->system_clk_ctl
, SYCC_CD_MASK
,
1410 (ILP_DIV_1MHZ
<< SYCC_CD_SHIFT
));
1412 ai_clkctl_setdelay(sii
, (void *)cc
);
1415 ai_setcoreidx(sih
, origidx
);
1419 * return the value suitable for writing to the
1420 * dot11 core FAST_PWRUP_DELAY register
1422 u16
ai_clkctl_fast_pwrup_delay(si_t
*sih
)
1433 if (PMUCTL_ENAB(sih
)) {
1434 INTR_OFF(sii
, intr_val
);
1435 fpdelay
= si_pmu_fast_pwrup_delay(sih
);
1436 INTR_RESTORE(sii
, intr_val
);
1440 if (!CCCTL_ENAB(sih
))
1443 fast
= SI_FAST(sii
);
1446 origidx
= sii
->curidx
;
1447 INTR_OFF(sii
, intr_val
);
1448 cc
= (chipcregs_t
*) ai_setcore(sih
, CC_CORE_ID
, 0);
1452 cc
= (chipcregs_t
*) CCREGS_FAST(sii
);
1458 slowminfreq
= ai_slowclk_freq(sii
, false, cc
);
1459 fpdelay
= (((R_REG(&cc
->pll_on_delay
) + 2) * 1000000) +
1460 (slowminfreq
- 1)) / slowminfreq
;
1464 ai_setcoreidx(sih
, origidx
);
1465 INTR_RESTORE(sii
, intr_val
);
1470 /* turn primary xtal and/or pll off/on */
1471 int ai_clkctl_xtal(si_t
*sih
, uint what
, bool on
)
1478 switch (sih
->bustype
) {
1481 /* pcie core doesn't have any mapping to control the xtal pu */
1485 pci_read_config_dword(sii
->pbus
, PCI_GPIO_IN
, &in
);
1486 pci_read_config_dword(sii
->pbus
, PCI_GPIO_OUT
, &out
);
1487 pci_read_config_dword(sii
->pbus
, PCI_GPIO_OUTEN
, &outen
);
1490 * Avoid glitching the clock if GPRS is already using it.
1491 * We can't actually read the state of the PLLPD so we infer it
1492 * by the value of XTAL_PU which *is* readable via gpioin.
1494 if (on
&& (in
& PCI_CFG_GPIO_XTAL
))
1498 outen
|= PCI_CFG_GPIO_XTAL
;
1500 outen
|= PCI_CFG_GPIO_PLL
;
1503 /* turn primary xtal on */
1505 out
|= PCI_CFG_GPIO_XTAL
;
1507 out
|= PCI_CFG_GPIO_PLL
;
1508 pci_write_config_dword(sii
->pbus
,
1510 pci_write_config_dword(sii
->pbus
,
1511 PCI_GPIO_OUTEN
, outen
);
1512 udelay(XTAL_ON_DELAY
);
1517 out
&= ~PCI_CFG_GPIO_PLL
;
1518 pci_write_config_dword(sii
->pbus
,
1524 out
&= ~PCI_CFG_GPIO_XTAL
;
1526 out
|= PCI_CFG_GPIO_PLL
;
1527 pci_write_config_dword(sii
->pbus
,
1529 pci_write_config_dword(sii
->pbus
,
1530 PCI_GPIO_OUTEN
, outen
);
1541 * clock control policy function throught chipcommon
1543 * set dynamic clk control mode (forceslow, forcefast, dynamic)
1544 * returns true if we are forcing fast clock
1545 * this is a wrapper over the next internal function
1546 * to allow flexible policy settings for outside caller
1548 bool ai_clkctl_cc(si_t
*sih
, uint mode
)
1554 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1558 if (PCI_FORCEHT(sii
))
1559 return mode
== CLK_FAST
;
1561 return _ai_clkctl_cc(sii
, mode
);
1564 /* clk control mechanism through chipcommon, no policy checking */
1565 static bool _ai_clkctl_cc(si_info_t
*sii
, uint mode
)
1571 bool fast
= SI_FAST(sii
);
1573 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1574 if (sii
->pub
.ccrev
< 6)
1578 * Chips with ccrev 10 are EOL and they
1579 * don't have SYCC_HR which we use below
1581 ASSERT(sii
->pub
.ccrev
!= 10);
1584 INTR_OFF(sii
, intr_val
);
1585 origidx
= sii
->curidx
;
1587 if ((sii
->pub
.bustype
== SI_BUS
) &&
1588 ai_setcore(&sii
->pub
, MIPS33_CORE_ID
, 0) &&
1589 (ai_corerev(&sii
->pub
) <= 7) && (sii
->pub
.ccrev
>= 10))
1592 cc
= (chipcregs_t
*) ai_setcore(&sii
->pub
, CC_CORE_ID
, 0);
1594 cc
= (chipcregs_t
*) CCREGS_FAST(sii
);
1600 if (!CCCTL_ENAB(&sii
->pub
) && (sii
->pub
.ccrev
< 20))
1604 case CLK_FAST
: /* FORCEHT, fast (pll) clock */
1605 if (sii
->pub
.ccrev
< 10) {
1607 * don't forget to force xtal back
1608 * on before we clear SCC_DYN_XTAL..
1610 ai_clkctl_xtal(&sii
->pub
, XTAL
, ON
);
1611 SET_REG(&cc
->slow_clk_ctl
,
1612 (SCC_XC
| SCC_FS
| SCC_IP
), SCC_IP
);
1613 } else if (sii
->pub
.ccrev
< 20) {
1614 OR_REG(&cc
->system_clk_ctl
, SYCC_HR
);
1616 OR_REG(&cc
->clk_ctl_st
, CCS_FORCEHT
);
1619 /* wait for the PLL */
1620 if (PMUCTL_ENAB(&sii
->pub
)) {
1621 u32 htavail
= CCS_HTAVAIL
;
1622 SPINWAIT(((R_REG(&cc
->clk_ctl_st
) & htavail
)
1623 == 0), PMU_MAX_TRANSITION_DLY
);
1624 ASSERT(R_REG(&cc
->clk_ctl_st
) & htavail
);
1630 case CLK_DYNAMIC
: /* enable dynamic clock control */
1631 if (sii
->pub
.ccrev
< 10) {
1632 scc
= R_REG(&cc
->slow_clk_ctl
);
1633 scc
&= ~(SCC_FS
| SCC_IP
| SCC_XC
);
1634 if ((scc
& SCC_SS_MASK
) != SCC_SS_XTAL
)
1636 W_REG(&cc
->slow_clk_ctl
, scc
);
1639 * for dynamic control, we have to
1640 * release our xtal_pu "force on"
1643 ai_clkctl_xtal(&sii
->pub
, XTAL
, OFF
);
1644 } else if (sii
->pub
.ccrev
< 20) {
1646 AND_REG(&cc
->system_clk_ctl
, ~SYCC_HR
);
1648 AND_REG(&cc
->clk_ctl_st
, ~CCS_FORCEHT
);
1658 ai_setcoreidx(&sii
->pub
, origidx
);
1659 INTR_RESTORE(sii
, intr_val
);
1661 return mode
== CLK_FAST
;
1664 /* Build device path. Support SI, PCI, and JTAG for now. */
1665 int ai_devpath(si_t
*sih
, char *path
, int size
)
1669 ASSERT(path
!= NULL
);
1670 ASSERT(size
>= SI_DEVPATH_BUFSZ
);
1672 if (!path
|| size
<= 0)
1675 switch (sih
->bustype
) {
1678 slen
= snprintf(path
, (size_t) size
, "sb/%u/", ai_coreidx(sih
));
1681 ASSERT((SI_INFO(sih
))->pbus
!= NULL
);
1682 slen
= snprintf(path
, (size_t) size
, "pci/%u/%u/",
1683 ((struct pci_dev
*)((SI_INFO(sih
))->pbus
))->bus
->number
,
1685 ((struct pci_dev
*)((SI_INFO(sih
))->pbus
))->devfn
));
1694 if (slen
< 0 || slen
>= size
) {
1702 /* Get a variable, but only if it has a devpath prefix */
1703 char *ai_getdevpathvar(si_t
*sih
, const char *name
)
1705 char varname
[SI_DEVPATH_BUFSZ
+ 32];
1707 ai_devpathvar(sih
, varname
, sizeof(varname
), name
);
1709 return getvar(NULL
, varname
);
1712 /* Get a variable, but only if it has a devpath prefix */
1713 int ai_getdevpathintvar(si_t
*sih
, const char *name
)
1715 #if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
1716 return getintvar(NULL
, name
);
1718 char varname
[SI_DEVPATH_BUFSZ
+ 32];
1720 ai_devpathvar(sih
, varname
, sizeof(varname
), name
);
1722 return getintvar(NULL
, varname
);
1726 char *ai_getnvramflvar(si_t
*sih
, const char *name
)
1728 return getvar(NULL
, name
);
1731 /* Concatenate the dev path with a varname into the given 'var' buffer
1732 * and return the 'var' pointer. Nothing is done to the arguments if
1733 * len == 0 or var is NULL, var is still returned. On overflow, the
1734 * first char will be set to '\0'.
1736 static char *ai_devpathvar(si_t
*sih
, char *var
, int len
, const char *name
)
1740 if (!var
|| len
<= 0)
1743 if (ai_devpath(sih
, var
, len
) == 0) {
1744 path_len
= strlen(var
);
1746 if (strlen(name
) + 1 > (uint
) (len
- path_len
))
1749 strncpy(var
+ path_len
, name
, len
- path_len
- 1);
1755 /* return true if PCIE capability exists in the pci config space */
1756 static __used
bool ai_ispcie(si_info_t
*sii
)
1760 if (sii
->pub
.bustype
!= PCI_BUS
)
1764 pcicore_find_pci_capability(sii
->pbus
, PCI_CAP_ID_EXP
, NULL
,
1772 bool ai_pci_war16165(si_t
*sih
)
1778 return PCI(sii
) && (sih
->buscorerev
<= 10);
1781 void ai_pci_up(si_t
*sih
)
1787 /* if not pci bus, we're done */
1788 if (sih
->bustype
!= PCI_BUS
)
1791 if (PCI_FORCEHT(sii
))
1792 _ai_clkctl_cc(sii
, CLK_FAST
);
1795 pcicore_up(sii
->pch
, SI_PCIUP
);
1799 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
1800 void ai_pci_sleep(si_t
*sih
)
1806 pcicore_sleep(sii
->pch
);
1809 /* Unconfigure and/or apply various WARs when going down */
1810 void ai_pci_down(si_t
*sih
)
1816 /* if not pci bus, we're done */
1817 if (sih
->bustype
!= PCI_BUS
)
1820 /* release FORCEHT since chip is going to "down" state */
1821 if (PCI_FORCEHT(sii
))
1822 _ai_clkctl_cc(sii
, CLK_DYNAMIC
);
1824 pcicore_down(sii
->pch
, SI_PCIDOWN
);
1828 * Configure the pci core for pci client (NIC) action
1829 * coremask is the bitvec of cores by index to be enabled.
1831 void ai_pci_setup(si_t
*sih
, uint coremask
)
1834 struct sbpciregs
*pciregs
= NULL
;
1840 if (sii
->pub
.bustype
!= PCI_BUS
)
1843 ASSERT(PCI(sii
) || PCIE(sii
));
1844 ASSERT(sii
->pub
.buscoreidx
!= BADIDX
);
1847 /* get current core index */
1850 /* we interrupt on this backplane flag number */
1851 siflag
= ai_flag(sih
);
1853 /* switch over to pci core */
1854 pciregs
= ai_setcoreidx(sih
, sii
->pub
.buscoreidx
);
1858 * Enable sb->pci interrupts. Assume
1859 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1861 if (PCIE(sii
) || (PCI(sii
) && ((sii
->pub
.buscorerev
) >= 6))) {
1862 /* pci config write to set this core bit in PCIIntMask */
1863 pci_read_config_dword(sii
->pbus
, PCI_INT_MASK
, &w
);
1864 w
|= (coremask
<< PCI_SBIM_SHIFT
);
1865 pci_write_config_dword(sii
->pbus
, PCI_INT_MASK
, w
);
1867 /* set sbintvec bit for our flag number */
1868 ai_setint(sih
, siflag
);
1872 OR_REG(&pciregs
->sbtopci2
,
1873 (SBTOPCI_PREF
| SBTOPCI_BURST
));
1874 if (sii
->pub
.buscorerev
>= 11) {
1875 OR_REG(&pciregs
->sbtopci2
,
1876 SBTOPCI_RC_READMULTI
);
1877 w
= R_REG(&pciregs
->clkrun
);
1878 W_REG(&pciregs
->clkrun
,
1879 (w
| PCI_CLKRUN_DSBL
));
1880 w
= R_REG(&pciregs
->clkrun
);
1883 /* switch back to previous core */
1884 ai_setcoreidx(sih
, idx
);
1889 * Fixup SROMless PCI device's configuration.
1890 * The current core may be changed upon return.
1892 int ai_pci_fixcfg(si_t
*sih
)
1894 uint origidx
, pciidx
;
1895 struct sbpciregs
*pciregs
= NULL
;
1896 sbpcieregs_t
*pcieregs
= NULL
;
1898 u16 val16
, *reg16
= NULL
;
1900 si_info_t
*sii
= SI_INFO(sih
);
1902 ASSERT(sii
->pub
.bustype
== PCI_BUS
);
1904 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
1905 /* save the current index */
1906 origidx
= ai_coreidx(&sii
->pub
);
1908 /* check 'pi' is correct and fix it if not */
1909 if (sii
->pub
.buscoretype
== PCIE_CORE_ID
) {
1910 pcieregs
= ai_setcore(&sii
->pub
, PCIE_CORE_ID
, 0);
1912 ASSERT(pcieregs
!= NULL
);
1913 reg16
= &pcieregs
->sprom
[SRSH_PI_OFFSET
];
1914 } else if (sii
->pub
.buscoretype
== PCI_CORE_ID
) {
1915 pciregs
= ai_setcore(&sii
->pub
, PCI_CORE_ID
, 0);
1917 ASSERT(pciregs
!= NULL
);
1918 reg16
= &pciregs
->sprom
[SRSH_PI_OFFSET
];
1920 pciidx
= ai_coreidx(&sii
->pub
);
1921 val16
= R_REG(reg16
);
1922 if (((val16
& SRSH_PI_MASK
) >> SRSH_PI_SHIFT
) != (u16
) pciidx
) {
1924 (u16
) (pciidx
<< SRSH_PI_SHIFT
) | (val16
&
1926 W_REG(reg16
, val16
);
1929 /* restore the original index */
1930 ai_setcoreidx(&sii
->pub
, origidx
);
1932 pcicore_hwup(sii
->pch
);
1936 /* mask&set gpiocontrol bits */
1937 u32
ai_gpiocontrol(si_t
*sih
, u32 mask
, u32 val
, u8 priority
)
1943 /* gpios could be shared on router platforms
1944 * ignore reservation if it's high priority (e.g., test apps)
1946 if ((priority
!= GPIO_HI_PRIORITY
) &&
1947 (sih
->bustype
== SI_BUS
) && (val
|| mask
)) {
1948 mask
= priority
? (ai_gpioreservation
& mask
) :
1949 ((ai_gpioreservation
| mask
) & ~(ai_gpioreservation
));
1953 regoff
= offsetof(chipcregs_t
, gpiocontrol
);
1954 return ai_corereg(sih
, SI_CC_IDX
, regoff
, mask
, val
);
1957 void ai_chipcontrl_epa4331(si_t
*sih
, bool on
)
1965 origidx
= ai_coreidx(sih
);
1967 cc
= (chipcregs_t
*) ai_setcore(sih
, CC_CORE_ID
, 0);
1969 val
= R_REG(&cc
->chipcontrol
);
1972 if (sih
->chippkg
== 9 || sih
->chippkg
== 0xb) {
1973 /* Ext PA Controls for 4331 12x9 Package */
1974 W_REG(&cc
->chipcontrol
, val
|
1975 (CCTRL4331_EXTPA_EN
|
1976 CCTRL4331_EXTPA_ON_GPIO2_5
));
1978 /* Ext PA Controls for 4331 12x12 Package */
1979 W_REG(&cc
->chipcontrol
,
1980 val
| (CCTRL4331_EXTPA_EN
));
1983 val
&= ~(CCTRL4331_EXTPA_EN
| CCTRL4331_EXTPA_ON_GPIO2_5
);
1984 W_REG(&cc
->chipcontrol
, val
);
1987 ai_setcoreidx(sih
, origidx
);
1990 /* Enable BT-COEX & Ex-PA for 4313 */
1991 void ai_epa_4313war(si_t
*sih
)
1998 origidx
= ai_coreidx(sih
);
2000 cc
= (chipcregs_t
*) ai_setcore(sih
, CC_CORE_ID
, 0);
2003 W_REG(&cc
->gpiocontrol
,
2004 R_REG(&cc
->gpiocontrol
) | GPIO_CTRL_EPA_EN_MASK
);
2006 ai_setcoreidx(sih
, origidx
);
2009 /* check if the device is removed */
2010 bool ai_deviceremoved(si_t
*sih
)
2017 switch (sih
->bustype
) {
2019 ASSERT(sii
->pbus
!= NULL
);
2020 pci_read_config_dword(sii
->pbus
, PCI_VENDOR_ID
, &w
);
2021 if ((w
& 0xFFFF) != PCI_VENDOR_ID_BROADCOM
)
2028 bool ai_is_sprom_available(si_t
*sih
)
2030 if (sih
->ccrev
>= 31) {
2036 if ((sih
->cccaps
& CC_CAP_SROM
) == 0)
2040 origidx
= sii
->curidx
;
2041 cc
= ai_setcoreidx(sih
, SI_CC_IDX
);
2042 sromctrl
= R_REG(&cc
->sromcontrol
);
2043 ai_setcoreidx(sih
, origidx
);
2044 return sromctrl
& SRC_PRESENT
;
2047 switch (sih
->chip
) {
2048 case BCM4329_CHIP_ID
:
2049 return (sih
->chipst
& CST4329_SPROM_SEL
) != 0;
2050 case BCM4319_CHIP_ID
:
2051 return (sih
->chipst
& CST4319_SPROM_SEL
) != 0;
2052 case BCM4336_CHIP_ID
:
2053 return (sih
->chipst
& CST4336_SPROM_PRESENT
) != 0;
2054 case BCM4330_CHIP_ID
:
2055 return (sih
->chipst
& CST4330_SPROM_PRESENT
) != 0;
2056 case BCM4313_CHIP_ID
:
2057 return (sih
->chipst
& CST4313_SPROM_PRESENT
) != 0;
2058 case BCM4331_CHIP_ID
:
2059 return (sih
->chipst
& CST4331_SPROM_PRESENT
) != 0;
2065 bool ai_is_otp_disabled(si_t
*sih
)
2067 switch (sih
->chip
) {
2068 case BCM4329_CHIP_ID
:
2069 return (sih
->chipst
& CST4329_SPROM_OTP_SEL_MASK
) ==
2071 case BCM4319_CHIP_ID
:
2072 return (sih
->chipst
& CST4319_SPROM_OTP_SEL_MASK
) ==
2074 case BCM4336_CHIP_ID
:
2075 return (sih
->chipst
& CST4336_OTP_PRESENT
) == 0;
2076 case BCM4330_CHIP_ID
:
2077 return (sih
->chipst
& CST4330_OTP_PRESENT
) == 0;
2078 case BCM4313_CHIP_ID
:
2079 return (sih
->chipst
& CST4313_OTP_PRESENT
) == 0;
2080 /* These chips always have their OTP on */
2081 case BCM43224_CHIP_ID
:
2082 case BCM43225_CHIP_ID
:
2083 case BCM43421_CHIP_ID
:
2084 case BCM43235_CHIP_ID
:
2085 case BCM43236_CHIP_ID
:
2086 case BCM43238_CHIP_ID
:
2087 case BCM4331_CHIP_ID
:
2093 bool ai_is_otp_powered(si_t
*sih
)
2095 if (PMUCTL_ENAB(sih
))
2096 return si_pmu_is_otp_powered(sih
);
2100 void ai_otp_power(si_t
*sih
, bool on
)
2102 if (PMUCTL_ENAB(sih
))
2103 si_pmu_otp_power(sih
, on
);