staging: brcm80211: cleanup code in source file aiutils.c
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / staging / brcm80211 / util / aiutils.c
blob549a61278934b71ce79fdcf55cdfb0f49f4e00dd
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/delay.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <bcmdefs.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
23 #include <bcmutils.h>
24 #include <aiutils.h>
25 #include <hndsoc.h>
26 #include <sbchipc.h>
27 #include <pcicfg.h>
28 #include <bcmdevs.h>
30 /* ********** from siutils.c *********** */
31 #include <pci_core.h>
32 #include <pcie_core.h>
33 #include <nicpci.h>
34 #include <bcmnvram.h>
35 #include <bcmsrom.h>
36 #include <wlc_pmu.h>
38 #define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
39 (sih->chiprev == 0) && \
40 (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
42 /* EROM parsing */
44 static u32
45 get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
47 u32 ent;
48 uint inv = 0, nom = 0;
50 while (true) {
51 ent = R_REG(*eromptr);
52 (*eromptr)++;
54 if (mask == 0)
55 break;
57 if ((ent & ER_VALID) == 0) {
58 inv++;
59 continue;
62 if (ent == (ER_END | ER_VALID))
63 break;
65 if ((ent & mask) == match)
66 break;
68 nom++;
71 SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
72 if (inv + nom) {
73 SI_VMSG((" after %d invalid and %d non-matching entries\n",
74 inv, nom));
76 return ent;
79 static u32
80 get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
81 u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
83 u32 asd, sz, szd;
85 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
86 if (((asd & ER_TAG1) != ER_ADD) ||
87 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
88 ((asd & AD_ST_MASK) != st)) {
89 /* This is not what we want, "push" it back */
90 (*eromptr)--;
91 return 0;
93 *addrl = asd & AD_ADDR_MASK;
94 if (asd & AD_AG32)
95 *addrh = get_erom_ent(sih, eromptr, 0, 0);
96 else
97 *addrh = 0;
98 *sizeh = 0;
99 sz = asd & AD_SZ_MASK;
100 if (sz == AD_SZ_SZD) {
101 szd = get_erom_ent(sih, eromptr, 0, 0);
102 *sizel = szd & SD_SZ_MASK;
103 if (szd & SD_SG32)
104 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
105 } else
106 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
108 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
109 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
111 return asd;
114 static void ai_hwfixup(si_info_t *sii)
118 /* parse the enumeration rom to identify all cores */
119 void ai_scan(si_t *sih, void *regs, uint devid)
121 si_info_t *sii = SI_INFO(sih);
122 chipcregs_t *cc = (chipcregs_t *) regs;
123 u32 erombase, *eromptr, *eromlim;
125 erombase = R_REG(&cc->eromptr);
127 switch (sih->bustype) {
128 case SI_BUS:
129 eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
130 break;
132 case PCI_BUS:
133 /* Set wrappers address */
134 sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
136 /* Now point the window at the erom */
137 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
138 eromptr = regs;
139 break;
141 case SPI_BUS:
142 case SDIO_BUS:
143 eromptr = (u32 *)(unsigned long)erombase;
144 break;
146 default:
147 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
148 sih->bustype));
149 return;
151 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
153 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
154 while (eromptr < eromlim) {
155 u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
156 u32 mpd, asd, addrl, addrh, sizel, sizeh;
157 u32 *base;
158 uint i, j, idx;
159 bool br;
161 br = false;
163 /* Grok a component */
164 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
165 if (cia == (ER_END | ER_VALID)) {
166 SI_VMSG(("Found END of erom after %d cores\n",
167 sii->numcores));
168 ai_hwfixup(sii);
169 return;
171 base = eromptr - 1;
172 cib = get_erom_ent(sih, &eromptr, 0, 0);
174 if ((cib & ER_TAG) != ER_CI) {
175 SI_ERROR(("CIA not followed by CIB\n"));
176 goto error;
179 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
180 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
181 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
182 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
183 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
184 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
185 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
187 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
189 if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
190 continue;
191 if ((nmw + nsw == 0)) {
192 /* A component which is not a core */
193 if (cid == OOB_ROUTER_CORE_ID) {
194 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
195 &addrl, &addrh, &sizel, &sizeh);
196 if (asd != 0) {
197 sii->oob_router = addrl;
200 continue;
203 idx = sii->numcores;
204 /* sii->eromptr[idx] = base; */
205 sii->cia[idx] = cia;
206 sii->cib[idx] = cib;
207 sii->coreid[idx] = cid;
209 for (i = 0; i < nmp; i++) {
210 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
211 if ((mpd & ER_TAG) != ER_MP) {
212 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
213 goto error;
215 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
216 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
217 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
220 /* First Slave Address Descriptor should be port 0:
221 * the main register space for the core
223 asd =
224 get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
225 &sizel, &sizeh);
226 if (asd == 0) {
227 /* Try again to see if it is a bridge */
228 asd =
229 get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
230 &addrh, &sizel, &sizeh);
231 if (asd != 0)
232 br = true;
233 else if ((addrh != 0) || (sizeh != 0)
234 || (sizel != SI_CORE_SIZE)) {
235 SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
236 goto error;
239 sii->coresba[idx] = addrl;
240 sii->coresba_size[idx] = sizel;
241 /* Get any more ASDs in port 0 */
242 j = 1;
243 do {
244 asd =
245 get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
246 &addrh, &sizel, &sizeh);
247 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
248 sii->coresba2[idx] = addrl;
249 sii->coresba2_size[idx] = sizel;
251 j++;
252 } while (asd != 0);
254 /* Go through the ASDs for other slave ports */
255 for (i = 1; i < nsp; i++) {
256 j = 0;
257 do {
258 asd =
259 get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
260 &addrl, &addrh, &sizel, &sizeh);
261 } while (asd != 0);
262 if (j == 0) {
263 SI_ERROR((" SP %d has no address descriptors\n",
264 i));
265 goto error;
269 /* Now get master wrappers */
270 for (i = 0; i < nmw; i++) {
271 asd =
272 get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
273 &addrh, &sizel, &sizeh);
274 if (asd == 0) {
275 SI_ERROR(("Missing descriptor for MW %d\n", i));
276 goto error;
278 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
279 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
280 goto error;
282 if (i == 0)
283 sii->wrapba[idx] = addrl;
286 /* And finally slave wrappers */
287 for (i = 0; i < nsw; i++) {
288 uint fwp = (nsp == 1) ? 0 : 1;
289 asd =
290 get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
291 &addrl, &addrh, &sizel, &sizeh);
292 if (asd == 0) {
293 SI_ERROR(("Missing descriptor for SW %d\n", i));
294 goto error;
296 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
297 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
298 goto error;
300 if ((nmw == 0) && (i == 0))
301 sii->wrapba[idx] = addrl;
304 /* Don't record bridges */
305 if (br)
306 continue;
308 /* Done with core */
309 sii->numcores++;
312 SI_ERROR(("Reached end of erom without finding END"));
314 error:
315 sii->numcores = 0;
316 return;
319 /* This function changes the logical "focus" to the indicated core.
320 * Return the current core's virtual address.
322 void *ai_setcoreidx(si_t *sih, uint coreidx)
324 si_info_t *sii = SI_INFO(sih);
325 u32 addr = sii->coresba[coreidx];
326 u32 wrap = sii->wrapba[coreidx];
327 void *regs;
329 if (coreidx >= sii->numcores)
330 return NULL;
332 switch (sih->bustype) {
333 case SI_BUS:
334 /* map new one */
335 if (!sii->regs[coreidx]) {
336 sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
338 sii->curmap = regs = sii->regs[coreidx];
339 if (!sii->wrappers[coreidx]) {
340 sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
342 sii->curwrap = sii->wrappers[coreidx];
343 break;
345 case PCI_BUS:
346 /* point bar0 window */
347 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
348 regs = sii->curmap;
349 /* point bar0 2nd 4KB window */
350 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
351 break;
353 case SPI_BUS:
354 case SDIO_BUS:
355 sii->curmap = regs = (void *)(unsigned long)addr;
356 sii->curwrap = (void *)(unsigned long)wrap;
357 break;
359 default:
360 regs = NULL;
361 break;
364 sii->curmap = regs;
365 sii->curidx = coreidx;
367 return regs;
370 /* Return the number of address spaces in current core */
371 int ai_numaddrspaces(si_t *sih)
373 return 2;
376 /* Return the address of the nth address space in the current core */
377 u32 ai_addrspace(si_t *sih, uint asidx)
379 si_info_t *sii;
380 uint cidx;
382 sii = SI_INFO(sih);
383 cidx = sii->curidx;
385 if (asidx == 0)
386 return sii->coresba[cidx];
387 else if (asidx == 1)
388 return sii->coresba2[cidx];
389 else {
390 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
391 return 0;
395 /* Return the size of the nth address space in the current core */
396 u32 ai_addrspacesize(si_t *sih, uint asidx)
398 si_info_t *sii;
399 uint cidx;
401 sii = SI_INFO(sih);
402 cidx = sii->curidx;
404 if (asidx == 0)
405 return sii->coresba_size[cidx];
406 else if (asidx == 1)
407 return sii->coresba2_size[cidx];
408 else {
409 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
410 return 0;
414 uint ai_flag(si_t *sih)
416 si_info_t *sii;
417 aidmp_t *ai;
419 sii = SI_INFO(sih);
420 if (BCM47162_DMP()) {
421 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
422 return sii->curidx;
424 ai = sii->curwrap;
426 return R_REG(&ai->oobselouta30) & 0x1f;
429 void ai_setint(si_t *sih, int siflag)
433 uint ai_corevendor(si_t *sih)
435 si_info_t *sii;
436 u32 cia;
438 sii = SI_INFO(sih);
439 cia = sii->cia[sii->curidx];
440 return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
443 uint ai_corerev(si_t *sih)
445 si_info_t *sii;
446 u32 cib;
448 sii = SI_INFO(sih);
449 cib = sii->cib[sii->curidx];
450 return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
453 bool ai_iscoreup(si_t *sih)
455 si_info_t *sii;
456 aidmp_t *ai;
458 sii = SI_INFO(sih);
459 ai = sii->curwrap;
461 return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
462 SICF_CLOCK_EN)
463 && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
466 void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
468 si_info_t *sii;
469 aidmp_t *ai;
470 u32 w;
472 sii = SI_INFO(sih);
474 if (BCM47162_DMP()) {
475 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
476 __func__));
477 return;
480 ai = sii->curwrap;
482 if (mask || val) {
483 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
484 W_REG(&ai->ioctrl, w);
488 u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
490 si_info_t *sii;
491 aidmp_t *ai;
492 u32 w;
494 sii = SI_INFO(sih);
495 if (BCM47162_DMP()) {
496 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
497 __func__));
498 return 0;
501 ai = sii->curwrap;
503 if (mask || val) {
504 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
505 W_REG(&ai->ioctrl, w);
508 return R_REG(&ai->ioctrl);
511 u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
513 si_info_t *sii;
514 aidmp_t *ai;
515 u32 w;
517 sii = SI_INFO(sih);
518 if (BCM47162_DMP()) {
519 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
520 return 0;
523 ai = sii->curwrap;
525 if (mask || val) {
526 w = ((R_REG(&ai->iostatus) & ~mask) | val);
527 W_REG(&ai->iostatus, w);
530 return R_REG(&ai->iostatus);
533 /* *************** from siutils.c ************** */
534 /* local prototypes */
535 static si_info_t *ai_doattach(si_info_t *sii, uint devid, void *regs,
536 uint bustype, void *sdh, char **vars,
537 uint *varsz);
538 static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
539 void *sdh);
540 static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
541 u32 savewin, uint *origidx, void *regs);
542 static void ai_nvram_process(si_info_t *sii, char *pvars);
544 /* dev path concatenation util */
545 static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name);
546 static bool _ai_clkctl_cc(si_info_t *sii, uint mode);
547 static bool ai_ispcie(si_info_t *sii);
549 /* global variable to indicate reservation/release of gpio's */
550 static u32 ai_gpioreservation;
553 * Allocate a si handle.
554 * devid - pci device id (used to determine chip#)
555 * osh - opaque OS handle
556 * regs - virtual address of initial core registers
557 * bustype - pci/sb/sdio/etc
558 * vars - pointer to a pointer area for "environment" variables
559 * varsz - pointer to int to return the size of the vars
561 si_t *ai_attach(uint devid, void *regs, uint bustype,
562 void *sdh, char **vars, uint *varsz)
564 si_info_t *sii;
566 /* alloc si_info_t */
567 sii = kmalloc(sizeof(si_info_t), GFP_ATOMIC);
568 if (sii == NULL) {
569 SI_ERROR(("si_attach: malloc failed!\n"));
570 return NULL;
573 if (ai_doattach(sii, devid, regs, bustype, sdh, vars, varsz) ==
574 NULL) {
575 kfree(sii);
576 return NULL;
578 sii->vars = vars ? *vars : NULL;
579 sii->varsz = varsz ? *varsz : 0;
581 return (si_t *) sii;
584 /* global kernel resource */
585 static si_info_t ksii;
587 static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
588 void *sdh)
590 /* kludge to enable the clock on the 4306 which lacks a slowclock */
591 if (bustype == PCI_BUS && !ai_ispcie(sii))
592 ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
593 return true;
596 static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
597 u32 savewin, uint *origidx, void *regs)
599 bool pci, pcie;
600 uint i;
601 uint pciidx, pcieidx, pcirev, pcierev;
603 cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
604 ASSERT(cc);
606 /* get chipcommon rev */
607 sii->pub.ccrev = (int)ai_corerev(&sii->pub);
609 /* get chipcommon chipstatus */
610 if (sii->pub.ccrev >= 11)
611 sii->pub.chipst = R_REG(&cc->chipstatus);
613 /* get chipcommon capabilites */
614 sii->pub.cccaps = R_REG(&cc->capabilities);
615 /* get chipcommon extended capabilities */
617 if (sii->pub.ccrev >= 35)
618 sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
620 /* get pmu rev and caps */
621 if (sii->pub.cccaps & CC_CAP_PMU) {
622 sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
623 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
626 /* figure out bus/orignal core idx */
627 sii->pub.buscoretype = NODEV_CORE_ID;
628 sii->pub.buscorerev = NOREV;
629 sii->pub.buscoreidx = BADIDX;
631 pci = pcie = false;
632 pcirev = pcierev = NOREV;
633 pciidx = pcieidx = BADIDX;
635 for (i = 0; i < sii->numcores; i++) {
636 uint cid, crev;
638 ai_setcoreidx(&sii->pub, i);
639 cid = ai_coreid(&sii->pub);
640 crev = ai_corerev(&sii->pub);
642 /* Display cores found */
643 SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
644 i, cid, crev, sii->coresba[i], sii->regs[i]));
646 if (bustype == PCI_BUS) {
647 if (cid == PCI_CORE_ID) {
648 pciidx = i;
649 pcirev = crev;
650 pci = true;
651 } else if (cid == PCIE_CORE_ID) {
652 pcieidx = i;
653 pcierev = crev;
654 pcie = true;
658 /* find the core idx before entering this func. */
659 if ((savewin && (savewin == sii->coresba[i])) ||
660 (regs == sii->regs[i]))
661 *origidx = i;
664 if (pci && pcie) {
665 if (ai_ispcie(sii))
666 pci = false;
667 else
668 pcie = false;
670 if (pci) {
671 sii->pub.buscoretype = PCI_CORE_ID;
672 sii->pub.buscorerev = pcirev;
673 sii->pub.buscoreidx = pciidx;
674 } else if (pcie) {
675 sii->pub.buscoretype = PCIE_CORE_ID;
676 sii->pub.buscorerev = pcierev;
677 sii->pub.buscoreidx = pcieidx;
680 SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx,
681 sii->pub.buscoretype, sii->pub.buscorerev));
683 /* fixup necessary chip/core configurations */
684 if (sii->pub.bustype == PCI_BUS) {
685 if (SI_FAST(sii)) {
686 if (!sii->pch) {
687 sii->pch = (void *)pcicore_init(
688 &sii->pub, sii->pbus,
689 (void *)PCIEREGS(sii));
690 if (sii->pch == NULL)
691 return false;
694 if (ai_pci_fixcfg(&sii->pub)) {
695 SI_ERROR(("si_doattach: si_pci_fixcfg failed\n"));
696 return false;
700 /* return to the original core */
701 ai_setcoreidx(&sii->pub, *origidx);
703 return true;
706 static __used void ai_nvram_process(si_info_t *sii, char *pvars)
708 uint w = 0;
710 /* get boardtype and boardrev */
711 switch (sii->pub.bustype) {
712 case PCI_BUS:
713 /* do a pci config read to get subsystem id and subvendor id */
714 pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
715 /* Let nvram variables override subsystem Vend/ID */
716 sii->pub.boardvendor = (u16)ai_getdevpathintvar(&sii->pub,
717 "boardvendor");
718 if (sii->pub.boardvendor == 0)
719 sii->pub.boardvendor = w & 0xffff;
720 else
721 SI_ERROR(("Overriding boardvendor: 0x%x instead of "
722 "0x%x\n", sii->pub.boardvendor, w & 0xffff));
723 sii->pub.boardtype = (u16)ai_getdevpathintvar(&sii->pub,
724 "boardtype");
725 if (sii->pub.boardtype == 0)
726 sii->pub.boardtype = (w >> 16) & 0xffff;
727 else
728 SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n"
729 , sii->pub.boardtype, (w >> 16) & 0xffff));
730 break;
732 sii->pub.boardvendor = getintvar(pvars, "manfid");
733 sii->pub.boardtype = getintvar(pvars, "prodid");
734 break;
736 case SI_BUS:
737 case JTAG_BUS:
738 sii->pub.boardvendor = PCI_VENDOR_ID_BROADCOM;
739 sii->pub.boardtype = getintvar(pvars, "prodid");
740 if (pvars == NULL || (sii->pub.boardtype == 0)) {
741 sii->pub.boardtype = getintvar(NULL, "boardtype");
742 if (sii->pub.boardtype == 0)
743 sii->pub.boardtype = 0xffff;
745 break;
748 if (sii->pub.boardtype == 0) {
749 SI_ERROR(("si_doattach: unknown board type\n"));
750 ASSERT(sii->pub.boardtype);
753 sii->pub.boardflags = getintvar(pvars, "boardflags");
756 static si_info_t *ai_doattach(si_info_t *sii, uint devid,
757 void *regs, uint bustype, void *pbus,
758 char **vars, uint *varsz)
760 struct si_pub *sih = &sii->pub;
761 u32 w, savewin;
762 chipcregs_t *cc;
763 char *pvars = NULL;
764 uint origidx;
766 ASSERT(GOODREGS(regs));
768 memset((unsigned char *) sii, 0, sizeof(si_info_t));
770 savewin = 0;
772 sih->buscoreidx = BADIDX;
774 sii->curmap = regs;
775 sii->pbus = pbus;
777 /* check to see if we are a si core mimic'ing a pci core */
778 if (bustype == PCI_BUS) {
779 pci_read_config_dword(sii->pbus, PCI_SPROM_CONTROL, &w);
780 if (w == 0xffffffff) {
781 SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
782 " switching to SI devid:0x%x\n",
783 __func__, devid));
784 bustype = SI_BUS;
788 /* find Chipcommon address */
789 if (bustype == PCI_BUS) {
790 pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
791 if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
792 savewin = SI_ENUM_BASE;
793 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
794 SI_ENUM_BASE);
795 cc = (chipcregs_t *) regs;
796 } else {
797 cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
800 sih->bustype = bustype;
802 /* bus/core/clk setup for register access */
803 if (!ai_buscore_prep(sii, bustype, devid, pbus)) {
804 SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
805 bustype));
806 return NULL;
810 * ChipID recognition.
811 * We assume we can read chipid at offset 0 from the regs arg.
812 * If we add other chiptypes (or if we need to support old sdio
813 * hosts w/o chipcommon), some way of recognizing them needs to
814 * be added here.
816 w = R_REG(&cc->chipid);
817 sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
818 /* Might as wll fill in chip id rev & pkg */
819 sih->chip = w & CID_ID_MASK;
820 sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
821 sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
823 sih->issim = IS_SIM(sih->chippkg);
825 /* scan for cores */
826 if (sii->pub.socitype == SOCI_AI) {
827 SI_MSG(("Found chip type AI (0x%08x)\n", w));
828 /* pass chipc address instead of original core base */
829 ai_scan(&sii->pub, (void *)cc, devid);
830 } else {
831 SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
832 return NULL;
834 /* no cores found, bail out */
835 if (sii->numcores == 0) {
836 SI_ERROR(("si_doattach: could not find any cores\n"));
837 return NULL;
839 /* bus/core/clk setup */
840 origidx = SI_CC_IDX;
841 if (!ai_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
842 SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
843 goto exit;
846 /* assume current core is CC */
847 if ((sii->pub.ccrev == 0x25)
849 ((sih->chip == BCM43236_CHIP_ID
850 || sih->chip == BCM43235_CHIP_ID
851 || sih->chip == BCM43238_CHIP_ID)
852 && (sii->pub.chiprev <= 2))) {
854 if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
855 uint clkdiv;
856 clkdiv = R_REG(&cc->clkdiv);
857 /* otp_clk_div is even number, 120/14 < 9mhz */
858 clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
859 W_REG(&cc->clkdiv, clkdiv);
860 SI_ERROR(("%s: set clkdiv to %x\n", __func__, clkdiv));
862 udelay(10);
865 /* Init nvram from flash if it exists */
866 nvram_init((void *)&(sii->pub));
868 /* Init nvram from sprom/otp if they exist */
869 if (srom_var_init
870 (&sii->pub, bustype, regs, vars, varsz)) {
871 SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
872 goto exit;
874 pvars = vars ? *vars : NULL;
875 ai_nvram_process(sii, pvars);
877 /* === NVRAM, clock is ready === */
878 cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
879 W_REG(&cc->gpiopullup, 0);
880 W_REG(&cc->gpiopulldown, 0);
881 ai_setcoreidx(sih, origidx);
883 /* PMU specific initializations */
884 if (PMUCTL_ENAB(sih)) {
885 u32 xtalfreq;
886 si_pmu_init(sih);
887 si_pmu_chip_init(sih);
888 xtalfreq = getintvar(pvars, "xtalfreq");
889 /* If xtalfreq var not available, try to measure it */
890 if (xtalfreq == 0)
891 xtalfreq = si_pmu_measure_alpclk(sih);
892 si_pmu_pll_init(sih, xtalfreq);
893 si_pmu_res_init(sih);
894 si_pmu_swreg_init(sih);
897 /* setup the GPIO based LED powersave register */
898 w = getintvar(pvars, "leddc");
899 if (w == 0)
900 w = DEFAULT_GPIOTIMERVAL;
901 ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, gpiotimerval), ~0, w);
903 if (PCIE(sii)) {
904 ASSERT(sii->pch != NULL);
905 pcicore_attach(sii->pch, pvars, SI_DOATTACH);
908 if ((sih->chip == BCM43224_CHIP_ID) ||
909 (sih->chip == BCM43421_CHIP_ID)) {
911 * enable 12 mA drive strenth for 43224 and
912 * set chipControl register bit 15
914 if (sih->chiprev == 0) {
915 SI_MSG(("Applying 43224A0 WARs\n"));
916 ai_corereg(sih, SI_CC_IDX,
917 offsetof(chipcregs_t, chipcontrol),
918 CCTRL43224_GPIO_TOGGLE,
919 CCTRL43224_GPIO_TOGGLE);
920 si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
921 CCTRL_43224A0_12MA_LED_DRIVE);
923 if (sih->chiprev >= 1) {
924 SI_MSG(("Applying 43224B0+ WARs\n"));
925 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
926 CCTRL_43224B0_12MA_LED_DRIVE);
930 if (sih->chip == BCM4313_CHIP_ID) {
932 * enable 12 mA drive strenth for 4313 and
933 * set chipControl register bit 1
935 SI_MSG(("Applying 4313 WARs\n"));
936 si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
937 CCTRL_4313_12MA_LED_DRIVE);
940 if (sih->chip == BCM4331_CHIP_ID) {
941 /* Enable Ext PA lines depending on chip package option */
942 ai_chipcontrl_epa4331(sih, true);
945 return sii;
946 exit:
947 if (sih->bustype == PCI_BUS) {
948 if (sii->pch)
949 pcicore_deinit(sii->pch);
950 sii->pch = NULL;
953 return NULL;
956 /* may be called with core in reset */
957 void ai_detach(si_t *sih)
959 si_info_t *sii;
960 uint idx;
962 struct si_pub *si_local = NULL;
963 bcopy(&sih, &si_local, sizeof(si_t **));
965 sii = SI_INFO(sih);
967 if (sii == NULL)
968 return;
970 if (sih->bustype == SI_BUS)
971 for (idx = 0; idx < SI_MAXCORES; idx++)
972 if (sii->regs[idx]) {
973 iounmap(sii->regs[idx]);
974 sii->regs[idx] = NULL;
977 nvram_exit((void *)si_local); /* free up nvram buffers */
979 if (sih->bustype == PCI_BUS) {
980 if (sii->pch)
981 pcicore_deinit(sii->pch);
982 sii->pch = NULL;
985 if (sii != &ksii)
986 kfree(sii);
989 /* register driver interrupt disabling and restoring callback functions */
990 void
991 ai_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
992 void *intrsenabled_fn, void *intr_arg)
994 si_info_t *sii;
996 sii = SI_INFO(sih);
997 sii->intr_arg = intr_arg;
998 sii->intrsoff_fn = (si_intrsoff_t) intrsoff_fn;
999 sii->intrsrestore_fn = (si_intrsrestore_t) intrsrestore_fn;
1000 sii->intrsenabled_fn = (si_intrsenabled_t) intrsenabled_fn;
1001 /* save current core id. when this function called, the current core
1002 * must be the core which provides driver functions(il, et, wl, etc.)
1004 sii->dev_coreid = sii->coreid[sii->curidx];
1007 void ai_deregister_intr_callback(si_t *sih)
1009 si_info_t *sii;
1011 sii = SI_INFO(sih);
1012 sii->intrsoff_fn = NULL;
1015 uint ai_coreid(si_t *sih)
1017 si_info_t *sii;
1019 sii = SI_INFO(sih);
1020 return sii->coreid[sii->curidx];
1023 uint ai_coreidx(si_t *sih)
1025 si_info_t *sii;
1027 sii = SI_INFO(sih);
1028 return sii->curidx;
1031 bool ai_backplane64(si_t *sih)
1033 return (sih->cccaps & CC_CAP_BKPLN64) != 0;
1036 /* return index of coreid or BADIDX if not found */
1037 uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit)
1039 si_info_t *sii;
1040 uint found;
1041 uint i;
1043 sii = SI_INFO(sih);
1045 found = 0;
1047 for (i = 0; i < sii->numcores; i++)
1048 if (sii->coreid[i] == coreid) {
1049 if (found == coreunit)
1050 return i;
1051 found++;
1054 return BADIDX;
1058 * This function changes logical "focus" to the indicated core;
1059 * must be called with interrupts off.
1060 * Moreover, callers should keep interrupts off during switching
1061 * out of and back to d11 core.
1063 void *ai_setcore(si_t *sih, uint coreid, uint coreunit)
1065 uint idx;
1067 idx = ai_findcoreidx(sih, coreid, coreunit);
1068 if (!GOODIDX(idx))
1069 return NULL;
1071 if (sih->socitype == SOCI_AI)
1072 return ai_setcoreidx(sih, idx);
1073 else {
1074 ASSERT(0);
1075 return NULL;
1079 /* Turn off interrupt as required by ai_setcore, before switch core */
1080 void *ai_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
1082 void *cc;
1083 si_info_t *sii;
1085 sii = SI_INFO(sih);
1087 if (SI_FAST(sii)) {
1088 /* Overloading the origidx variable to remember the coreid,
1089 * this works because the core ids cannot be confused with
1090 * core indices.
1092 *origidx = coreid;
1093 if (coreid == CC_CORE_ID)
1094 return (void *)CCREGS_FAST(sii);
1095 else if (coreid == sih->buscoretype)
1096 return (void *)PCIEREGS(sii);
1098 INTR_OFF(sii, *intr_val);
1099 *origidx = sii->curidx;
1100 cc = ai_setcore(sih, coreid, 0);
1101 ASSERT(cc != NULL);
1103 return cc;
1106 /* restore coreidx and restore interrupt */
1107 void ai_restore_core(si_t *sih, uint coreid, uint intr_val)
1109 si_info_t *sii;
1111 sii = SI_INFO(sih);
1112 if (SI_FAST(sii)
1113 && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
1114 return;
1116 ai_setcoreidx(sih, coreid);
1117 INTR_RESTORE(sii, intr_val);
1120 void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val)
1122 si_info_t *sii = SI_INFO(sih);
1123 u32 *w = (u32 *) sii->curwrap;
1124 W_REG(w + (offset / 4), val);
1125 return;
1129 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
1130 * operation, switch back to the original core, and return the new value.
1132 * When using the silicon backplane, no fiddling with interrupts or core
1133 * switches is needed.
1135 * Also, when using pci/pcie, we can optimize away the core switching for pci
1136 * registers and (on newer pci cores) chipcommon registers.
1138 uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
1140 uint origidx = 0;
1141 u32 *r = NULL;
1142 uint w;
1143 uint intr_val = 0;
1144 bool fast = false;
1145 si_info_t *sii;
1147 sii = SI_INFO(sih);
1149 ASSERT(GOODIDX(coreidx));
1150 ASSERT(regoff < SI_CORE_SIZE);
1151 ASSERT((val & ~mask) == 0);
1153 if (coreidx >= SI_MAXCORES)
1154 return 0;
1156 if (sih->bustype == SI_BUS) {
1157 /* If internal bus, we can always get at everything */
1158 fast = true;
1159 /* map if does not exist */
1160 if (!sii->regs[coreidx]) {
1161 sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
1162 SI_CORE_SIZE);
1163 ASSERT(GOODREGS(sii->regs[coreidx]));
1165 r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
1166 } else if (sih->bustype == PCI_BUS) {
1168 * If pci/pcie, we can get at pci/pcie regs
1169 * and on newer cores to chipc
1171 if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1172 /* Chipc registers are mapped at 12KB */
1174 fast = true;
1175 r = (u32 *) ((char *)sii->curmap +
1176 PCI_16KB0_CCREGS_OFFSET + regoff);
1177 } else if (sii->pub.buscoreidx == coreidx) {
1179 * pci registers are at either in the last 2KB of
1180 * an 8KB window or, in pcie and pci rev 13 at 8KB
1182 fast = true;
1183 if (SI_FAST(sii))
1184 r = (u32 *) ((char *)sii->curmap +
1185 PCI_16KB0_PCIREGS_OFFSET +
1186 regoff);
1187 else
1188 r = (u32 *) ((char *)sii->curmap +
1189 ((regoff >= SBCONFIGOFF) ?
1190 PCI_BAR0_PCISBR_OFFSET :
1191 PCI_BAR0_PCIREGS_OFFSET) +
1192 regoff);
1196 if (!fast) {
1197 INTR_OFF(sii, intr_val);
1199 /* save current core index */
1200 origidx = ai_coreidx(&sii->pub);
1202 /* switch core */
1203 r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx)
1204 + regoff);
1206 ASSERT(r != NULL);
1208 /* mask and set */
1209 if (mask || val) {
1210 w = (R_REG(r) & ~mask) | val;
1211 W_REG(r, w);
1214 /* readback */
1215 w = R_REG(r);
1217 if (!fast) {
1218 /* restore core index */
1219 if (origidx != coreidx)
1220 ai_setcoreidx(&sii->pub, origidx);
1222 INTR_RESTORE(sii, intr_val);
1225 return w;
1228 void ai_core_disable(si_t *sih, u32 bits)
1230 si_info_t *sii;
1231 u32 dummy;
1232 aidmp_t *ai;
1234 sii = SI_INFO(sih);
1236 ASSERT(GOODREGS(sii->curwrap));
1237 ai = sii->curwrap;
1239 /* if core is already in reset, just return */
1240 if (R_REG(&ai->resetctrl) & AIRC_RESET)
1241 return;
1243 W_REG(&ai->ioctrl, bits);
1244 dummy = R_REG(&ai->ioctrl);
1245 udelay(10);
1247 W_REG(&ai->resetctrl, AIRC_RESET);
1248 udelay(1);
1251 /* reset and re-enable a core
1252 * inputs:
1253 * bits - core specific bits that are set during and after reset sequence
1254 * resetbits - core specific bits that are set only during reset sequence
1256 void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
1258 si_info_t *sii;
1259 aidmp_t *ai;
1260 u32 dummy;
1262 sii = SI_INFO(sih);
1263 ASSERT(GOODREGS(sii->curwrap));
1264 ai = sii->curwrap;
1267 * Must do the disable sequence first to work
1268 * for arbitrary current core state.
1270 ai_core_disable(sih, (bits | resetbits));
1273 * Now do the initialization sequence.
1275 W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1276 dummy = R_REG(&ai->ioctrl);
1277 W_REG(&ai->resetctrl, 0);
1278 udelay(1);
1280 W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
1281 dummy = R_REG(&ai->ioctrl);
1282 udelay(1);
/* return the slow clock source - LPO, XTAL, or PCI */
static uint ai_slowclk_src(si_info_t *sii)
{
	chipcregs_t *cc;
	u32 val;

	ASSERT(SI_FAST(sii) || ai_coreid(&sii->pub) == CC_CORE_ID);

	if (sii->pub.ccrev < 6) {
		/* pre-rev6: a PCI config GPIO bit selects the PCI clock */
		if (sii->pub.bustype == PCI_BUS) {
			pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
					      &val);
			if (val & PCI_CFG_GPIO_SCS)
				return SCC_SS_PCI;
		}
		return SCC_SS_XTAL;
	} else if (sii->pub.ccrev < 10) {
		/* rev6..9: read the source field from slow_clk_ctl */
		cc = (chipcregs_t *) ai_setcoreidx(&sii->pub, sii->curidx);
		return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
	} else			/* Insta-clock */
		return SCC_SS_XTAL;
}
/* return the ILP (slowclock) min or max frequency */
static uint ai_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
{
	u32 slowclk;
	uint div;

	ASSERT(SI_FAST(sii) || ai_coreid(&sii->pub) == CC_CORE_ID);

	/*
	 * shouldn't be here unless we've established
	 * the chip has dynamic clk control
	 */
	ASSERT(R_REG(&cc->capabilities) & CC_CAP_PWR_CTL);

	slowclk = ai_slowclk_src(sii);
	if (sii->pub.ccrev < 6) {
		/* pre-rev6: fixed dividers (/64 for PCI, /32 for xtal) */
		if (slowclk == SCC_SS_PCI)
			return max_freq ? (PCIMAXFREQ / 64)
			    : (PCIMINFREQ / 64);
		else
			return max_freq ? (XTALMAXFREQ / 32)
			    : (XTALMINFREQ / 32);
	} else if (sii->pub.ccrev < 10) {
		/* rev6..9: divider is programmed in slow_clk_ctl */
		div = 4 *
		    (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
		      SCC_CD_SHIFT) + 1);
		if (slowclk == SCC_SS_LPO)
			return max_freq ? LPOMAXFREQ : LPOMINFREQ;
		else if (slowclk == SCC_SS_XTAL)
			return max_freq ? (XTALMAXFREQ / div)
			    : (XTALMINFREQ / div);
		else if (slowclk == SCC_SS_PCI)
			return max_freq ? (PCIMAXFREQ / div)
			    : (PCIMINFREQ / div);
		else
			ASSERT(0);
	} else {
		/* Chipc rev 10 is InstaClock */
		div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
		div = 4 * (div + 1);
		return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
	}
	return 0;
}
/* program chipcommon's pll_on/fref_sel delay registers, scaled by the
 * current slow clock rate
 */
static void ai_clkctl_setdelay(si_info_t *sii, void *chipcregs)
{
	chipcregs_t *cc = (chipcregs_t *) chipcregs;
	uint slowmaxfreq, pll_delay, slowclk;
	uint pll_on_delay, fref_sel_delay;

	pll_delay = PLL_DELAY;

	/*
	 * If the slow clock is not sourced by the xtal then
	 * add the xtal_on_delay since the xtal will also be
	 * powered down by dynamic clk control logic.
	 */
	slowclk = ai_slowclk_src(sii);
	if (slowclk != SCC_SS_XTAL)
		pll_delay += XTAL_ON_DELAY;

	/* Starting with 4318 it is ILP that is used for the delays */
	slowmaxfreq =
	    ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);

	/* convert microsecond delays into slow-clock ticks, rounding up */
	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;

	W_REG(&cc->pll_on_delay, pll_on_delay);
	W_REG(&cc->fref_sel_delay, fref_sel_delay);
}
/* initialize power control delay registers */
void ai_clkctl_init(si_t *sih)
{
	si_info_t *sii;
	uint origidx = 0;
	chipcregs_t *cc;
	bool fast;

	if (!CCCTL_ENAB(sih))
		return;

	sii = SI_INFO(sih);
	fast = SI_FAST(sii);
	if (!fast) {
		origidx = sii->curidx;
		cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
		/* NOTE(review): returns without restoring origidx here;
		 * presumably harmless because ai_setcore failed, but
		 * confirm it leaves the core index unchanged on failure.
		 */
		if (cc == NULL)
			return;
	} else {
		cc = (chipcregs_t *) CCREGS_FAST(sii);
		if (cc == NULL)
			return;
	}
	ASSERT(cc != NULL);

	/* set all Instaclk chip ILP to 1 MHz */
	if (sih->ccrev >= 10)
		SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
			(ILP_DIV_1MHZ << SYCC_CD_SHIFT));

	ai_clkctl_setdelay(sii, (void *)cc);

	/* restore the previously selected core */
	if (!fast)
		ai_setcoreidx(sih, origidx);
}
/*
 * return the value suitable for writing to the
 * dot11 core FAST_PWRUP_DELAY register
 */
u16 ai_clkctl_fast_pwrup_delay(si_t *sih)
{
	si_info_t *sii;
	uint origidx = 0;
	chipcregs_t *cc;
	uint slowminfreq;
	u16 fpdelay;
	uint intr_val = 0;
	bool fast;

	sii = SI_INFO(sih);
	/* chips with a PMU get the delay straight from the PMU code */
	if (PMUCTL_ENAB(sih)) {
		INTR_OFF(sii, intr_val);
		fpdelay = si_pmu_fast_pwrup_delay(sih);
		INTR_RESTORE(sii, intr_val);
		return fpdelay;
	}

	if (!CCCTL_ENAB(sih))
		return 0;

	fast = SI_FAST(sii);
	fpdelay = 0;
	if (!fast) {
		origidx = sii->curidx;
		INTR_OFF(sii, intr_val);
		cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
		if (cc == NULL)
			goto done;
	} else {
		cc = (chipcregs_t *) CCREGS_FAST(sii);
		if (cc == NULL)
			goto done;
	}
	ASSERT(cc != NULL);

	/* scale (pll_on_delay + 2) slow-clock ticks to microseconds,
	 * rounding up, against the minimum slow clock frequency
	 */
	slowminfreq = ai_slowclk_freq(sii, false, cc);
	fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
		   (slowminfreq - 1)) / slowminfreq;

done:
	if (!fast) {
		ai_setcoreidx(sih, origidx);
		INTR_RESTORE(sii, intr_val);
	}
	return fpdelay;
}
/* turn primary xtal and/or pll off/on */
int ai_clkctl_xtal(si_t *sih, uint what, bool on)
{
	si_info_t *sii;
	u32 in, out, outen;

	sii = SI_INFO(sih);

	switch (sih->bustype) {

	case PCI_BUS:
		/* pcie core doesn't have any mapping to control the xtal pu */
		if (PCIE(sii))
			return -1;

		pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
		pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
		pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);

		/*
		 * Avoid glitching the clock if GPRS is already using it.
		 * We can't actually read the state of the PLLPD so we infer it
		 * by the value of XTAL_PU which *is* readable via gpioin.
		 */
		if (on && (in & PCI_CFG_GPIO_XTAL))
			return 0;

		if (what & XTAL)
			outen |= PCI_CFG_GPIO_XTAL;
		if (what & PLL)
			outen |= PCI_CFG_GPIO_PLL;

		if (on) {
			/* turn primary xtal on */
			if (what & XTAL) {
				out |= PCI_CFG_GPIO_XTAL;
				if (what & PLL)
					out |= PCI_CFG_GPIO_PLL;
				pci_write_config_dword(sii->pbus,
						       PCI_GPIO_OUT, out);
				pci_write_config_dword(sii->pbus,
						       PCI_GPIO_OUTEN, outen);
				udelay(XTAL_ON_DELAY);
			}

			/* turn pll on (note: the PLL bit is cleared to
			 * power up and set to power down)
			 */
			if (what & PLL) {
				out &= ~PCI_CFG_GPIO_PLL;
				pci_write_config_dword(sii->pbus,
						       PCI_GPIO_OUT, out);
				mdelay(2);
			}
		} else {
			if (what & XTAL)
				out &= ~PCI_CFG_GPIO_XTAL;
			if (what & PLL)
				out |= PCI_CFG_GPIO_PLL;
			pci_write_config_dword(sii->pbus,
					       PCI_GPIO_OUT, out);
			pci_write_config_dword(sii->pbus,
					       PCI_GPIO_OUTEN, outen);
		}
		break;

	default:
		return -1;
	}

	return 0;
}
1541 * clock control policy function throught chipcommon
1543 * set dynamic clk control mode (forceslow, forcefast, dynamic)
1544 * returns true if we are forcing fast clock
1545 * this is a wrapper over the next internal function
1546 * to allow flexible policy settings for outside caller
1548 bool ai_clkctl_cc(si_t *sih, uint mode)
1550 si_info_t *sii;
1552 sii = SI_INFO(sih);
1554 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1555 if (sih->ccrev < 6)
1556 return false;
1558 if (PCI_FORCEHT(sii))
1559 return mode == CLK_FAST;
1561 return _ai_clkctl_cc(sii, mode);
/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(si_info_t *sii, uint mode)
{
	uint origidx = 0;
	chipcregs_t *cc;
	u32 scc;
	uint intr_val = 0;
	bool fast = SI_FAST(sii);

	/* chipcommon cores prior to rev6 don't support dynamic clock control */
	if (sii->pub.ccrev < 6)
		return false;

	/*
	 * Chips with ccrev 10 are EOL and they
	 * don't have SYCC_HR which we use below
	 */
	ASSERT(sii->pub.ccrev != 10);

	if (!fast) {
		INTR_OFF(sii, intr_val);
		origidx = sii->curidx;

		/* SI bus with an old (rev <= 7) MIPS core and ccrev >= 10:
		 * leave the clocks alone
		 */
		if ((sii->pub.bustype == SI_BUS) &&
		    ai_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
		    (ai_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
			goto done;

		cc = (chipcregs_t *) ai_setcore(&sii->pub, CC_CORE_ID, 0);
	} else {
		cc = (chipcregs_t *) CCREGS_FAST(sii);
		if (cc == NULL)
			goto done;
	}
	ASSERT(cc != NULL);

	if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20))
		goto done;

	switch (mode) {
	case CLK_FAST:		/* FORCEHT, fast (pll) clock */
		if (sii->pub.ccrev < 10) {
			/*
			 * don't forget to force xtal back
			 * on before we clear SCC_DYN_XTAL..
			 */
			ai_clkctl_xtal(&sii->pub, XTAL, ON);
			SET_REG(&cc->slow_clk_ctl,
				(SCC_XC | SCC_FS | SCC_IP), SCC_IP);
		} else if (sii->pub.ccrev < 20) {
			OR_REG(&cc->system_clk_ctl, SYCC_HR);
		} else {
			OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
		}

		/* wait for the PLL */
		if (PMUCTL_ENAB(&sii->pub)) {
			u32 htavail = CCS_HTAVAIL;
			SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
				  == 0), PMU_MAX_TRANSITION_DLY);
			ASSERT(R_REG(&cc->clk_ctl_st) & htavail);
		} else {
			udelay(PLL_DELAY);
		}
		break;

	case CLK_DYNAMIC:	/* enable dynamic clock control */
		if (sii->pub.ccrev < 10) {
			scc = R_REG(&cc->slow_clk_ctl);
			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
			/* only force the xtal off if it isn't the slow
			 * clock source
			 */
			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
				scc |= SCC_XC;
			W_REG(&cc->slow_clk_ctl, scc);

			/*
			 * for dynamic control, we have to
			 * release our xtal_pu "force on"
			 */
			if (scc & SCC_XC)
				ai_clkctl_xtal(&sii->pub, XTAL, OFF);
		} else if (sii->pub.ccrev < 20) {
			/* Instaclock */
			AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
		} else {
			AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
		}
		break;

	default:
		ASSERT(0);
	}

done:
	if (!fast) {
		ai_setcoreidx(&sii->pub, origidx);
		INTR_RESTORE(sii, intr_val);
	}
	return mode == CLK_FAST;
}
1664 /* Build device path. Support SI, PCI, and JTAG for now. */
1665 int ai_devpath(si_t *sih, char *path, int size)
1667 int slen;
1669 ASSERT(path != NULL);
1670 ASSERT(size >= SI_DEVPATH_BUFSZ);
1672 if (!path || size <= 0)
1673 return -1;
1675 switch (sih->bustype) {
1676 case SI_BUS:
1677 case JTAG_BUS:
1678 slen = snprintf(path, (size_t) size, "sb/%u/", ai_coreidx(sih));
1679 break;
1680 case PCI_BUS:
1681 ASSERT((SI_INFO(sih))->pbus != NULL);
1682 slen = snprintf(path, (size_t) size, "pci/%u/%u/",
1683 ((struct pci_dev *)((SI_INFO(sih))->pbus))->bus->number,
1684 PCI_SLOT(
1685 ((struct pci_dev *)((SI_INFO(sih))->pbus))->devfn));
1686 break;
1688 default:
1689 slen = -1;
1690 ASSERT(0);
1691 break;
1694 if (slen < 0 || slen >= size) {
1695 path[0] = '\0';
1696 return -1;
1699 return 0;
1702 /* Get a variable, but only if it has a devpath prefix */
1703 char *ai_getdevpathvar(si_t *sih, const char *name)
1705 char varname[SI_DEVPATH_BUFSZ + 32];
1707 ai_devpathvar(sih, varname, sizeof(varname), name);
1709 return getvar(NULL, varname);
/* Get an integer variable, but only if it has a devpath prefix */
int ai_getdevpathintvar(si_t *sih, const char *name)
{
#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
	/* fixed SI-bus builds skip the devpath prefix entirely */
	return getintvar(NULL, name);
#else
	char varname[SI_DEVPATH_BUFSZ + 32];

	ai_devpathvar(sih, varname, sizeof(varname), name);

	return getintvar(NULL, varname);
#endif
}
/* Look up an nvram variable by its plain name (no devpath prefix);
 * 'sih' is unused.
 */
char *ai_getnvramflvar(si_t *sih, const char *name)
{
	return getvar(NULL, name);
}
1731 /* Concatenate the dev path with a varname into the given 'var' buffer
1732 * and return the 'var' pointer. Nothing is done to the arguments if
1733 * len == 0 or var is NULL, var is still returned. On overflow, the
1734 * first char will be set to '\0'.
1736 static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name)
1738 uint path_len;
1740 if (!var || len <= 0)
1741 return var;
1743 if (ai_devpath(sih, var, len) == 0) {
1744 path_len = strlen(var);
1746 if (strlen(name) + 1 > (uint) (len - path_len))
1747 var[0] = '\0';
1748 else
1749 strncpy(var + path_len, name, len - path_len - 1);
1752 return var;
1755 /* return true if PCIE capability exists in the pci config space */
1756 static __used bool ai_ispcie(si_info_t *sii)
1758 u8 cap_ptr;
1760 if (sii->pub.bustype != PCI_BUS)
1761 return false;
1763 cap_ptr =
1764 pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
1765 NULL);
1766 if (!cap_ptr)
1767 return false;
1769 return true;
1772 bool ai_pci_war16165(si_t *sih)
1774 si_info_t *sii;
1776 sii = SI_INFO(sih);
1778 return PCI(sii) && (sih->buscorerev <= 10);
/* Do all the updates needed when the PCI(E) bus comes up */
void ai_pci_up(si_t *sih)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	/* if not pci bus, we're done */
	if (sih->bustype != PCI_BUS)
		return;

	/* force the HT clock on while the bus is up, if so configured */
	if (PCI_FORCEHT(sii))
		_ai_clkctl_cc(sii, CLK_FAST);

	if (PCIE(sii))
		pcicore_up(sii->pch, SI_PCIUP);
}
1799 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
1800 void ai_pci_sleep(si_t *sih)
1802 si_info_t *sii;
1804 sii = SI_INFO(sih);
1806 pcicore_sleep(sii->pch);
/* Unconfigure and/or apply various WARs when going down */
void ai_pci_down(si_t *sih)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	/* if not pci bus, we're done */
	if (sih->bustype != PCI_BUS)
		return;

	/* release FORCEHT since chip is going to "down" state */
	if (PCI_FORCEHT(sii))
		_ai_clkctl_cc(sii, CLK_DYNAMIC);

	pcicore_down(sii->pch, SI_PCIDOWN);
}
/*
 * Configure the pci core for pci client (NIC) action
 * coremask is the bitvec of cores by index to be enabled.
 */
void ai_pci_setup(si_t *sih, uint coremask)
{
	si_info_t *sii;
	struct sbpciregs *pciregs = NULL;
	u32 siflag = 0, w;
	uint idx = 0;

	sii = SI_INFO(sih);

	if (sii->pub.bustype != PCI_BUS)
		return;

	ASSERT(PCI(sii) || PCIE(sii));
	ASSERT(sii->pub.buscoreidx != BADIDX);

	if (PCI(sii)) {
		/* get current core index */
		idx = sii->curidx;

		/* we interrupt on this backplane flag number */
		siflag = ai_flag(sih);

		/* switch over to pci core */
		pciregs = ai_setcoreidx(sih, sii->pub.buscoreidx);
	}

	/*
	 * Enable sb->pci interrupts. Assume
	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
	 */
	if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
		/* pci config write to set this core bit in PCIIntMask */
		pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
		w |= (coremask << PCI_SBIM_SHIFT);
		pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
	} else {
		/* set sbintvec bit for our flag number */
		ai_setint(sih, siflag);
	}

	if (PCI(sii)) {
		/* enable prefetch and bursts for sb->pci translations */
		OR_REG(&pciregs->sbtopci2,
		       (SBTOPCI_PREF | SBTOPCI_BURST));
		if (sii->pub.buscorerev >= 11) {
			OR_REG(&pciregs->sbtopci2,
			       SBTOPCI_RC_READMULTI);
			/* disable CLKRUN; the trailing read posts the write */
			w = R_REG(&pciregs->clkrun);
			W_REG(&pciregs->clkrun,
			      (w | PCI_CLKRUN_DSBL));
			w = R_REG(&pciregs->clkrun);
		}

		/* switch back to previous core */
		ai_setcoreidx(sih, idx);
	}
}
1889 * Fixup SROMless PCI device's configuration.
1890 * The current core may be changed upon return.
1892 int ai_pci_fixcfg(si_t *sih)
1894 uint origidx, pciidx;
1895 struct sbpciregs *pciregs = NULL;
1896 sbpcieregs_t *pcieregs = NULL;
1897 void *regs = NULL;
1898 u16 val16, *reg16 = NULL;
1900 si_info_t *sii = SI_INFO(sih);
1902 ASSERT(sii->pub.bustype == PCI_BUS);
1904 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
1905 /* save the current index */
1906 origidx = ai_coreidx(&sii->pub);
1908 /* check 'pi' is correct and fix it if not */
1909 if (sii->pub.buscoretype == PCIE_CORE_ID) {
1910 pcieregs = ai_setcore(&sii->pub, PCIE_CORE_ID, 0);
1911 regs = pcieregs;
1912 ASSERT(pcieregs != NULL);
1913 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
1914 } else if (sii->pub.buscoretype == PCI_CORE_ID) {
1915 pciregs = ai_setcore(&sii->pub, PCI_CORE_ID, 0);
1916 regs = pciregs;
1917 ASSERT(pciregs != NULL);
1918 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
1920 pciidx = ai_coreidx(&sii->pub);
1921 val16 = R_REG(reg16);
1922 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16) pciidx) {
1923 val16 =
1924 (u16) (pciidx << SRSH_PI_SHIFT) | (val16 &
1925 ~SRSH_PI_MASK);
1926 W_REG(reg16, val16);
1929 /* restore the original index */
1930 ai_setcoreidx(&sii->pub, origidx);
1932 pcicore_hwup(sii->pch);
1933 return 0;
/* mask&set gpiocontrol bits */
u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
{
	uint regoff;

	regoff = 0;

	/*
	 * gpios could be shared on router platforms
	 * ignore reservation if it's high priority (e.g., test apps)
	 */
	if ((priority != GPIO_HI_PRIORITY) &&
	    (sih->bustype == SI_BUS) && (val || mask)) {
		/* restrict the request to gpios this caller may touch:
		 * reserved ones when priority != 0, unreserved otherwise
		 */
		mask = priority ? (ai_gpioreservation & mask) :
		    ((ai_gpioreservation | mask) & ~(ai_gpioreservation));
		val &= mask;
	}

	regoff = offsetof(chipcregs_t, gpiocontrol);
	return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
}
/* enable/disable the external PA controls on a BCM4331 via chipcontrol */
void ai_chipcontrl_epa4331(si_t *sih, bool on)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;
	u32 val;

	sii = SI_INFO(sih);
	origidx = ai_coreidx(sih);

	cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);

	val = R_REG(&cc->chipcontrol);

	if (on) {
		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
			/* Ext PA Controls for 4331 12x9 Package */
			W_REG(&cc->chipcontrol, val |
			      (CCTRL4331_EXTPA_EN |
			       CCTRL4331_EXTPA_ON_GPIO2_5));
		} else {
			/* Ext PA Controls for 4331 12x12 Package */
			W_REG(&cc->chipcontrol,
			      val | (CCTRL4331_EXTPA_EN));
		}
	} else {
		/* clear both PA control bits */
		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
		W_REG(&cc->chipcontrol, val);
	}

	/* restore the previously selected core */
	ai_setcoreidx(sih, origidx);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(si_t *sih)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;

	sii = SI_INFO(sih);
	origidx = ai_coreidx(sih);

	cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);

	/* EPA Fix: enable the external PA via gpiocontrol */
	W_REG(&cc->gpiocontrol,
	      R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);

	/* restore the previously selected core */
	ai_setcoreidx(sih, origidx);
}
2009 /* check if the device is removed */
2010 bool ai_deviceremoved(si_t *sih)
2012 u32 w;
2013 si_info_t *sii;
2015 sii = SI_INFO(sih);
2017 switch (sih->bustype) {
2018 case PCI_BUS:
2019 ASSERT(sii->pbus != NULL);
2020 pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
2021 if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
2022 return true;
2023 break;
2025 return false;
/* report whether an SPROM is attached/selected on this device */
bool ai_is_sprom_available(si_t *sih)
{
	/* chipcommon rev >= 31 reports SPROM presence in sromcontrol */
	if (sih->ccrev >= 31) {
		si_info_t *sii;
		uint origidx;
		chipcregs_t *cc;
		u32 sromctrl;

		if ((sih->cccaps & CC_CAP_SROM) == 0)
			return false;

		sii = SI_INFO(sih);
		origidx = sii->curidx;
		cc = ai_setcoreidx(sih, SI_CC_IDX);
		sromctrl = R_REG(&cc->sromcontrol);
		ai_setcoreidx(sih, origidx);
		return sromctrl & SRC_PRESENT;
	}

	/* older revs: consult the per-chip status bits */
	switch (sih->chip) {
	case BCM4329_CHIP_ID:
		return (sih->chipst & CST4329_SPROM_SEL) != 0;
	case BCM4319_CHIP_ID:
		return (sih->chipst & CST4319_SPROM_SEL) != 0;
	case BCM4336_CHIP_ID:
		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
	case BCM4330_CHIP_ID:
		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
	case BCM4313_CHIP_ID:
		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
	case BCM4331_CHIP_ID:
		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
	default:
		return true;
	}
}
2065 bool ai_is_otp_disabled(si_t *sih)
2067 switch (sih->chip) {
2068 case BCM4329_CHIP_ID:
2069 return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
2070 CST4329_OTP_PWRDN;
2071 case BCM4319_CHIP_ID:
2072 return (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) ==
2073 CST4319_OTP_PWRDN;
2074 case BCM4336_CHIP_ID:
2075 return (sih->chipst & CST4336_OTP_PRESENT) == 0;
2076 case BCM4330_CHIP_ID:
2077 return (sih->chipst & CST4330_OTP_PRESENT) == 0;
2078 case BCM4313_CHIP_ID:
2079 return (sih->chipst & CST4313_OTP_PRESENT) == 0;
2080 /* These chips always have their OTP on */
2081 case BCM43224_CHIP_ID:
2082 case BCM43225_CHIP_ID:
2083 case BCM43421_CHIP_ID:
2084 case BCM43235_CHIP_ID:
2085 case BCM43236_CHIP_ID:
2086 case BCM43238_CHIP_ID:
2087 case BCM4331_CHIP_ID:
2088 default:
2089 return false;
2093 bool ai_is_otp_powered(si_t *sih)
2095 if (PMUCTL_ENAB(sih))
2096 return si_pmu_is_otp_powered(sih);
2097 return true;
/* switch OTP power on/off via the PMU, then wait 1 ms for it to settle */
void ai_otp_power(si_t *sih, bool on)
{
	/* only chips with PMU control can gate OTP power */
	if (PMUCTL_ENAB(sih))
		si_pmu_otp_power(sih, on);
	/* settle delay is applied unconditionally */
	udelay(1000);
}