/* Extracted from tomato.git: release/src/shared/sbutils.c (commit: "Support for BCM5365") */
/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright 2007, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 * $Id$
 */
15 #include <typedefs.h>
16 #include <bcmdefs.h>
17 #include <osl.h>
18 #include <bcmutils.h>
19 #include <sbutils.h>
20 #include <bcmdevs.h>
21 #include <sbconfig.h>
22 #include <sbchipc.h>
23 #include <sbpci.h>
24 #include <sbpcie.h>
25 #include <pcicfg.h>
26 #include <sbpcmcia.h>
27 #include <sbsocram.h>
28 #include <bcmnvram.h>
29 #include <bcmsrom.h>
30 #include <hndpmu.h>
/* debug/trace -- compiled out; define to printf-style macros to enable */
#define	SB_ERROR(args)
#define	SB_MSG(args)

/* interrupt-control callbacks registered by the bus/function driver */
typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*sb_intrsenabled_t)(void *intr_arg);
/* One registered GPIO event handler (singly-linked list node) */
typedef struct gpioh_item {
	void		*arg;		/* opaque arg passed back to handler */
	bool		level;		/* NOTE(review): presumably level- vs edge-triggered -- confirm */
	gpio_handler_t	handler;	/* callback invoked on GPIO event */
	uint32		event;		/* GPIO event bitmask this handler is registered for */
	struct gpioh_item *next;	/* next handler in list */
} gpioh_item_t;
/* misc sb info needed by some of the routines */
typedef struct sb_info {

	struct sb_pub	sb;		/* back plane public state (must be first field) */

	void	*osh;			/* osl os handle */
	void	*sdh;			/* bcmsdh handle */

	void	*curmap;		/* current regs va */
	void	*regs[SB_MAXCORES];	/* other regs va */

	uint	curidx;			/* current core index */
	uint	dev_coreid;		/* the core provides driver functions */

	bool	memseg;			/* flag to toggle MEM_SEG register */

	uint	numcores;		/* # discovered cores */
	uint	coreid[SB_MAXCORES];	/* id of each core */
	uint32	coresba[SB_MAXCORES];	/* backplane address of each core */

	void	*intr_arg;		/* interrupt callback function arg */
	sb_intrsoff_t		intrsoff_fn;	/* turns chip interrupts off */
	sb_intrsrestore_t	intrsrestore_fn; /* restore chip interrupts */
	sb_intrsenabled_t	intrsenabled_fn; /* check if interrupts are enabled */

	uint8	pciecap_lcreg_offset;	/* PCIE capability LCreg offset in the config space */
	bool	pr42767_war;		/* TRUE once the PR42767 (serdes) WAR has been applied */
	uint8	pcie_polarity;		/* saved SERDES RX polarity, restored by sb_war43448() */
	bool	pcie_war_ovr;		/* Override ASPM/Clkreq settings */

	uint8	pmecap_offset;		/* PM Capability offset in the config space */
	bool	pmecap;			/* Capable of generating PME */

	gpioh_item_t	*gpioh_head;	/* GPIO event handlers list */

	char	*vars;			/* "environment" variable buffer */
	uint	varsz;			/* size of vars buffer */
} sb_info_t;
/* local prototypes */
static sb_info_t * sb_doattach(sb_info_t *si, uint devid, osl_t *osh, void *regs,
	uint bustype, void *sdh, char **vars, uint *varsz);
static void sb_scan(sb_info_t *si, void *regs, uint devid);
static uint _sb_coreidx(sb_info_t *si, uint32 sba);
static uint _sb_scan(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba,
	uint ncores);
static uint32 _sb_coresba(sb_info_t *si);
static void *_sb_setcoreidx(sb_info_t *si, uint coreidx);
static uint sb_chip2numcores(uint chip);
static bool sb_ispcie(sb_info_t *si);
static uint8 sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id,
	uchar *buf, uint32 *buflen);
static int sb_pci_fixcfg(sb_info_t *si);
/* routines to access mdio slave device registers */
static int sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint readdr, uint val);
static int sb_pcie_mdioread(sb_info_t *si, uint physmedia, uint readdr, uint *ret_val);

/* dev path concatenation util */
static char *sb_devpathvar(sb_t *sbh, char *var, int len, const char *name);

/* WARs (hardware bug workarounds, named after Broadcom PR numbers) */
static void sb_war43448(sb_t *sbh);
static void sb_war43448_aspm(sb_t *sbh);
static void sb_war32414_forceHT(sb_t *sbh, bool forceHT);
static void sb_war30841(sb_info_t *si);
static void sb_war42767(sb_t *sbh);
static void sb_war42767_clkreq(sb_t *sbh);
/* delay needed between the mdio control/ mdiodata register data access */
#define PR28829_DELAY() OSL_DELAY(10)

/* size that can take bitfielddump */
#define BITFIELD_DUMP_SIZE 32

/* global variable to indicate reservation/release of gpio's */
static uint32 sb_gpioreservation = 0;

/* global flag to prevent shared resources from being initialized multiple times in sb_attach() */
static bool sb_onetimeinit = FALSE;

#define	SB_INFO(sbh)	(sb_info_t*)(uintptr)sbh
#define	SET_SBREG(si, r, mask, val)	\
		W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SB_MAXCORES * SB_CORE_SIZE)) && \
		ISALIGNED((x), SB_CORE_SIZE))
#define	GOODREGS(regs)	((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
#define	BADCOREADDR	0
#define	GOODIDX(idx)	(((uint)idx) < SB_MAXCORES)
#define	BADIDX		(SB_MAXCORES+1)
#define	NOREV		-1		/* Invalid rev */

/* bus/core-type predicates on the attach-time discovered state */
#define	PCI(si)		((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
#define	PCIE(si)	((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
#define	PCMCIA(si)	((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->memseg == TRUE))

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(si, sbr)	sb_read_sbreg((si), (sbr))
#define	W_SBREG(si, sbr, v)	sb_write_sbreg((si), (sbr), (v))
#define	AND_SBREG(si, sbr, v)	W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
#define	OR_SBREG(si, sbr, v)	W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))

/*
 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
 * after core switching to avoid invalid register access inside ISR.
 */
#define INTR_OFF(si, intr_val) \
	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
#define INTR_RESTORE(si, intr_val) \
	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }

/* dynamic clock control defines */
#define	LPOMINFREQ		25000		/* low power oscillator min */
#define	LPOMAXFREQ		43000		/* low power oscillator max */
#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
#define	PCIMINFREQ		25000000	/* 25 MHz */
#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */

#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */

/* force HT war check */
#define FORCEHT_WAR32414(si)	\
	(((PCIE(si)) && (si->sb.chip == BCM4311_CHIP_ID) && ((si->sb.chiprev <= 1))) || \
	((PCI(si) || PCIE(si)) && (si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3)))

#define PCIE_ASPMWARS(si)	\
	((PCIE(si)) && ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)))

/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */

#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
190 static uint32
191 sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
193 uint8 tmp;
194 uint32 val, intr_val = 0;
198 * compact flash only has 11 bits address, while we needs 12 bits address.
199 * MEM_SEG will be OR'd with other 11 bits address in hardware,
200 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
201 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
203 if (PCMCIA(si)) {
204 INTR_OFF(si, intr_val);
205 tmp = 1;
206 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
207 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
210 val = R_REG(si->osh, sbr);
212 if (PCMCIA(si)) {
213 tmp = 0;
214 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
215 INTR_RESTORE(si, intr_val);
218 return (val);
221 static void
222 sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
224 uint8 tmp;
225 volatile uint32 dummy;
226 uint32 intr_val = 0;
230 * compact flash only has 11 bits address, while we needs 12 bits address.
231 * MEM_SEG will be OR'd with other 11 bits address in hardware,
232 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
233 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
235 if (PCMCIA(si)) {
236 INTR_OFF(si, intr_val);
237 tmp = 1;
238 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
239 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
242 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
243 #ifdef IL_BIGENDIAN
244 dummy = R_REG(si->osh, sbr);
245 W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
246 dummy = R_REG(si->osh, sbr);
247 W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
248 #else
249 dummy = R_REG(si->osh, sbr);
250 W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
251 dummy = R_REG(si->osh, sbr);
252 W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
253 #endif /* IL_BIGENDIAN */
254 } else
255 W_REG(si->osh, sbr, v);
257 if (PCMCIA(si)) {
258 tmp = 0;
259 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
260 INTR_RESTORE(si, intr_val);
265 * Allocate a sb handle.
266 * devid - pci device id (used to determine chip#)
267 * osh - opaque OS handle
268 * regs - virtual address of initial core registers
269 * bustype - pci/pcmcia/sb/sdio/etc
270 * vars - pointer to a pointer area for "environment" variables
271 * varsz - pointer to int to return the size of the vars
273 sb_t *
274 BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
275 uint bustype, void *sdh, char **vars, uint *varsz)
277 sb_info_t *si;
279 /* alloc sb_info_t */
280 if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
281 SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
282 return (NULL);
285 if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
286 MFREE(osh, si, sizeof(sb_info_t));
287 return (NULL);
289 si->vars = vars ? *vars : NULL;
290 si->varsz = varsz ? *varsz : 0;
292 return (sb_t *)si;
295 /* Using sb_kattach depends on SB_BUS support, either implicit */
296 /* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
297 #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
299 /* global kernel resource */
300 static sb_info_t ksi;
302 /* generic kernel variant of sb_attach() */
303 sb_t *
304 BCMINITFN(sb_kattach)(osl_t *osh)
306 static bool ksi_attached = FALSE;
308 if (!ksi_attached) {
309 void *regs = (void *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
311 if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, regs,
312 SB_BUS, NULL,
313 osh != SB_OSH ? &ksi.vars : NULL,
314 osh != SB_OSH ? &ksi.varsz : NULL) == NULL) {
315 SB_ERROR(("sb_kattach: sb_doattach failed\n"));
316 return NULL;
319 ksi_attached = TRUE;
322 return &ksi.sb;
324 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
326 static sb_info_t *
327 BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
328 uint bustype, void *sdh, char **vars, uint *varsz)
330 uint origidx;
331 chipcregs_t *cc;
332 sbconfig_t *sb;
333 uint32 w;
334 char *pvars;
336 ASSERT(GOODREGS(regs));
338 bzero((uchar*)si, sizeof(sb_info_t));
340 si->sb.buscoreidx = BADIDX;
342 si->curmap = regs;
343 si->sdh = sdh;
344 si->osh = osh;
346 /* check to see if we are a sb core mimic'ing a pci core */
347 if (bustype == PCI_BUS) {
348 if (OSL_PCI_READ_CONFIG(si->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff) {
349 SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
350 "devid:0x%x\n", __FUNCTION__, devid));
351 bustype = SB_BUS;
354 si->sb.bustype = bustype;
355 if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
356 SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
357 si->sb.bustype, BUSTYPE(si->sb.bustype)));
358 return NULL;
361 /* need to set memseg flag for CF card first before any sb registers access */
362 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
363 si->memseg = TRUE;
365 /* kludge to enable the clock on the 4306 which lacks a slowclock */
366 if (BUSTYPE(si->sb.bustype) == PCI_BUS && !sb_ispcie(si))
367 sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);
369 if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
370 w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
371 if (!GOODCOREADDR(w, SB_ENUM_BASE))
372 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32), SB_ENUM_BASE);
376 /* get sonics backplane revision */
377 sb = REGS2SB(regs);
378 si->sb.sonicsrev = (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
380 /* scan for cores */
381 sb_scan(si, regs, devid);
383 /* no cores found, bail out */
384 if (si->numcores == 0) {
385 SB_ERROR(("sb_doattach: could not find any cores\n"));
386 return NULL;
389 /* save the current core index */
390 origidx = si->curidx;
392 /* don't go beyond if there is no chipc core in the chip */
393 if (!(cc = sb_setcore(&si->sb, SB_CC, 0)))
394 return si;
396 if (BUSTYPE(si->sb.bustype) == SB_BUS &&
397 (si->sb.chip == BCM4712_CHIP_ID) &&
398 (si->sb.chippkg != BCM4712LARGE_PKG_ID) &&
399 (si->sb.chiprev <= 3))
400 OR_REG(si->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
402 /* fixup necessary chip/core configurations */
403 if (BUSTYPE(si->sb.bustype) == PCI_BUS && sb_pci_fixcfg(si)) {
404 SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
405 return NULL;
409 /* Switch back to the original core, nvram/srom init needs it */
410 sb_setcoreidx(&si->sb, origidx);
412 /* Init nvram from flash if it exists */
413 nvram_init((void *)&si->sb);
415 /* Init nvram from sprom/otp if they exist */
416 if (srom_var_init(&si->sb, BUSTYPE(si->sb.bustype), regs, si->osh, vars, varsz)) {
417 SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
418 return (NULL);
420 pvars = vars ? *vars : NULL;
422 /* PMU specific initializations */
423 if ((si->sb.cccaps & CC_CAP_PMU) && !sb_onetimeinit) {
424 sb_pmu_init(&si->sb, si->osh);
425 /* Find out Crystal frequency and init PLL */
426 sb_pmu_pll_init(&si->sb, si->osh, getintvar(pvars, "xtalfreq"));
427 /* Initialize PMU resources (up/dn timers, dep masks, etc.) */
428 sb_pmu_res_init(&si->sb, si->osh);
431 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
432 w = getintvar(pvars, "regwindowsz");
433 si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
436 /* get boardtype and boardrev */
437 switch (BUSTYPE(si->sb.bustype)) {
438 case PCI_BUS:
439 /* do a pci config read to get subsystem id and subvendor id */
440 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
441 /* Let nvram variables override subsystem Vend/ID */
442 if ((si->sb.boardvendor = (uint16)sb_getdevpathintvar(&si->sb, "boardvendor")) == 0)
443 si->sb.boardvendor = w & 0xffff;
444 else
445 SB_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
446 si->sb.boardvendor, w & 0xffff));
447 if ((si->sb.boardtype = (uint16)sb_getdevpathintvar(&si->sb, "boardtype")) == 0)
448 si->sb.boardtype = (w >> 16) & 0xffff;
449 else
450 SB_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
451 si->sb.boardtype, (w >> 16) & 0xffff));
452 break;
454 case PCMCIA_BUS:
455 si->sb.boardvendor = getintvar(pvars, "manfid");
456 si->sb.boardtype = getintvar(pvars, "prodid");
457 break;
459 case SB_BUS:
460 case JTAG_BUS:
461 si->sb.boardvendor = VENDOR_BROADCOM;
462 if (pvars == NULL || ((si->sb.boardtype = getintvar(pvars, "prodid")) == 0))
463 if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
464 si->sb.boardtype = 0xffff;
465 break;
468 if (si->sb.boardtype == 0) {
469 SB_ERROR(("sb_doattach: unknown board type\n"));
470 ASSERT(si->sb.boardtype);
473 si->sb.boardflags = getintvar(pvars, "boardflags");
475 /* setup the GPIO based LED powersave register */
476 if (si->sb.ccrev >= 16) {
477 if ((pvars == NULL) || ((w = getintvar(pvars, "leddc")) == 0))
478 w = DEFAULT_GPIOTIMERVAL;
479 sb_corereg(&si->sb, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
482 /* Determine if this board needs override */
483 if (PCIE(si) && (si->sb.chip == BCM4321_CHIP_ID))
484 si->pcie_war_ovr = ((si->sb.boardvendor == VENDOR_APPLE) &&
485 ((uint8)getintvar(pvars, "sromrev") == 4) &&
486 ((uint8)getintvar(pvars, "boardrev") <= 0x71)) ||
487 ((uint32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR);
489 if (PCIE_ASPMWARS(si)) {
490 sb_war43448_aspm((void *)si);
491 sb_war42767_clkreq((void *)si);
494 if (FORCEHT_WAR32414(si)) {
495 si->sb.pr32414 = TRUE;
496 sb_clkctl_init(&si->sb);
497 sb_war32414_forceHT(&si->sb, 1);
500 if (PCIE(si) && ((si->sb.buscorerev == 6) || (si->sb.buscorerev == 7)))
501 si->sb.pr42780 = TRUE;
503 if (PCIE_ASPMWARS(si))
504 sb_pcieclkreq(&si->sb, 1, 0);
506 if (PCIE(si) &&
507 (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
508 ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
509 sb_set_initiator_to(&si->sb, 0x3, sb_findcoreidx(&si->sb, SB_D11, 0));
511 /* Disable gpiopullup and gpiopulldown */
512 if (!sb_onetimeinit && si->sb.ccrev >= 20) {
513 cc = (chipcregs_t *)sb_setcore(&si->sb, SB_CC, 0);
514 W_REG(osh, &cc->gpiopullup, 0);
515 W_REG(osh, &cc->gpiopulldown, 0);
516 sb_setcoreidx(&si->sb, origidx);
520 #ifdef HNDRTE
521 sb_onetimeinit = TRUE;
522 #endif
524 return (si);
527 /* Enable/Disable clkreq for PCIE (4311B0/4321B1) */
528 void
529 BCMINITFN(sb_war42780_clkreq)(sb_t *sbh, bool clkreq)
531 sb_info_t *si;
533 si = SB_INFO(sbh);
535 /* Don't change clkreq value if serdespll war has not yet been applied */
536 if (!si->pr42767_war && PCIE_ASPMWARS(si))
537 return;
539 sb_pcieclkreq(sbh, 1, (int32)clkreq);
542 static void
543 BCMINITFN(sb_war43448)(sb_t *sbh)
545 sb_info_t *si;
547 si = SB_INFO(sbh);
549 /* if not pcie bus, we're done */
550 if (!PCIE(si) || !PCIE_ASPMWARS(si))
551 return;
553 /* Restore the polarity */
554 if (si->pcie_polarity != 0)
555 sb_pcie_mdiowrite((void *)(uintptr)&si->sb, MDIODATA_DEV_RX,
556 SERDES_RX_CTRL, si->pcie_polarity);
559 static void
560 BCMINITFN(sb_war43448_aspm)(sb_t *sbh)
562 uint32 w;
563 uint16 val16, *reg16;
564 sbpcieregs_t *pcieregs;
565 sb_info_t *si;
567 si = SB_INFO(sbh);
569 /* if not pcie bus, we're done */
570 if (!PCIE(si) || !PCIE_ASPMWARS(si))
571 return;
573 /* no ASPM stuff on QT or VSIM */
574 if (si->sb.chippkg == HDLSIM_PKG_ID || si->sb.chippkg == HWSIM_PKG_ID)
575 return;
577 pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
579 /* Enable ASPM in the shadow SROM and Link control */
580 reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
581 val16 = R_REG(si->osh, reg16);
582 if (!si->pcie_war_ovr)
583 val16 |= SRSH_ASPM_ENB;
584 else
585 val16 &= ~SRSH_ASPM_ENB;
586 W_REG(si->osh, reg16, val16);
588 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
589 if (!si->pcie_war_ovr)
590 w |= PCIE_ASPM_ENAB;
591 else
592 w &= ~PCIE_ASPM_ENAB;
593 OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
596 static void
597 BCMINITFN(sb_war32414_forceHT)(sb_t *sbh, bool forceHT)
599 sb_info_t *si;
600 uint32 val = 0;
602 si = SB_INFO(sbh);
604 ASSERT(FORCEHT_WAR32414(si));
607 if (forceHT)
608 val = SYCC_HR;
609 sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
610 SYCC_HR, val);
613 uint
614 sb_coreid(sb_t *sbh)
616 sb_info_t *si;
617 sbconfig_t *sb;
619 si = SB_INFO(sbh);
620 sb = REGS2SB(si->curmap);
622 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
625 uint
626 sb_flag(sb_t *sbh)
628 sb_info_t *si;
629 sbconfig_t *sb;
631 si = SB_INFO(sbh);
632 sb = REGS2SB(si->curmap);
634 return R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
637 uint
638 sb_coreidx(sb_t *sbh)
640 sb_info_t *si;
642 si = SB_INFO(sbh);
643 return (si->curidx);
646 /* return core index of the core with address 'sba' */
647 static uint
648 BCMINITFN(_sb_coreidx)(sb_info_t *si, uint32 sba)
650 uint i;
652 for (i = 0; i < si->numcores; i ++)
653 if (sba == si->coresba[i])
654 return i;
655 return BADIDX;
658 /* return core address of the current core */
659 static uint32
660 BCMINITFN(_sb_coresba)(sb_info_t *si)
662 uint32 sbaddr;
664 switch (BUSTYPE(si->sb.bustype)) {
665 case SB_BUS: {
666 sbconfig_t *sb = REGS2SB(si->curmap);
667 sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
668 break;
671 case PCI_BUS:
672 sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
673 break;
675 case PCMCIA_BUS: {
676 uint8 tmp = 0;
677 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
678 sbaddr = (uint32)tmp << 12;
679 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
680 sbaddr |= (uint32)tmp << 16;
681 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
682 sbaddr |= (uint32)tmp << 24;
683 break;
687 #ifdef BCMJTAG
688 case JTAG_BUS:
689 sbaddr = (uint32)(uintptr)si->curmap;
690 break;
691 #endif /* BCMJTAG */
693 default:
694 sbaddr = BADCOREADDR;
695 break;
698 SB_MSG(("_sb_coresba: current core is 0x%08x\n", sbaddr));
699 return sbaddr;
702 uint
703 sb_corevendor(sb_t *sbh)
705 sb_info_t *si;
706 sbconfig_t *sb;
708 si = SB_INFO(sbh);
709 sb = REGS2SB(si->curmap);
711 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
714 uint
715 sb_corerev(sb_t *sbh)
717 sb_info_t *si;
718 sbconfig_t *sb;
719 uint sbidh;
721 si = SB_INFO(sbh);
722 sb = REGS2SB(si->curmap);
723 sbidh = R_SBREG(si, &sb->sbidhigh);
725 return (SBCOREREV(sbidh));
728 void *
729 sb_osh(sb_t *sbh)
731 sb_info_t *si;
733 si = SB_INFO(sbh);
734 return si->osh;
737 void
738 sb_setosh(sb_t *sbh, osl_t *osh)
740 sb_info_t *si;
742 si = SB_INFO(sbh);
743 if (si->osh != NULL) {
744 SB_ERROR(("osh is already set....\n"));
745 ASSERT(!si->osh);
747 si->osh = osh;
750 /* set sbtmstatelow core-specific flags */
751 void
752 sb_coreflags_wo(sb_t *sbh, uint32 mask, uint32 val)
754 sb_info_t *si;
755 sbconfig_t *sb;
756 uint32 w;
758 si = SB_INFO(sbh);
759 sb = REGS2SB(si->curmap);
761 ASSERT((val & ~mask) == 0);
763 /* mask and set */
764 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
765 W_SBREG(si, &sb->sbtmstatelow, w);
768 /* set/clear sbtmstatelow core-specific flags */
769 uint32
770 sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
772 sb_info_t *si;
773 sbconfig_t *sb;
774 uint32 w;
776 si = SB_INFO(sbh);
777 sb = REGS2SB(si->curmap);
779 ASSERT((val & ~mask) == 0);
781 /* mask and set */
782 if (mask || val) {
783 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
784 W_SBREG(si, &sb->sbtmstatelow, w);
787 /* return the new value
788 * for write operation, the following readback ensures the completion of write opration.
790 return (R_SBREG(si, &sb->sbtmstatelow));
793 /* set/clear sbtmstatehigh core-specific flags */
794 uint32
795 sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
797 sb_info_t *si;
798 sbconfig_t *sb;
799 uint32 w;
801 si = SB_INFO(sbh);
802 sb = REGS2SB(si->curmap);
804 ASSERT((val & ~mask) == 0);
805 ASSERT((mask & ~SBTMH_FL_MASK) == 0);
807 /* mask and set */
808 if (mask || val) {
809 w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
810 W_SBREG(si, &sb->sbtmstatehigh, w);
813 /* return the new value */
814 return (R_SBREG(si, &sb->sbtmstatehigh));
817 /* Run bist on current core. Caller needs to take care of core-specific bist hazards */
819 sb_corebist(sb_t *sbh)
821 uint32 sblo;
822 sb_info_t *si;
823 sbconfig_t *sb;
824 int result = 0;
826 si = SB_INFO(sbh);
827 sb = REGS2SB(si->curmap);
829 sblo = R_SBREG(si, &sb->sbtmstatelow);
830 W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));
832 SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0), 100000);
834 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)
835 result = BCME_ERROR;
837 W_SBREG(si, &sb->sbtmstatelow, sblo);
839 return result;
842 bool
843 sb_iscoreup(sb_t *sbh)
845 sb_info_t *si;
846 sbconfig_t *sb;
848 si = SB_INFO(sbh);
849 sb = REGS2SB(si->curmap);
851 return ((R_SBREG(si, &sb->sbtmstatelow) &
852 (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
856 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
857 * switch back to the original core, and return the new value.
859 * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
861 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
862 * and (on newer pci cores) chipcommon registers.
864 uint
865 sb_corereg(sb_t *sbh, uint coreidx, uint regoff, uint mask, uint val)
867 uint origidx = 0;
868 uint32 *r = NULL;
869 uint w;
870 uint intr_val = 0;
871 bool fast = FALSE;
872 sb_info_t *si;
874 si = SB_INFO(sbh);
876 ASSERT(GOODIDX(coreidx));
877 ASSERT(regoff < SB_CORE_SIZE);
878 ASSERT((val & ~mask) == 0);
880 if (BUSTYPE(si->sb.bustype) == SB_BUS) {
881 /* If internal bus, we can always get at everything */
882 fast = TRUE;
883 /* map if does not exist */
884 if (!si->regs[coreidx]) {
885 si->regs[coreidx] = (void*)REG_MAP(si->coresba[coreidx],
886 SB_CORE_SIZE);
887 ASSERT(GOODREGS(si->regs[coreidx]));
889 r = (uint32 *)((uchar *)si->regs[coreidx] + regoff);
890 } else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
891 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
893 if ((si->coreid[coreidx] == SB_CC) &&
894 ((si->sb.buscoretype == SB_PCIE) ||
895 (si->sb.buscorerev >= 13))) {
896 /* Chipc registers are mapped at 12KB */
898 fast = TRUE;
899 r = (uint32 *)((char *)si->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
900 } else if (si->sb.buscoreidx == coreidx) {
901 /* pci registers are at either in the last 2KB of an 8KB window
902 * or, in pcie and pci rev 13 at 8KB
904 fast = TRUE;
905 if ((si->sb.buscoretype == SB_PCIE) ||
906 (si->sb.buscorerev >= 13))
907 r = (uint32 *)((char *)si->curmap +
908 PCI_16KB0_PCIREGS_OFFSET + regoff);
909 else
910 r = (uint32 *)((char *)si->curmap +
911 ((regoff >= SBCONFIGOFF) ?
912 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
913 regoff);
917 if (!fast) {
918 INTR_OFF(si, intr_val);
920 /* save current core index */
921 origidx = sb_coreidx(&si->sb);
923 /* switch core */
924 r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
926 ASSERT(r);
928 /* mask and set */
929 if (mask || val) {
930 if (regoff >= SBCONFIGOFF) {
931 w = (R_SBREG(si, r) & ~mask) | val;
932 W_SBREG(si, r, w);
933 } else {
934 w = (R_REG(si->osh, r) & ~mask) | val;
935 W_REG(si->osh, r, w);
939 /* readback */
940 if (regoff >= SBCONFIGOFF)
941 w = R_SBREG(si, r);
942 else {
943 #if defined(BCM5354)
944 if ((si->sb.chip == BCM5354_CHIP_ID) &&
945 (coreidx == SB_CC_IDX) &&
946 (regoff == OFFSETOF(chipcregs_t, watchdog))) {
947 w = val;
948 } else
949 #endif /* BCM5354 */
950 w = R_REG(si->osh, r);
953 if (!fast) {
954 /* restore core index */
955 if (origidx != coreidx)
956 sb_setcoreidx(&si->sb, origidx);
958 INTR_RESTORE(si, intr_val);
961 return (w);
/* helpers to pull a single byte/word out of a 32bit pci config read */
#define DWORD_ALIGN(x)  (x & ~(0x03))
#define BYTE_POS(x) (x & 0x3)
#define WORD_POS(x) (x & 0x1)

#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
#define WORD_SHIFT(x)  (16 * WORD_POS(x))

#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)

/* NOTE: these evaluate 'si' from the enclosing scope */
#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
981 /* return cap_offset if requested capability exists in the PCI config space */
982 static uint8
983 sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
985 uint8 cap_id;
986 uint8 cap_ptr = 0;
987 uint32 bufsize;
988 uint8 byte_val;
990 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
991 goto end;
993 /* check for Header type 0 */
994 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
995 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
996 goto end;
998 /* check if the capability pointer field exists */
999 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
1000 if (!(byte_val & PCI_CAPPTR_PRESENT))
1001 goto end;
1003 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
1004 /* check if the capability pointer is 0x00 */
1005 if (cap_ptr == 0x00)
1006 goto end;
1008 /* loop thr'u the capability list and see if the pcie capabilty exists */
1010 cap_id = read_pci_cfg_byte(cap_ptr);
1012 while (cap_id != req_cap_id) {
1013 cap_ptr = read_pci_cfg_byte((cap_ptr+1));
1014 if (cap_ptr == 0x00) break;
1015 cap_id = read_pci_cfg_byte(cap_ptr);
1017 if (cap_id != req_cap_id) {
1018 goto end;
1020 /* found the caller requested capability */
1021 if ((buf != NULL) && (buflen != NULL)) {
1022 uint8 cap_data;
1024 bufsize = *buflen;
1025 if (!bufsize) goto end;
1026 *buflen = 0;
1027 /* copy the cpability data excluding cap ID and next ptr */
1028 cap_data = cap_ptr + 2;
1029 if ((bufsize + cap_data) > SZPCR)
1030 bufsize = SZPCR - cap_data;
1031 *buflen = bufsize;
1032 while (bufsize--) {
1033 *buf = read_pci_cfg_byte(cap_data);
1034 cap_data++;
1035 buf++;
1038 end:
1039 return cap_ptr;
1042 uint8
1043 sb_pcieclkreq(sb_t *sbh, uint32 mask, uint32 val)
1045 sb_info_t *si;
1046 uint32 reg_val;
1047 uint8 offset;
1049 si = SB_INFO(sbh);
1051 offset = si->pciecap_lcreg_offset;
1052 if (!offset)
1053 return 0;
1055 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1056 /* set operation */
1057 if (mask) {
1058 if (val)
1059 reg_val |= PCIE_CLKREQ_ENAB;
1060 else
1061 reg_val &= ~PCIE_CLKREQ_ENAB;
1062 OSL_PCI_WRITE_CONFIG(si->osh, offset, sizeof(uint32), reg_val);
1063 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1065 if (reg_val & PCIE_CLKREQ_ENAB)
1066 return 1;
1067 else
1068 return 0;
1073 /* return TRUE if PCIE capability exists in the pci config space */
1074 static bool
1075 sb_ispcie(sb_info_t *si)
1077 uint8 cap_ptr;
1079 cap_ptr = sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL);
1080 if (!cap_ptr)
1081 return FALSE;
1083 si->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
1085 return TRUE;
1088 /* Wake-on-wireless-LAN (WOWL) support functions */
1089 /* return TRUE if PM capability exists in the pci config space */
1090 bool
1091 sb_pci_pmecap(sb_t *sbh)
1093 uint8 cap_ptr;
1094 uint32 pmecap;
1095 sb_info_t *si;
1097 si = SB_INFO(sbh);
1099 if (si == NULL || !(PCI(si) || PCIE(si)))
1100 return FALSE;
1102 if (!si->pmecap_offset) {
1103 cap_ptr = sb_find_pci_capability(si, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
1104 if (!cap_ptr)
1105 return FALSE;
1107 si->pmecap_offset = cap_ptr;
1109 pmecap = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset, sizeof(uint32));
1111 /* At least one state can generate PME */
1112 si->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
1115 return (si->pmecap);
1118 /* Enable PME generation and disable clkreq */
1119 void
1120 sb_pci_pmeen(sb_t *sbh)
1122 sb_info_t *si;
1123 uint32 w;
1124 si = SB_INFO(sbh);
1126 /* if not pmecapable return */
1127 if (!sb_pci_pmecap(sbh))
1128 return;
1130 w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1131 w |= (PME_CSR_PME_EN);
1132 OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
1134 /* Disable clkreq */
1135 if (si->pr42767_war) {
1136 sb_pcieclkreq(sbh, 1, 0);
1137 si->pr42767_war = FALSE;
1138 } else if (si->sb.pr42780) {
1139 sb_pcieclkreq(sbh, 1, 1);
/* Disable PME generation, clear the PME status bit if set and
 * return TRUE if PME status set
 */
bool
sb_pci_pmeclr(sb_t *sbh)
{
	sb_info_t *si;
	uint32 w;
	bool ret = FALSE;

	si = SB_INFO(sbh);

	if (!sb_pci_pmecap(sbh))
		return ret;

	w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));

	SB_ERROR(("sb_pci_pmeclr PMECSR : 0x%x\n", w));
	ret = (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;

	/* PMESTAT is cleared by writing 1 to it; the STAT bit read back as set
	 * is deliberately left in 'w' so the write-back below clears it, while
	 * the enable bit is stripped to stop further PME generation.
	 */
	w &= ~(PME_CSR_PME_EN);

	OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);

	return ret;
}
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
BCMINITFN(_sb_scan)(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;	/* cores found on bridged (child) buses */
	uint i;

	/* recursion depth guard: only the root bus plus one bridged bus */
	if (bus >= SB_MAXBUSES) {
		SB_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SB_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = si->numcores; i < numcores && next < SB_MAXCORES; i++, next++) {
		si->coresba[next] = sbba + i * SB_CORE_SIZE;

		/* keep and reuse the initial register mapping */
		if (BUSTYPE(si->sb.bustype) == SB_BUS && si->coresba[next] == sba) {
			SB_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			si->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		si->curmap = _sb_setcoreidx(si, next);
		si->curidx = next;

		si->coreid[next] = sb_coreid(&si->sb);

		/* core specific processing... */
		/* chipc on bus SB_ENUM_BASE provides # cores in the chip and lots of
		 * other stuff.
		 */
		if (sbba == SB_ENUM_BASE && si->coreid[next] == SB_CC) {
			chipcregs_t *cc = (chipcregs_t *)si->curmap;

			/* get chip id and rev */
			si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
			si->sb.chiprev = (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >>
			        CID_REV_SHIFT;
			si->sb.chippkg = (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >>
			        CID_PKG_SHIFT;

			/* get chipcommon rev */
			si->sb.ccrev = (int)sb_corerev(&si->sb);

			/* get chipcommon chipstatus */
			if (si->sb.ccrev >= 11)
				si->sb.chipst = R_REG(si->osh, &cc->chipstatus);

			/* get chipcommon capabilites */
			si->sb.cccaps = R_REG(si->osh, &cc->capabilities);

			/* get pmu rev and caps */
			if ((si->sb.cccaps & CC_CAP_PMU)) {
				si->sb.pmucaps = R_REG(si->osh, &cc->pmucapabilities);
				si->sb.pmurev = si->sb.pmucaps & PCAP_REV_MASK;
			}

			/* determine numcores - this is the total # cores in the chip */
			if (((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
				numcores = (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			else
				numcores = sb_chip2numcores(si->sb.chip);
			SB_MSG(("_sb_scan: there are %u cores in the chip\n", numcores));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (si->coreid[next] == SB_OCP) {
			sbconfig_t *sb = REGS2SB(si->curmap);
			uint32 nsbba = R_SBREG(si, &sb->sbadmatch1);
			uint nsbcc;

			si->numcores = next + 1;

			/* child bus must itself live in the enumeration window */
			if ((nsbba & 0xfff00000) != SB_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			/* skip buses already discovered (avoids re-scan loops) */
			if (_sb_coreidx(si, nsbba) != BADIDX)
				continue;

			/* child core count is advertised in the bridge's tmstatehigh */
			nsbcc = (R_SBREG(si, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(si, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SB_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SB_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	si->numcores = i + ncc;
	return si->numcores;
}
/* scan the sb enumerated space to identify all cores */
static void
BCMINITFN(sb_scan)(sb_info_t *si, void *regs, uint devid)
{
	uint origidx;
	uint32 origsba;
	uint i;
	bool pci;
	bool pcie;
	uint pciidx;
	uint pcieidx;
	uint pcirev;
	uint pcierev;
	uint numcores;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(si);
	origidx = BADIDX;

	/* Use devid as initial chipid and we'll update it later in _sb_scan */
	si->sb.chip = devid;

	/* Support chipcommon-less chips for a little while longer so the old
	 * sdio host fpga continues to work until we can get the new one working
	 * reliably. This particular chip has 2 cores - codec/sdio and pci.
	 */
	if (devid == SDIOH_FPGA_ID)
		numcores = 2;
	/* Expect at least one core on 0x18000000 and it must be chipcommon where
	 * the core count for the whole chip is kept.
	 */
	else
		numcores = 1;

	/* scan all SB(s) starting from SB_ENUM_BASE */
	si->numcores = _sb_scan(si, origsba, regs, 0, SB_ENUM_BASE, numcores);
	if (si->numcores == 0)
		return;

	/* figure out bus/orignal core idx */
	si->sb.buscorerev = NOREV;
	si->sb.buscoreidx = BADIDX;

	pci = pcie = FALSE;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	/* visit each discovered core: record host-bus core candidates and
	 * re-find the core that was selected when we entered
	 */
	for (i = 0; i < si->numcores; i++) {
		sb_setcoreidx(&si->sb, i);

		if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
			if (si->coreid[i] == SB_PCI) {
				pciidx = i;
				pcirev = sb_corerev(&si->sb);
				pci = TRUE;
			} else if (si->coreid[i] == SB_PCIE) {
				pcieidx = i;
				pcierev = sb_corerev(&si->sb);
				pcie = TRUE;
			}
		} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
			if (si->coreid[i] == SB_PCMCIA) {
				si->sb.buscorerev = sb_corerev(&si->sb);
				si->sb.buscoretype = si->coreid[i];
				si->sb.buscoreidx = i;
			}
		}

		/* find the core idx before entering this func. */
		if (origsba == si->coresba[i])
			origidx = i;
	}

	/* chips with both a pci and a pcie core: keep only the active one */
	if (pci && pcie) {
		if (sb_ispcie(si))
			pci = FALSE;
		else
			pcie = FALSE;
	}
	if (pci) {
		si->sb.buscoretype = SB_PCI;
		si->sb.buscorerev = pcirev;
		si->sb.buscoreidx = pciidx;
	} else if (pcie) {
		si->sb.buscoretype = SB_PCIE;
		si->sb.buscorerev = pcierev;
		si->sb.buscoreidx = pcieidx;
	}

	/* return to the original core */
	if (origidx != BADIDX)
		sb_setcoreidx(&si->sb, origidx);
	ASSERT(origidx != BADIDX);
}
/* may be called with core in reset */
void
sb_detach(sb_t *sbh)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);

	if (si == NULL)
		return;

	/* on the native backplane bus each core was REG_MAP'd separately */
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
			if (si->regs[idx]) {
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;
			}

	/* the static kernel sb_info (ksi) is never heap-allocated; only
	 * free dynamically allocated instances
	 */
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
	if (si != &ksi)
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
		MFREE(si->osh, si, sizeof(sb_info_t));
}
1397 /* convert chip number to number of i/o cores */
1398 static uint
1399 BCMINITFN(sb_chip2numcores)(uint chip)
1401 if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
1402 return (6);
1403 if (chip == BCM4704_CHIP_ID)
1404 return (9);
1405 if (chip == BCM5365_CHIP_ID)
1406 return (7);
1408 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
1409 ASSERT(0);
1410 return (1);
1413 /* return index of coreid or BADIDX if not found */
1414 uint
1415 sb_findcoreidx(sb_t *sbh, uint coreid, uint coreunit)
1417 sb_info_t *si;
1418 uint found;
1419 uint i;
1421 si = SB_INFO(sbh);
1423 found = 0;
1425 for (i = 0; i < si->numcores; i++)
1426 if (si->coreid[i] == coreid) {
1427 if (found == coreunit)
1428 return (i);
1429 found++;
1432 return (BADIDX);
1436 * this function changes logical "focus" to the indiciated core,
1437 * must be called with interrupt off.
1438 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1440 void*
1441 sb_setcoreidx(sb_t *sbh, uint coreidx)
1443 sb_info_t *si;
1445 si = SB_INFO(sbh);
1447 if (coreidx >= si->numcores)
1448 return (NULL);
1451 * If the user has provided an interrupt mask enabled function,
1452 * then assert interrupts are disabled before switching the core.
1454 ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
1456 si->curmap = _sb_setcoreidx(si, coreidx);
1457 si->curidx = coreidx;
1459 return (si->curmap);
/* This function changes the logical "focus" to the indiciated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(sb_info_t *si, uint coreidx)
{
	uint32 sbaddr = si->coresba[coreidx];
	void *regs;

	/* how the core window is selected depends on the host bus type */
	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS:
		/* map new one */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = si->curmap;
		break;

	case PCMCIA_BUS: {
		/* program the 28-bit backplane address a byte at a time into
		 * the PCMCIA attribute-space address registers
		 */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = si->curmap;
		break;
	}
#ifdef BCMJTAG
	case JTAG_BUS:
		/* map new one */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;
#endif	/* BCMJTAG */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
1519 * this function changes logical "focus" to the indiciated core,
1520 * must be called with interrupt off.
1521 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1523 void*
1524 sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
1526 uint idx;
1528 idx = sb_findcoreidx(sbh, coreid, coreunit);
1529 if (!GOODIDX(idx))
1530 return (NULL);
1532 return (sb_setcoreidx(sbh, idx));
1535 /* return chip number */
1536 uint
1537 BCMINITFN(sb_chip)(sb_t *sbh)
1539 sb_info_t *si;
1541 si = SB_INFO(sbh);
1542 return (si->sb.chip);
1545 /* return chip revision number */
1546 uint
1547 BCMINITFN(sb_chiprev)(sb_t *sbh)
1549 sb_info_t *si;
1551 si = SB_INFO(sbh);
1552 return (si->sb.chiprev);
1555 /* return chip common revision number */
1556 uint
1557 BCMINITFN(sb_chipcrev)(sb_t *sbh)
1559 sb_info_t *si;
1561 si = SB_INFO(sbh);
1562 return (si->sb.ccrev);
1565 /* return chip package option */
1566 uint
1567 BCMINITFN(sb_chippkg)(sb_t *sbh)
1569 sb_info_t *si;
1571 si = SB_INFO(sbh);
1572 return (si->sb.chippkg);
1575 /* return PCI core rev. */
1576 uint
1577 BCMINITFN(sb_pcirev)(sb_t *sbh)
1579 sb_info_t *si;
1581 si = SB_INFO(sbh);
1582 return (si->sb.buscorerev);
1585 bool
1586 BCMINITFN(sb_war16165)(sb_t *sbh)
1588 sb_info_t *si;
1590 si = SB_INFO(sbh);
1592 return (PCI(si) && (si->sb.buscorerev <= 10));
/* WAR 30841: retune the PCIE serdes receiver via MDIO.
 * NOTE(review): write order is preserved as-is; assumed significant for the serdes.
 */
static void
BCMINITFN(sb_war30841)(sb_info_t *si)
{
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
}
1603 /* return PCMCIA core rev. */
1604 uint
1605 BCMINITFN(sb_pcmciarev)(sb_t *sbh)
1607 sb_info_t *si;
1609 si = SB_INFO(sbh);
1610 return (si->sb.buscorerev);
1613 /* return board vendor id */
1614 uint
1615 BCMINITFN(sb_boardvendor)(sb_t *sbh)
1617 sb_info_t *si;
1619 si = SB_INFO(sbh);
1620 return (si->sb.boardvendor);
1623 /* return boardtype */
1624 uint
1625 BCMINITFN(sb_boardtype)(sb_t *sbh)
1627 sb_info_t *si;
1628 char *var;
1630 si = SB_INFO(sbh);
1632 if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
1633 /* boardtype format is a hex string */
1634 si->sb.boardtype = getintvar(NULL, "boardtype");
1636 /* backward compatibility for older boardtype string format */
1637 if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
1638 if (!strcmp(var, "bcm94710dev"))
1639 si->sb.boardtype = BCM94710D_BOARD;
1640 else if (!strcmp(var, "bcm94710ap"))
1641 si->sb.boardtype = BCM94710AP_BOARD;
1642 else if (!strcmp(var, "bu4710"))
1643 si->sb.boardtype = BU4710_BOARD;
1644 else if (!strcmp(var, "bcm94702mn"))
1645 si->sb.boardtype = BCM94702MN_BOARD;
1646 else if (!strcmp(var, "bcm94710r1"))
1647 si->sb.boardtype = BCM94710R1_BOARD;
1648 else if (!strcmp(var, "bcm94710r4"))
1649 si->sb.boardtype = BCM94710R4_BOARD;
1650 else if (!strcmp(var, "bcm94702cpci"))
1651 si->sb.boardtype = BCM94702CPCI_BOARD;
1652 else if (!strcmp(var, "bcm95380_rr"))
1653 si->sb.boardtype = BCM95380RR_BOARD;
1657 return (si->sb.boardtype);
1660 /* return bus type of sbh device */
1661 uint
1662 sb_bus(sb_t *sbh)
1664 sb_info_t *si;
1666 si = SB_INFO(sbh);
1667 return (si->sb.bustype);
1670 /* return bus core type */
1671 uint
1672 sb_buscoretype(sb_t *sbh)
1674 sb_info_t *si;
1676 si = SB_INFO(sbh);
1678 return (si->sb.buscoretype);
1681 /* return bus core revision */
1682 uint
1683 sb_buscorerev(sb_t *sbh)
1685 sb_info_t *si;
1686 si = SB_INFO(sbh);
1688 return (si->sb.buscorerev);
1691 /* return list of found cores */
1692 uint
1693 sb_corelist(sb_t *sbh, uint coreid[])
1695 sb_info_t *si;
1697 si = SB_INFO(sbh);
1699 bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
1700 return (si->numcores);
1703 /* return current register mapping */
1704 void *
1705 sb_coreregs(sb_t *sbh)
1707 sb_info_t *si;
1709 si = SB_INFO(sbh);
1710 ASSERT(GOODREGS(si->curmap));
1712 return (si->curmap);
#if defined(BCMDBG_ASSERT)
/* traverse all cores to find and clear source of serror */
static void
sb_serr_clear(sb_info_t *si)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void * corereg = NULL;

	/* whole sweep runs with interrupts off since it refocuses cores */
	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(&si->sb);

	for (i = 0; i < si->numcores; i++) {
		corereg = sb_setcoreidx(&si->sb, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			/* clear the SError flag in any core that raised it */
			if ((R_SBREG(si, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(si, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SB_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&si->sb)));
			}
		}
	}

	/* restore original core focus */
	sb_setcoreidx(&si->sb, origidx);
	INTR_RESTORE(si, intr_val);
}
/*
 * Check if any inband, outband or timeout errors has happened and clear them.
 * Must be called with chip clk on !
 */
bool
sb_taclear(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;
	uint origidx;
	uint intr_val = 0;
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	void *corereg = NULL;
	volatile uint32 imstate, tmstate;

	si = SB_INFO(sbh);

	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		volatile uint32 stcmd;

		/* inband error is Target abort for PCI */
		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32));
		inband = stcmd & PCI_CFG_CMD_STAT_TA;
		if (inband) {
			/* status bits are write-1-to-clear; write back what we read */
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
		}

		/* serror */
		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32));
		serror = stcmd & PCI_SBIM_STATUS_SERR;
		if (serror) {
			sb_serr_clear(si);
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
		}

		/* timeout */
		imstate = sb_corereg(sbh, si->sb.buscoreidx,
		                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
			sb_corereg(sbh, si->sb.buscoreidx,
			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
			           (imstate & ~(SBIM_IBE | SBIM_TO)));
			/* inband = imstate & SBIM_IBE; same as TA above */
			timeout = imstate & SBIM_TO;
			if (timeout) {
			}
		}

		if (inband) {
			/* dump errlog for sonics >= 2.3 */
			if (si->sb.sonicsrev == SONICS_2_2)
				;
			else {
				uint32 imerrlog, imerrloga;
				imerrlog = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, 0, 0);
				if (imerrlog & SBTMEL_EC) {
					imerrloga = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOGA,
					                       0, 0);
					/* clear errlog */
					sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, ~0, 0);
					SB_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
					          imerrlog, imerrloga));
				}
			}
		}

	} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {

		INTR_OFF(si, intr_val);
		origidx = sb_coreidx(sbh);

		corereg = sb_setcore(sbh, SB_PCMCIA, 0);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(si, &sb->sbimstate);
			/* handle surprise removal */
			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
				inband = imstate & SBIM_IBE;
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(si, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
				if (!inband) {
					serror = 1;
					sb_serr_clear(si);
				}
				/* pulse INT_ACK to acknowledge the target interrupt */
				OR_SBREG(si, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(si, &sb->sbtmstatelow, ~SBTML_INT_ACK);
			}
		}

		sb_setcoreidx(sbh, origidx);
		INTR_RESTORE(si, intr_val);
	}

	if (inband | timeout | serror) {
		rc = TRUE;
		SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
		          inband, serror, timeout));
	}

	return (rc);
}
#endif
/* do buffered registers update */
void
sb_commit(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	origidx = si->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(si, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (si->sb.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(si->osh, &ccregs->broadcastdata, 0x0);
	} else if (PCI(si)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(si->osh, &pciregs->bcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
}
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(sb_t *sbh, uint32 bits, uint32 resetbits)
{
	sb_info_t *si;
	sbconfig_t *sb;
	volatile uint32 dummy;	/* read-back forces the preceding write to post */

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* clear any stale serror / inband / timeout state left from before reset */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
/* Fix up the current core's initiator request/service timeouts.
 * Only applies to old PCI bus cores (rev < 5); PCIE and newer PCI
 * cores are skipped.
 */
void
sb_core_tofixup(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			/* non-PCI cores get their initiator timeouts disabled */
			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	sb_commit(sbh);
}
1971 * Set the initiator timeout for the "master core".
1972 * The master core is defined to be the core in control
1973 * of the chip and so it issues accesses to non-memory
1974 * locations (Because of dma *any* core can access memeory).
1976 * The routine uses the bus to decide who is the master:
1977 * SB_BUS => mips
1978 * JTAG_BUS => chipc
1979 * PCI_BUS => pci or pcie
1980 * PCMCIA_BUS => pcmcia
1981 * SDIO_BUS => pcmcia
1983 * This routine exists so callers can disable initiator
1984 * timeouts so accesses to very slow devices like otp
1985 * won't cause an abort. The routine allows arbitrary
1986 * settings of the service and request timeouts, though.
1988 * Returns the timeout state before changing it or -1
1989 * on error.
#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(sb_t *sbh, uint32 to, uint idx)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == error / invalid 'to' */
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* only the RTO/STO bits may be set in 'to' */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(si->sb.bustype)) {
		case PCI_BUS:
			idx = si->sb.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SB_CC_IDX;
			break;
		case PCMCIA_BUS:
			idx = sb_findcoreidx(sbh, SB_PCMCIA, 0);
			break;
		case SB_BUS:
			idx = sb_findcoreidx(sbh, SB_MIPS33, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	/* return previous timeout bits; install the new ones */
	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sbh);
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return ret;
}
/* Put the current core into reset, first rejecting and draining any
 * outstanding backplane transactions so the core quiesces cleanly.
 */
void
sb_core_disable(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	volatile uint32 dummy;	/* read-back forces the preceding write to post */
	uint32 rej;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* reject value changed between sonics 2.2 and 2.3 */
	if (si->sb.sonicsrev == SONICS_2_2)
		rej = (1 << SBTML_REJ_SHIFT);
	else
		rej = (2 << SBTML_REJ_SHIFT);

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(si, &sb->sbtmstatelow, rej);
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SB_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* if the core is an initiator, also reject and drain its requests */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(si, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
	OSL_DELAY(1);
}
/* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
void
sb_watchdog(sb_t *sbh, uint ticks)
{
	/* make sure we come up in fast clock mode; or if clearing, clear clock */
	if (ticks)
		sb_clkctl_clk(sbh, CLK_FAST);
	else
		sb_clkctl_clk(sbh, CLK_DYNAMIC);

#if defined(BCM4328)
	/* 4328: keep the ROM powered so the chip can reboot from it */
	if (sbh->chip == BCM4328_CHIP_ID && ticks != 0)
		sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, min_res_mask),
		           PMURES_BIT(RES4328_ROM_SWITCH),
		           PMURES_BIT(RES4328_ROM_SWITCH));
#endif

	/* instant NMI */
	sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
}
/* initialize the pcmcia core */
void
sb_pcmcia_init(sb_t *sbh)
{
	sb_info_t *si;
	uint8 cor = 0;

	si = SB_INFO(sbh);

	/* enable d11 mac interrupts: read-modify-write the function 0
	 * Configuration Option Register in attribute space
	 */
	OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
	cor |= COR_IRQEN | COR_FUNEN;
	OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
}
/* Apply various WARs when the (PCI-hosted) device comes up */
void
BCMINITFN(sb_pci_up)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	if (FORCEHT_WAR32414(si))
		sb_war32414_forceHT(sbh, 1);

	if (PCIE_ASPMWARS(si) || si->sb.pr42780)
		sb_pcieclkreq(sbh, 1, 0);

	/* 4311a2 / 4312a0: shorten d11 initiator timeout */
	if (PCIE(si) &&
	    (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
	     ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
		sb_set_initiator_to((void *)si, 0x3, sb_findcoreidx((void *)si, SB_D11, 0));
}
/* Unconfigure and/or apply various WARs when system is going to sleep mode */
void
BCMUNINITFN(sb_pci_sleep)(sb_t *sbh)
{
	sb_info_t *si;
	uint32 w;
	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	/* clear ASPM L1 in the PCIE Link Control register before sleeping */
	w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
	w &= ~PCIE_CAP_LCREG_ASPML1;
	OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
}
/* Unconfigure and/or apply various WARs when going down */
void
BCMINITFN(sb_pci_down)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	if (FORCEHT_WAR32414(si))
		sb_war32414_forceHT(sbh, 0);

	/* re-allow clkreq when going down (PR42767 / PR42780) */
	if (si->pr42767_war) {
		sb_pcieclkreq(sbh, 1, 1);
		si->pr42767_war = FALSE;
	} else if (si->sb.pr42780) {
		sb_pcieclkreq(sbh, 1, 1);
	}
}
/* WAR 42767: advertise (or hide) clkreq support in the PCIE core's
 * SPROM shadow, depending on whether the workaround override is set.
 */
static void
BCMINITFN(sb_war42767_clkreq)(sb_t *sbh)
{
	sbpcieregs_t *pcieregs;
	uint16 val16, *reg16;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
	val16 = R_REG(si->osh, reg16);
	/* if clockreq is not advertized advertize it */
	if (!si->pcie_war_ovr) {
		val16 |= SRSH_CLKREQ_ENB;
		si->pr42767_war = TRUE;

		si->sb.pr42780 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;
	W_REG(si->osh, reg16, val16);
}
/* WAR 42767: disable the serdes PLL frequency detector if it is enabled */
static void
BCMINITFN(sb_war42767)(sb_t *sbh)
{
	uint32 w = 0;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	sb_pcie_mdioread(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
	if (w & PLL_CTRL_FREQDET_EN) {
		w &= ~PLL_CTRL_FREQDET_EN;
		sb_pcie_mdiowrite(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
	}
}
/*
 * Configure the pci core for pci client (NIC) action
 * coremask is the bitvec of cores by index to be enabled.
 */
void
BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
{
	sb_info_t *si;
	sbconfig_t *sb;
	sbpciregs_t *pciregs;
	uint32 sbflag;
	uint32 w;
	uint idx;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	ASSERT(PCI(si) || PCIE(si));
	ASSERT(si->sb.buscoreidx != BADIDX);

	/* get current core index */
	idx = si->curidx;

	/* we interrupt on this backplane flag number */
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);
	sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;

	/* switch over to pci core */
	pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
	sb = REGS2SB(pciregs);

	/*
	 * Enable sb->pci interrupts. Assume
	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
	 */
	if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
		/* pci config write to set this core bit in PCIIntMask */
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
		w |= (coremask << PCI_SBIM_SHIFT);
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
	} else {
		/* set sbintvec bit for our flag number */
		OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
	}

	if (PCI(si)) {
		/* enable prefetch/burst on the sb->pci translation */
		OR_REG(si->osh, &pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
		if (si->sb.buscorerev >= 11)
			OR_REG(si->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
		if (si->sb.buscorerev < 5) {
			/* old pci cores need fixed initiator timeouts */
			SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
			sb_commit(sbh);
		}
	}

	/* PCIE workarounds */
	if (PCIE(si)) {
		/* rev 0/1: TLP workarounds register fix */
		if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_TLP_WORKAROUNDSREG);
			w |= 0x8;
			sb_pcie_writereg((void *)(uintptr)sbh,
			                 (void *)(uintptr)PCIE_PCIEREGS,
			                 PCIE_TLP_WORKAROUNDSREG, w);
		}

		/* rev 1: DLLP link control fix */
		if (si->sb.buscorerev == 1) {
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_DLLP_LCREG);
			w |= (0x40);
			sb_pcie_writereg((void *)(uintptr)sbh,
			                 (void *)(uintptr)PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
		}

		if (si->sb.buscorerev == 0)
			sb_war30841(si);

		/* rev 3..5: L1 threshold plus the 43448/42767 WAR set */
		if ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)) {
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_DLLP_PMTHRESHREG);
			w &= ~(PCIE_L1THRESHOLDTIME_MASK);
			w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
			sb_pcie_writereg((void *)(uintptr)sbh, (void *)(uintptr)PCIE_PCIEREGS,
			                 PCIE_DLLP_PMTHRESHREG, w);

			sb_war43448(sbh);

			sb_war42767(sbh);

			sb_war43448_aspm(sbh);
			sb_war42767_clkreq(sbh);
		}
	}

	/* switch back to previous core */
	sb_setcoreidx(sbh, idx);
}
2356 uint32
2357 sb_base(uint32 admatch)
2359 uint32 base;
2360 uint type;
2362 type = admatch & SBAM_TYPE_MASK;
2363 ASSERT(type < 3);
2365 base = 0;
2367 if (type == 0) {
2368 base = admatch & SBAM_BASE0_MASK;
2369 } else if (type == 1) {
2370 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2371 base = admatch & SBAM_BASE1_MASK;
2372 } else if (type == 2) {
2373 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2374 base = admatch & SBAM_BASE2_MASK;
2377 return (base);
2380 uint32
2381 sb_size(uint32 admatch)
2383 uint32 size;
2384 uint type;
2386 type = admatch & SBAM_TYPE_MASK;
2387 ASSERT(type < 3);
2389 size = 0;
2391 if (type == 0) {
2392 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
2393 } else if (type == 1) {
2394 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2395 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
2396 } else if (type == 2) {
2397 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2398 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
2401 return (size);
2404 /* return the core-type instantiation # of the current core */
2405 uint
2406 sb_coreunit(sb_t *sbh)
2408 sb_info_t *si;
2409 uint idx;
2410 uint coreid;
2411 uint coreunit;
2412 uint i;
2414 si = SB_INFO(sbh);
2415 coreunit = 0;
2417 idx = si->curidx;
2419 ASSERT(GOODREGS(si->curmap));
2420 coreid = sb_coreid(sbh);
2422 /* count the cores of our type */
2423 for (i = 0; i < idx; i++)
2424 if (si->coreid[i] == coreid)
2425 coreunit++;
2427 return (coreunit);
2430 static uint32
2431 BCMINITFN(factor6)(uint32 x)
2433 switch (x) {
2434 case CC_F6_2: return 2;
2435 case CC_F6_3: return 3;
2436 case CC_F6_4: return 4;
2437 case CC_F6_5: return 5;
2438 case CC_F6_6: return 6;
2439 case CC_F6_7: return 7;
2440 default: return 0;
2444 /* calculate the speed the SB would run at given a set of clockcontrol values */
2445 uint32
2446 BCMINITFN(sb_clock_rate)(uint32 pll_type, uint32 n, uint32 m)
2448 uint32 n1, n2, clock, m1, m2, m3, mc;
2450 n1 = n & CN_N1_MASK;
2451 n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
2453 if (pll_type == PLL_TYPE6) {
2454 if (m & CC_T6_MMASK)
2455 return CC_T6_M1;
2456 else
2457 return CC_T6_M0;
2458 } else if ((pll_type == PLL_TYPE1) ||
2459 (pll_type == PLL_TYPE3) ||
2460 (pll_type == PLL_TYPE4) ||
2461 (pll_type == PLL_TYPE7)) {
2462 n1 = factor6(n1);
2463 n2 += CC_F5_BIAS;
2464 } else if (pll_type == PLL_TYPE2) {
2465 n1 += CC_T2_BIAS;
2466 n2 += CC_T2_BIAS;
2467 ASSERT((n1 >= 2) && (n1 <= 7));
2468 ASSERT((n2 >= 5) && (n2 <= 23));
2469 } else if (pll_type == PLL_TYPE5) {
2470 return (100000000);
2471 } else
2472 ASSERT(0);
2473 /* PLL types 3 and 7 use BASE2 (25Mhz) */
2474 if ((pll_type == PLL_TYPE3) ||
2475 (pll_type == PLL_TYPE7)) {
2476 clock = CC_CLOCK_BASE2 * n1 * n2;
2477 } else
2478 clock = CC_CLOCK_BASE1 * n1 * n2;
2480 if (clock == 0)
2481 return 0;
2483 m1 = m & CC_M1_MASK;
2484 m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
2485 m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
2486 mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
2488 if ((pll_type == PLL_TYPE1) ||
2489 (pll_type == PLL_TYPE3) ||
2490 (pll_type == PLL_TYPE4) ||
2491 (pll_type == PLL_TYPE7)) {
2492 m1 = factor6(m1);
2493 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
2494 m2 += CC_F5_BIAS;
2495 else
2496 m2 = factor6(m2);
2497 m3 = factor6(m3);
2499 switch (mc) {
2500 case CC_MC_BYPASS: return (clock);
2501 case CC_MC_M1: return (clock / m1);
2502 case CC_MC_M1M2: return (clock / (m1 * m2));
2503 case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
2504 case CC_MC_M1M3: return (clock / (m1 * m3));
2505 default: return (0);
2507 } else {
2508 ASSERT(pll_type == PLL_TYPE2);
2510 m1 += CC_T2_BIAS;
2511 m2 += CC_T2M2_BIAS;
2512 m3 += CC_T2_BIAS;
2513 ASSERT((m1 >= 2) && (m1 <= 7));
2514 ASSERT((m2 >= 3) && (m2 <= 10));
2515 ASSERT((m3 >= 2) && (m3 <= 7));
2517 if ((mc & CC_T2MC_M1BYP) == 0)
2518 clock /= m1;
2519 if ((mc & CC_T2MC_M2BYP) == 0)
2520 clock /= m2;
2521 if ((mc & CC_T2MC_M3BYP) == 0)
2522 clock /= m3;
2524 return (clock);
2528 /* returns the current speed the SB is running at */
2529 uint32
2530 BCMINITFN(sb_clock)(sb_t *sbh)
2532 sb_info_t *si;
2533 chipcregs_t *cc;
2534 uint32 n, m;
2535 uint idx;
2536 uint32 pll_type, rate;
2537 uint intr_val = 0;
2539 si = SB_INFO(sbh);
2540 idx = si->curidx;
2541 pll_type = PLL_TYPE1;
2543 INTR_OFF(si, intr_val);
2545 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
2546 ASSERT(cc);
2548 if (sbh->cccaps & CC_CAP_PMU) {
2549 rate = sb_pmu_cpu_clock(sbh, si->osh);
2550 goto exit;
2553 pll_type = sbh->cccaps & CC_CAP_PLL_MASK;
2554 n = R_REG(si->osh, &cc->clockcontrol_n);
2555 if (pll_type == PLL_TYPE6)
2556 m = R_REG(si->osh, &cc->clockcontrol_m3);
2557 else if (pll_type == PLL_TYPE3)
2558 m = R_REG(si->osh, &cc->clockcontrol_m2);
2559 else
2560 m = R_REG(si->osh, &cc->clockcontrol_sb);
2562 if (sb_chip(sbh) == BCM5365_CHIP_ID)
2564 rate = 200000000; /* PLL_TYPE3 */
2565 } else {
2566 /* calculate rate */
2567 rate = sb_clock_rate(pll_type, n, m);
2570 if (pll_type == PLL_TYPE3)
2571 rate = rate / 2;
2573 exit:
2574 /* switch back to previous core */
2575 sb_setcoreidx(sbh, idx);
2577 INTR_RESTORE(si, intr_val);
2579 return rate;
2582 uint32
2583 BCMINITFN(sb_alp_clock)(sb_t *sbh)
2585 uint32 clock = ALP_CLOCK;
2587 if (sbh->cccaps & CC_CAP_PMU)
2588 clock = sb_pmu_alp_clock(sbh, sb_osh(sbh));
2590 return clock;
2593 /* change logical "focus" to the gpio core for optimized access */
2594 void*
2595 sb_gpiosetcore(sb_t *sbh)
2597 sb_info_t *si;
2599 si = SB_INFO(sbh);
2601 return (sb_setcoreidx(sbh, SB_CC_IDX));
2604 /* mask&set gpiocontrol bits */
2605 uint32
2606 sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2608 sb_info_t *si;
2609 uint regoff;
2611 si = SB_INFO(sbh);
2612 regoff = 0;
2614 /* gpios could be shared on router platforms
2615 * ignore reservation if it's high priority (e.g., test apps)
2617 if ((priority != GPIO_HI_PRIORITY) &&
2618 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2619 mask = priority ? (sb_gpioreservation & mask) :
2620 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2621 val &= mask;
2624 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
2625 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2628 /* mask&set gpio output enable bits */
2629 uint32
2630 sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2632 sb_info_t *si;
2633 uint regoff;
2635 si = SB_INFO(sbh);
2636 regoff = 0;
2638 /* gpios could be shared on router platforms
2639 * ignore reservation if it's high priority (e.g., test apps)
2641 if ((priority != GPIO_HI_PRIORITY) &&
2642 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2643 mask = priority ? (sb_gpioreservation & mask) :
2644 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2645 val &= mask;
2648 regoff = OFFSETOF(chipcregs_t, gpioouten);
2649 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2652 /* mask&set gpio output bits */
2653 uint32
2654 sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2656 sb_info_t *si;
2657 uint regoff;
2659 si = SB_INFO(sbh);
2660 regoff = 0;
2662 /* gpios could be shared on router platforms
2663 * ignore reservation if it's high priority (e.g., test apps)
2665 if ((priority != GPIO_HI_PRIORITY) &&
2666 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2667 mask = priority ? (sb_gpioreservation & mask) :
2668 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2669 val &= mask;
2672 regoff = OFFSETOF(chipcregs_t, gpioout);
2673 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2676 /* reserve one gpio */
2677 uint32
2678 sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2680 sb_info_t *si;
2682 si = SB_INFO(sbh);
2684 /* only cores on SB_BUS share GPIO's and only applcation users need to
2685 * reserve/release GPIO
2687 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2688 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2689 return -1;
2691 /* make sure only one bit is set */
2692 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2693 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2694 return -1;
2697 /* already reserved */
2698 if (sb_gpioreservation & gpio_bitmask)
2699 return -1;
2700 /* set reservation */
2701 sb_gpioreservation |= gpio_bitmask;
2703 return sb_gpioreservation;
2706 /* release one gpio */
2708 * releasing the gpio doesn't change the current value on the GPIO last write value
2709 * persists till some one overwrites it
2712 uint32
2713 sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2715 sb_info_t *si;
2717 si = SB_INFO(sbh);
2719 /* only cores on SB_BUS share GPIO's and only applcation users need to
2720 * reserve/release GPIO
2722 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2723 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2724 return -1;
2726 /* make sure only one bit is set */
2727 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2728 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2729 return -1;
2732 /* already released */
2733 if (!(sb_gpioreservation & gpio_bitmask))
2734 return -1;
2736 /* clear reservation */
2737 sb_gpioreservation &= ~gpio_bitmask;
2739 return sb_gpioreservation;
2742 /* return the current gpioin register value */
2743 uint32
2744 sb_gpioin(sb_t *sbh)
2746 sb_info_t *si;
2747 uint regoff;
2749 si = SB_INFO(sbh);
2750 regoff = 0;
2752 regoff = OFFSETOF(chipcregs_t, gpioin);
2753 return (sb_corereg(sbh, SB_CC_IDX, regoff, 0, 0));
2756 /* mask&set gpio interrupt polarity bits */
2757 uint32
2758 sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2760 sb_info_t *si;
2761 uint regoff;
2763 si = SB_INFO(sbh);
2764 regoff = 0;
2766 /* gpios could be shared on router platforms */
2767 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2768 mask = priority ? (sb_gpioreservation & mask) :
2769 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2770 val &= mask;
2773 regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
2774 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2777 /* mask&set gpio interrupt mask bits */
2778 uint32
2779 sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2781 sb_info_t *si;
2782 uint regoff;
2784 si = SB_INFO(sbh);
2785 regoff = 0;
2787 /* gpios could be shared on router platforms */
2788 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2789 mask = priority ? (sb_gpioreservation & mask) :
2790 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2791 val &= mask;
2794 regoff = OFFSETOF(chipcregs_t, gpiointmask);
2795 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2798 /* assign the gpio to an led */
2799 uint32
2800 sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
2802 sb_info_t *si;
2804 si = SB_INFO(sbh);
2805 if (si->sb.ccrev < 16)
2806 return -1;
2808 /* gpio led powersave reg */
2809 return (sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
2812 /* mask&set gpio timer val */
2813 uint32
2814 sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
2816 sb_info_t *si;
2817 si = SB_INFO(sbh);
2819 if (si->sb.ccrev < 16)
2820 return -1;
2822 return (sb_corereg(sbh, SB_CC_IDX,
2823 OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
2826 uint32
2827 sb_gpiopull(sb_t *sbh, bool updown, uint32 mask, uint32 val)
2829 sb_info_t *si;
2830 uint offs;
2832 si = SB_INFO(sbh);
2833 if (si->sb.ccrev < 20)
2834 return -1;
2836 offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
2837 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2840 uint32
2841 sb_gpioevent(sb_t *sbh, uint regtype, uint32 mask, uint32 val)
2843 sb_info_t *si;
2844 uint offs;
2846 si = SB_INFO(sbh);
2847 if (si->sb.ccrev < 11)
2848 return -1;
2850 if (regtype == GPIO_REGEVT)
2851 offs = OFFSETOF(chipcregs_t, gpioevent);
2852 else if (regtype == GPIO_REGEVT_INTMSK)
2853 offs = OFFSETOF(chipcregs_t, gpioeventintmask);
2854 else if (regtype == GPIO_REGEVT_INTPOL)
2855 offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
2856 else
2857 return -1;
2859 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2862 void*
2863 BCMINITFN(sb_gpio_handler_register)(sb_t *sbh, uint32 event,
2864 bool level, gpio_handler_t cb, void *arg)
2866 sb_info_t *si;
2867 gpioh_item_t *gi;
2869 ASSERT(event);
2870 ASSERT(cb);
2872 si = SB_INFO(sbh);
2873 if (si->sb.ccrev < 11)
2874 return NULL;
2876 if ((gi = MALLOC(si->osh, sizeof(gpioh_item_t))) == NULL)
2877 return NULL;
2879 bzero(gi, sizeof(gpioh_item_t));
2880 gi->event = event;
2881 gi->handler = cb;
2882 gi->arg = arg;
2883 gi->level = level;
2885 gi->next = si->gpioh_head;
2886 si->gpioh_head = gi;
2888 return (void*)(gi);
2891 void
2892 BCMINITFN(sb_gpio_handler_unregister)(sb_t *sbh, void* gpioh)
2894 sb_info_t *si;
2895 gpioh_item_t *p, *n;
2897 si = SB_INFO(sbh);
2898 if (si->sb.ccrev < 11)
2899 return;
2901 ASSERT(si->gpioh_head);
2902 if ((void*)si->gpioh_head == gpioh) {
2903 si->gpioh_head = si->gpioh_head->next;
2904 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
2905 return;
2907 else {
2908 p = si->gpioh_head;
2909 n = p->next;
2910 while (n) {
2911 if ((void*)n == gpioh) {
2912 p->next = n->next;
2913 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
2914 return;
2916 p = n;
2917 n = n->next;
2921 ASSERT(0); /* Not found in list */
2924 void
2925 sb_gpio_handler_process(sb_t *sbh)
2927 sb_info_t *si;
2928 gpioh_item_t *h;
2929 uint32 status;
2930 uint32 level = sb_gpioin(sbh);
2931 uint32 edge = sb_gpioevent(sbh, GPIO_REGEVT, 0, 0);
2933 si = SB_INFO(sbh);
2934 for (h = si->gpioh_head; h != NULL; h = h->next) {
2935 if (h->handler) {
2936 status = (h->level ? level : edge);
2938 if (status & h->event)
2939 h->handler(status, h->arg);
2943 sb_gpioevent(sbh, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
2946 uint32
2947 sb_gpio_int_enable(sb_t *sbh, bool enable)
2949 sb_info_t *si;
2950 uint offs;
2952 si = SB_INFO(sbh);
2953 if (si->sb.ccrev < 11)
2954 return -1;
2956 offs = OFFSETOF(chipcregs_t, intmask);
2957 return (sb_corereg(sbh, SB_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
2961 /* return the slow clock source - LPO, XTAL, or PCI */
2962 static uint
2963 sb_slowclk_src(sb_info_t *si)
2965 chipcregs_t *cc;
2968 ASSERT(sb_coreid(&si->sb) == SB_CC);
2970 if (si->sb.ccrev < 6) {
2971 if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
2972 (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32)) &
2973 PCI_CFG_GPIO_SCS))
2974 return (SCC_SS_PCI);
2975 else
2976 return (SCC_SS_XTAL);
2977 } else if (si->sb.ccrev < 10) {
2978 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
2979 return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
2980 } else /* Insta-clock */
2981 return (SCC_SS_XTAL);
2984 /* return the ILP (slowclock) min or max frequency */
2985 static uint
2986 sb_slowclk_freq(sb_info_t *si, bool max_freq)
2988 chipcregs_t *cc;
2989 uint32 slowclk;
2990 uint div;
2993 ASSERT(sb_coreid(&si->sb) == SB_CC);
2995 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
2997 /* shouldn't be here unless we've established the chip has dynamic clk control */
2998 ASSERT(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
3000 slowclk = sb_slowclk_src(si);
3001 if (si->sb.ccrev < 6) {
3002 if (slowclk == SCC_SS_PCI)
3003 return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
3004 else
3005 return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
3006 } else if (si->sb.ccrev < 10) {
3007 div = 4 * (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
3008 if (slowclk == SCC_SS_LPO)
3009 return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
3010 else if (slowclk == SCC_SS_XTAL)
3011 return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
3012 else if (slowclk == SCC_SS_PCI)
3013 return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
3014 else
3015 ASSERT(0);
3016 } else {
3017 /* Chipc rev 10 is InstaClock */
3018 div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
3019 div = 4 * (div + 1);
3020 return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
3022 return (0);
3025 static void
3026 BCMINITFN(sb_clkctl_setdelay)(sb_info_t *si, void *chipcregs)
3028 chipcregs_t * cc;
3029 uint slowmaxfreq, pll_delay, slowclk;
3030 uint pll_on_delay, fref_sel_delay;
3032 pll_delay = PLL_DELAY;
3034 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
3035 * since the xtal will also be powered down by dynamic clk control logic.
3038 slowclk = sb_slowclk_src(si);
3039 if (slowclk != SCC_SS_XTAL)
3040 pll_delay += XTAL_ON_DELAY;
3042 /* Starting with 4318 it is ILP that is used for the delays */
3043 slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
3045 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
3046 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
3048 cc = (chipcregs_t *)chipcregs;
3049 W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
3050 W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
3053 /* initialize power control delay registers */
3054 void
3055 BCMINITFN(sb_clkctl_init)(sb_t *sbh)
3057 sb_info_t *si;
3058 uint origidx;
3059 chipcregs_t *cc;
3061 si = SB_INFO(sbh);
3063 origidx = si->curidx;
3065 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
3066 return;
3068 if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
3069 W_REG(si->osh, &cc->chipcontrol,
3070 (si->sb.chiprev == 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);
3072 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL))
3073 goto done;
3075 /* set all Instaclk chip ILP to 1 MHz */
3076 if (si->sb.ccrev >= 10)
3077 SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
3078 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
3080 sb_clkctl_setdelay(si, (void *)(uintptr)cc);
3082 done:
3083 sb_setcoreidx(sbh, origidx);
3086 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
3087 uint16
3088 BCMINITFN(sb_clkctl_fast_pwrup_delay)(sb_t *sbh)
3090 sb_info_t *si;
3091 uint origidx;
3092 chipcregs_t *cc;
3093 uint slowminfreq;
3094 uint16 fpdelay;
3095 uint intr_val = 0;
3097 si = SB_INFO(sbh);
3098 fpdelay = 0;
3099 origidx = si->curidx;
3101 INTR_OFF(si, intr_val);
3103 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
3104 goto done;
3106 if (sbh->cccaps & CC_CAP_PMU) {
3107 fpdelay = sb_pmu_fast_pwrup_delay(sbh, si->osh);
3108 goto done;
3111 if (!(sbh->cccaps & CC_CAP_PWR_CTL))
3112 goto done;
3114 slowminfreq = sb_slowclk_freq(si, FALSE);
3115 fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
3116 (slowminfreq - 1)) / slowminfreq;
3118 done:
3119 sb_setcoreidx(sbh, origidx);
3120 INTR_RESTORE(si, intr_val);
3121 return (fpdelay);
3124 /* turn primary xtal and/or pll off/on */
3126 sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
3128 sb_info_t *si;
3129 uint32 in, out, outen;
3131 si = SB_INFO(sbh);
3133 switch (BUSTYPE(si->sb.bustype)) {
3136 case PCMCIA_BUS:
3137 return (0);
3140 case PCI_BUS:
3142 /* pcie core doesn't have any mapping to control the xtal pu */
3143 if (PCIE(si))
3144 return -1;
3146 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
3147 out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
3148 outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32));
3151 * Avoid glitching the clock if GPRS is already using it.
3152 * We can't actually read the state of the PLLPD so we infer it
3153 * by the value of XTAL_PU which *is* readable via gpioin.
3155 if (on && (in & PCI_CFG_GPIO_XTAL))
3156 return (0);
3158 if (what & XTAL)
3159 outen |= PCI_CFG_GPIO_XTAL;
3160 if (what & PLL)
3161 outen |= PCI_CFG_GPIO_PLL;
3163 if (on) {
3164 /* turn primary xtal on */
3165 if (what & XTAL) {
3166 out |= PCI_CFG_GPIO_XTAL;
3167 if (what & PLL)
3168 out |= PCI_CFG_GPIO_PLL;
3169 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3170 sizeof(uint32), out);
3171 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
3172 sizeof(uint32), outen);
3173 OSL_DELAY(XTAL_ON_DELAY);
3176 /* turn pll on */
3177 if (what & PLL) {
3178 out &= ~PCI_CFG_GPIO_PLL;
3179 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3180 sizeof(uint32), out);
3181 OSL_DELAY(2000);
3183 } else {
3184 if (what & XTAL)
3185 out &= ~PCI_CFG_GPIO_XTAL;
3186 if (what & PLL)
3187 out |= PCI_CFG_GPIO_PLL;
3188 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32), out);
3189 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32),
3190 outen);
3193 default:
3194 return (-1);
3197 return (0);
3200 /* set dynamic clk control mode (forceslow, forcefast, dynamic) */
3201 /* returns true if we are forcing fast clock */
3202 bool
3203 sb_clkctl_clk(sb_t *sbh, uint mode)
3205 sb_info_t *si;
3206 uint origidx;
3207 chipcregs_t *cc;
3208 uint32 scc;
3209 uint intr_val = 0;
3211 si = SB_INFO(sbh);
3213 /* chipcommon cores prior to rev6 don't support dynamic clock control */
3214 if (si->sb.ccrev < 6)
3215 return (FALSE);
3218 /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
3219 ASSERT(si->sb.ccrev != 10);
3221 INTR_OFF(si, intr_val);
3223 origidx = si->curidx;
3225 if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
3226 (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
3227 goto done;
3229 if (FORCEHT_WAR32414(si))
3230 goto done;
3232 cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
3233 ASSERT(cc != NULL);
3235 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL) && (si->sb.ccrev < 20))
3236 goto done;
3238 switch (mode) {
3239 case CLK_FAST: /* force fast (pll) clock */
3240 if (si->sb.ccrev < 10) {
3241 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
3242 sb_clkctl_xtal(&si->sb, XTAL, ON);
3244 SET_REG(si->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
3245 } else if (si->sb.ccrev < 20) {
3246 OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
3247 } else {
3248 OR_REG(si->osh, &cc->clk_ctl_st, CCS_FORCEHT);
3251 /* wait for the PLL */
3252 if (R_REG(si->osh, &cc->capabilities) & CC_CAP_PMU) {
3253 SPINWAIT(((R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL) == 0),
3254 PMU_MAX_TRANSITION_DLY);
3255 ASSERT(R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL);
3256 } else {
3257 OSL_DELAY(PLL_DELAY);
3259 break;
3261 case CLK_DYNAMIC: /* enable dynamic clock control */
3262 if (si->sb.ccrev < 10) {
3263 scc = R_REG(si->osh, &cc->slow_clk_ctl);
3264 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
3265 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
3266 scc |= SCC_XC;
3267 W_REG(si->osh, &cc->slow_clk_ctl, scc);
3269 /* for dynamic control, we have to release our xtal_pu "force on" */
3270 if (scc & SCC_XC)
3271 sb_clkctl_xtal(&si->sb, XTAL, OFF);
3272 } else if (si->sb.ccrev < 20) {
3273 /* Instaclock */
3274 AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
3275 } else {
3276 AND_REG(si->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
3278 break;
3280 default:
3281 ASSERT(0);
3284 done:
3285 sb_setcoreidx(sbh, origidx);
3286 INTR_RESTORE(si, intr_val);
3287 return (mode == CLK_FAST);
3290 /* register driver interrupt disabling and restoring callback functions */
3291 void
3292 sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn,
3293 void *intrsenabled_fn, void *intr_arg)
3295 sb_info_t *si;
3297 si = SB_INFO(sbh);
3298 si->intr_arg = intr_arg;
3299 si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
3300 si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
3301 si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
3302 /* save current core id. when this function called, the current core
3303 * must be the core which provides driver functions(il, et, wl, etc.)
3305 si->dev_coreid = si->coreid[si->curidx];
3308 void
3309 sb_deregister_intr_callback(sb_t *sbh)
3311 sb_info_t *si;
3313 si = SB_INFO(sbh);
3314 si->intrsoff_fn = NULL;
3318 uint16
3319 BCMINITFN(sb_d11_devid)(sb_t *sbh)
3321 sb_info_t *si = SB_INFO(sbh);
3322 uint16 device;
3324 #if defined(BCM4328)
3325 /* Fix device id for dual band BCM4328 */
3326 if (sbh->chip == BCM4328_CHIP_ID &&
3327 (sbh->chippkg == BCM4328USBDUAL_PKG_ID || sbh->chippkg == BCM4328SDIODUAL_PKG_ID))
3328 device = BCM4328_D11DUAL_ID;
3329 else
3330 #endif /* BCM4328 */
3331 /* Let an nvram variable with devpath override devid */
3332 if ((device = (uint16)sb_getdevpathintvar(sbh, "devid")) != 0)
3334 /* Get devid from OTP/SPROM depending on where the SROM is read */
3335 else if ((device = (uint16)getintvar(si->vars, "devid")) != 0)
3338 * no longer support wl0id, but keep the code
3339 * here for backward compatibility.
3341 else if ((device = (uint16)getintvar(si->vars, "wl0id")) != 0)
3343 /* Chip specific conversion */
3344 else if (sbh->chip == BCM4712_CHIP_ID) {
3345 if (sbh->chippkg == BCM4712SMALL_PKG_ID)
3346 device = BCM4306_D11G_ID;
3347 else
3348 device = BCM4306_D11DUAL_ID;
3350 /* ignore it */
3351 else
3352 device = 0xffff;
3354 return device;
3358 BCMINITFN(sb_corepciid)(sb_t *sbh, uint func, uint16 *pcivendor, uint16 *pcidevice,
3359 uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
3360 uint8 *pciheader)
3362 uint16 vendor = 0xffff, device = 0xffff;
3363 uint8 class, subclass, progif = 0;
3364 uint8 header = PCI_HEADER_NORMAL;
3365 uint32 core = sb_coreid(sbh);
3367 /* Verify whether the function exists for the core */
3368 if (func >= (uint)(core == SB_USB20H ? 2 : 1))
3369 return BCME_ERROR;
3371 /* Known vendor translations */
3372 switch (sb_corevendor(sbh)) {
3373 case SB_VEND_BCM:
3374 vendor = VENDOR_BROADCOM;
3375 break;
3376 default:
3377 return BCME_ERROR;
3380 /* Determine class based on known core codes */
3381 switch (core) {
3382 case SB_ILINE20:
3383 class = PCI_CLASS_NET;
3384 subclass = PCI_NET_ETHER;
3385 device = BCM47XX_ILINE_ID;
3386 break;
3387 case SB_ENET:
3388 class = PCI_CLASS_NET;
3389 subclass = PCI_NET_ETHER;
3390 device = BCM47XX_ENET_ID;
3391 break;
3392 case SB_GIGETH:
3393 class = PCI_CLASS_NET;
3394 subclass = PCI_NET_ETHER;
3395 device = BCM47XX_GIGETH_ID;
3396 break;
3397 case SB_SDRAM:
3398 case SB_MEMC:
3399 class = PCI_CLASS_MEMORY;
3400 subclass = PCI_MEMORY_RAM;
3401 device = (uint16)core;
3402 break;
3403 case SB_PCI:
3404 case SB_PCIE:
3405 class = PCI_CLASS_BRIDGE;
3406 subclass = PCI_BRIDGE_PCI;
3407 device = (uint16)core;
3408 header = PCI_HEADER_BRIDGE;
3409 break;
3410 case SB_MIPS33:
3411 class = PCI_CLASS_CPU;
3412 subclass = PCI_CPU_MIPS;
3413 device = (uint16)core;
3414 break;
3415 case SB_CODEC:
3416 class = PCI_CLASS_COMM;
3417 subclass = PCI_COMM_MODEM;
3418 device = BCM47XX_V90_ID;
3419 break;
3420 case SB_USB:
3421 class = PCI_CLASS_SERIAL;
3422 subclass = PCI_SERIAL_USB;
3423 progif = 0x10; /* OHCI */
3424 device = BCM47XX_USB_ID;
3425 break;
3426 case SB_USB11H:
3427 class = PCI_CLASS_SERIAL;
3428 subclass = PCI_SERIAL_USB;
3429 progif = 0x10; /* OHCI */
3430 device = BCM47XX_USBH_ID;
3431 break;
3432 case SB_USB20H:
3433 class = PCI_CLASS_SERIAL;
3434 subclass = PCI_SERIAL_USB;
3435 progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
3436 device = BCM47XX_USB20H_ID;
3437 header = 0x80; /* multifunction */
3438 break;
3439 case SB_IPSEC:
3440 class = PCI_CLASS_CRYPT;
3441 subclass = PCI_CRYPT_NETWORK;
3442 device = BCM47XX_IPSEC_ID;
3443 break;
3444 case SB_ROBO:
3445 class = PCI_CLASS_NET;
3446 subclass = PCI_NET_OTHER;
3447 device = BCM47XX_ROBO_ID;
3448 break;
3449 case SB_CC:
3450 class = PCI_CLASS_MEMORY;
3451 subclass = PCI_MEMORY_FLASH;
3452 device = (uint16)core;
3453 break;
3454 case SB_SATAXOR:
3455 class = PCI_CLASS_XOR;
3456 subclass = PCI_XOR_QDMA;
3457 device = BCM47XX_SATAXOR_ID;
3458 break;
3459 case SB_ATA100:
3460 class = PCI_CLASS_DASDI;
3461 subclass = PCI_DASDI_IDE;
3462 device = BCM47XX_ATA100_ID;
3463 break;
3464 case SB_USB11D:
3465 class = PCI_CLASS_SERIAL;
3466 subclass = PCI_SERIAL_USB;
3467 device = BCM47XX_USBD_ID;
3468 break;
3469 case SB_USB20D:
3470 class = PCI_CLASS_SERIAL;
3471 subclass = PCI_SERIAL_USB;
3472 device = BCM47XX_USB20D_ID;
3473 break;
3474 case SB_D11:
3475 class = PCI_CLASS_NET;
3476 subclass = PCI_NET_OTHER;
3477 device = sb_d11_devid(sbh);
3478 break;
3480 default:
3481 class = subclass = progif = 0xff;
3482 device = (uint16)core;
3483 break;
3486 *pcivendor = vendor;
3487 *pcidevice = device;
3488 *pciclass = class;
3489 *pcisubclass = subclass;
3490 *pciprogif = progif;
3491 *pciheader = header;
3493 return 0;
3496 /* use the mdio interface to read from mdio slaves */
3497 static int
3498 sb_pcie_mdioread(sb_info_t *si, uint physmedia, uint regaddr, uint *regval)
3500 uint mdiodata;
3501 uint i = 0;
3502 sbpcieregs_t *pcieregs;
3504 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3505 ASSERT(pcieregs);
3507 /* enable mdio access to SERDES */
3508 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3510 mdiodata = MDIODATA_START | MDIODATA_READ |
3511 (physmedia << MDIODATA_DEVADDR_SHF) |
3512 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA;
3514 W_REG(si->osh, &pcieregs->mdiodata, mdiodata);
3516 PR28829_DELAY();
3518 /* retry till the transaction is complete */
3519 while (i < 10) {
3520 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3521 PR28829_DELAY();
3522 *regval = (R_REG(si->osh, &(pcieregs->mdiodata)) & MDIODATA_MASK);
3523 /* Disable mdio access to SERDES */
3524 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3525 return 0;
3527 OSL_DELAY(1000);
3528 i++;
3531 SB_ERROR(("sb_pcie_mdioread: timed out\n"));
3532 /* Disable mdio access to SERDES */
3533 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3534 return 1;
3538 /* use the mdio interface to write to mdio slaves */
3539 static int
3540 sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint regaddr, uint val)
3542 uint mdiodata;
3543 uint i = 0;
3544 sbpcieregs_t *pcieregs;
3546 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3547 ASSERT(pcieregs);
3549 /* enable mdio access to SERDES */
3550 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3552 mdiodata = MDIODATA_START | MDIODATA_WRITE |
3553 (physmedia << MDIODATA_DEVADDR_SHF) |
3554 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
3556 W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
3558 PR28829_DELAY();
3560 /* retry till the transaction is complete */
3561 while (i < 10) {
3562 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3563 /* Disable mdio access to SERDES */
3564 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3565 return 0;
3567 OSL_DELAY(1000);
3568 i++;
3571 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
3572 /* Disable mdio access to SERDES */
3573 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3574 return 1;
3578 /* indirect way to read pcie config regs */
3579 uint
3580 sb_pcie_readreg(void *sb, void* arg1, uint offset)
3582 sb_info_t *si;
3583 sb_t *sbh;
3584 uint retval = 0xFFFFFFFF;
3585 sbpcieregs_t *pcieregs;
3586 uint addrtype;
3588 sbh = (sb_t *)sb;
3589 si = SB_INFO(sbh);
3590 ASSERT(PCIE(si));
3592 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3593 ASSERT(pcieregs);
3595 addrtype = (uint)((uintptr)arg1);
3596 switch (addrtype) {
3597 case PCIE_CONFIGREGS:
3598 W_REG(si->osh, (&pcieregs->configaddr), offset);
3599 retval = R_REG(si->osh, &(pcieregs->configdata));
3600 break;
3601 case PCIE_PCIEREGS:
3602 W_REG(si->osh, &(pcieregs->pcieindaddr), offset);
3603 retval = R_REG(si->osh, &(pcieregs->pcieinddata));
3604 break;
3605 default:
3606 ASSERT(0);
3607 break;
3609 return retval;
3612 /* indirect way to write pcie config/mdio/pciecore regs */
3613 uint
3614 sb_pcie_writereg(sb_t *sbh, void *arg1, uint offset, uint val)
3616 sb_info_t *si;
3617 sbpcieregs_t *pcieregs;
3618 uint addrtype;
3620 si = SB_INFO(sbh);
3621 ASSERT(PCIE(si));
3623 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3624 ASSERT(pcieregs);
3626 addrtype = (uint)((uintptr)arg1);
3628 switch (addrtype) {
3629 case PCIE_CONFIGREGS:
3630 W_REG(si->osh, (&pcieregs->configaddr), offset);
3631 W_REG(si->osh, (&pcieregs->configdata), val);
3632 break;
3633 case PCIE_PCIEREGS:
3634 W_REG(si->osh, (&pcieregs->pcieindaddr), offset);
3635 W_REG(si->osh, (&pcieregs->pcieinddata), val);
3636 break;
3637 default:
3638 ASSERT(0);
3639 break;
3641 return 0;
3645 /* Build device path. Support SB, PCI, and JTAG for now. */
3647 BCMINITFN(sb_devpath)(sb_t *sbh, char *path, int size)
3649 int slen;
3650 ASSERT(path);
3651 ASSERT(size >= SB_DEVPATH_BUFSZ);
3653 if (!path || size <= 0)
3654 return -1;
3656 switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
3657 case SB_BUS:
3658 case JTAG_BUS:
3659 slen = snprintf(path, (size_t)size, "sb/%u/", sb_coreidx(sbh));
3660 break;
3661 case PCI_BUS:
3662 ASSERT((SB_INFO(sbh))->osh);
3663 slen = snprintf(path, (size_t)size, "pci/%u/%u/",
3664 OSL_PCI_BUS((SB_INFO(sbh))->osh),
3665 OSL_PCI_SLOT((SB_INFO(sbh))->osh));
3666 break;
3667 case PCMCIA_BUS:
3668 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
3669 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
3670 slen = snprintf(path, (size_t)size, "pc/1/1/");
3671 break;
3672 default:
3673 slen = -1;
3674 ASSERT(0);
3675 break;
3678 if (slen < 0 || slen >= size) {
3679 path[0] = '\0';
3680 return -1;
3683 return 0;
3686 /* Get a variable, but only if it has a devpath prefix */
3687 char *
3688 BCMINITFN(sb_getdevpathvar)(sb_t *sbh, const char *name)
3690 char varname[SB_DEVPATH_BUFSZ + 32];
3692 sb_devpathvar(sbh, varname, sizeof(varname), name);
3694 return (getvar(NULL, varname));
3697 /* Get a variable, but only if it has a devpath prefix */
3699 BCMINITFN(sb_getdevpathintvar)(sb_t *sbh, const char *name)
3701 char varname[SB_DEVPATH_BUFSZ + 32];
3703 sb_devpathvar(sbh, varname, sizeof(varname), name);
3705 return (getintvar(NULL, varname));
3708 /* Concatenate the dev path with a varname into the given 'var' buffer
3709 * and return the 'var' pointer.
3710 * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned.
3711 * On overflow, the first char will be set to '\0'.
3713 static char *
3714 BCMINITFN(sb_devpathvar)(sb_t *sbh, char *var, int len, const char *name)
3716 uint path_len;
3718 if (!var || len <= 0)
3719 return var;
3721 if (sb_devpath(sbh, var, len) == 0) {
3722 path_len = strlen(var);
3724 if (strlen(name) + 1 > (uint)(len - path_len))
3725 var[0] = '\0';
3726 else
3727 strncpy(var + path_len, name, len - path_len - 1);
3730 return var;
3735 * Fixup SROMless PCI device's configuration.
3736 * The current core may be changed upon return.
3738 static int
3739 sb_pci_fixcfg(sb_info_t *si)
3741 uint origidx, pciidx;
3742 sbpciregs_t *pciregs;
3743 sbpcieregs_t *pcieregs = NULL;
3744 uint16 val16, *reg16;
3745 uint32 w;
3747 ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
3749 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
3750 /* save the current index */
3751 origidx = sb_coreidx(&si->sb);
3753 /* check 'pi' is correct and fix it if not */
3754 if (si->sb.buscoretype == SB_PCIE) {
3755 pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
3756 ASSERT(pcieregs);
3757 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
3758 } else if (si->sb.buscoretype == SB_PCI) {
3759 pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
3760 ASSERT(pciregs);
3761 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
3762 } else {
3763 ASSERT(0);
3764 return -1;
3766 pciidx = sb_coreidx(&si->sb);
3767 val16 = R_REG(si->osh, reg16);
3768 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
3769 val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
3770 W_REG(si->osh, reg16, val16);
3773 if (PCIE_ASPMWARS(si)) {
3774 w = sb_pcie_readreg((void *)(uintptr)&si->sb, (void *)PCIE_PCIEREGS,
3775 PCIE_PLP_STATUSREG);
3777 /* Detect the current polarity at attach and force that polarity and
3778 * disable changing the polarity
3780 if ((w & PCIE_PLP_POLARITYINV_STAT) == 0) {
3781 si->pcie_polarity = (SERDES_RX_CTRL_FORCE);
3782 } else {
3783 si->pcie_polarity = (SERDES_RX_CTRL_FORCE |
3784 SERDES_RX_CTRL_POLARITY);
3787 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
3788 if (w & PCIE_CLKREQ_ENAB) {
3789 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
3790 val16 = R_REG(si->osh, reg16);
3791 /* if clockreq is not advertized clkreq should not be enabled */
3792 if (!(val16 & SRSH_CLKREQ_ENB))
3793 SB_ERROR(("WARNING: CLK REQ enabled already 0x%x\n", w));
3796 sb_war43448(&si->sb);
3798 sb_war42767(&si->sb);
3802 /* restore the original index */
3803 sb_setcoreidx(&si->sb, origidx);
3805 return 0;
3808 /* Return ADDR64 capability of the backplane */
3809 bool
3810 sb_backplane64(sb_t *sbh)
3812 sb_info_t *si;
3814 si = SB_INFO(sbh);
3815 return ((si->sb.cccaps & CC_CAP_BKPLN64) != 0);
3818 void
3819 sb_btcgpiowar(sb_t *sbh)
3821 sb_info_t *si;
3822 uint origidx;
3823 uint intr_val = 0;
3824 chipcregs_t *cc;
3825 si = SB_INFO(sbh);
3827 /* Make sure that there is ChipCommon core present &&
3828 * UART_TX is strapped to 1
3830 if (!(si->sb.cccaps & CC_CAP_UARTGPIO))
3831 return;
3833 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3834 INTR_OFF(si, intr_val);
3836 origidx = sb_coreidx(sbh);
3838 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
3839 ASSERT(cc);
3841 W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
3843 /* restore the original index */
3844 sb_setcoreidx(sbh, origidx);
3846 INTR_RESTORE(si, intr_val);
3849 /* check if the device is removed */
3850 bool
3851 sb_deviceremoved(sb_t *sbh)
3853 uint32 w;
3854 sb_info_t *si;
3856 si = SB_INFO(sbh);
3858 switch (BUSTYPE(si->sb.bustype)) {
3859 case PCI_BUS:
3860 ASSERT(si->osh);
3861 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
3862 if ((w & 0xFFFF) != VENDOR_BROADCOM)
3863 return TRUE;
3864 else
3865 return FALSE;
3866 default:
3867 return FALSE;
3869 return FALSE;
3872 /* Return the RAM size of the SOCRAM core */
3873 uint32
3874 BCMINITFN(sb_socram_size)(sb_t *sbh)
3876 sb_info_t *si;
3877 uint origidx;
3878 uint intr_val = 0;
3880 sbsocramregs_t *regs;
3881 bool wasup;
3882 uint corerev;
3883 uint32 coreinfo;
3884 uint memsize = 0;
3886 si = SB_INFO(sbh);
3887 ASSERT(si);
3889 /* Block ints and save current core */
3890 INTR_OFF(si, intr_val);
3891 origidx = sb_coreidx(sbh);
3893 /* Switch to SOCRAM core */
3894 if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
3895 goto done;
3897 /* Get info for determining size */
3898 if (!(wasup = sb_iscoreup(sbh)))
3899 sb_core_reset(sbh, 0, 0);
3900 corerev = sb_corerev(sbh);
3901 coreinfo = R_REG(si->osh, &regs->coreinfo);
3903 /* Calculate size from coreinfo based on rev */
3904 if (corerev == 0)
3905 memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
3906 else if (corerev < 3) {
3907 memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
3908 memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3910 else {
3911 uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3912 uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
3913 uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
3914 if (lss != 0)
3915 nb --;
3916 memsize = nb * (1 << (bsz + SR_BSZ_BASE));
3917 if (lss != 0)
3918 memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
3920 /* Return to previous state and core */
3921 if (!wasup)
3922 sb_core_disable(sbh, 0);
3923 sb_setcoreidx(sbh, origidx);
3925 done:
3926 INTR_RESTORE(si, intr_val);
3927 return memsize;