Changes for kernel and Busybox
[tomato.git] / release / src / shared / sbutils.c
blob820cc926a06be6802cd517978ec86587d8d4c91b
1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright 2007, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 * $Id$
 */
15 #include <typedefs.h>
16 #include <bcmdefs.h>
17 #include <osl.h>
18 #include <bcmutils.h>
19 #include <sbutils.h>
20 #include <bcmdevs.h>
21 #include <sbconfig.h>
22 #include <sbchipc.h>
23 #include <sbpci.h>
24 #include <sbpcie.h>
25 #include <pcicfg.h>
26 #include <sbpcmcia.h>
27 #include <sbsocram.h>
28 #include <bcmnvram.h>
29 #include <bcmsrom.h>
30 #include <hndpmu.h>
32 /* debug/trace */
33 #define SB_ERROR(args)
35 #define SB_MSG(args)
37 typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
38 typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
39 typedef bool (*sb_intrsenabled_t)(void *intr_arg);
41 typedef struct gpioh_item {
42 void *arg;
43 bool level;
44 gpio_handler_t handler;
45 uint32 event;
46 struct gpioh_item *next;
47 } gpioh_item_t;
49 /* misc sb info needed by some of the routines */
50 typedef struct sb_info {
52 struct sb_pub sb; /* back plane public state (must be first field) */
54 void *osh; /* osl os handle */
55 void *sdh; /* bcmsdh handle */
57 void *curmap; /* current regs va */
58 void *regs[SB_MAXCORES]; /* other regs va */
60 uint curidx; /* current core index */
61 uint dev_coreid; /* the core provides driver functions */
63 bool memseg; /* flag to toggle MEM_SEG register */
65 uint numcores; /* # discovered cores */
66 uint coreid[SB_MAXCORES]; /* id of each core */
67 uint32 coresba[SB_MAXCORES]; /* backplane address of each core */
69 void *intr_arg; /* interrupt callback function arg */
70 sb_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
71 sb_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
72 sb_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
74 uint8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
75 bool pr42767_war;
76 uint8 pcie_polarity;
77 bool pcie_war_ovr; /* Override ASPM/Clkreq settings */
79 uint8 pmecap_offset; /* PM Capability offset in the config space */
80 bool pmecap; /* Capable of generating PME */
82 gpioh_item_t *gpioh_head; /* GPIO event handlers list */
84 char *vars;
85 uint varsz;
86 } sb_info_t;
88 /* local prototypes */
89 static sb_info_t * sb_doattach(sb_info_t *si, uint devid, osl_t *osh, void *regs,
90 uint bustype, void *sdh, char **vars, uint *varsz);
91 static void sb_scan(sb_info_t *si, void *regs, uint devid);
92 static uint _sb_coreidx(sb_info_t *si, uint32 sba);
93 static uint _sb_scan(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba,
94 uint ncores);
95 static uint32 _sb_coresba(sb_info_t *si);
96 static void *_sb_setcoreidx(sb_info_t *si, uint coreidx);
97 static uint sb_chip2numcores(uint chip);
98 static bool sb_ispcie(sb_info_t *si);
99 static uint8 sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id,
100 uchar *buf, uint32 *buflen);
101 static int sb_pci_fixcfg(sb_info_t *si);
102 /* routines to access mdio slave device registers */
103 static int sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint readdr, uint val);
104 static int sb_pcie_mdioread(sb_info_t *si, uint physmedia, uint readdr, uint *ret_val);
106 /* dev path concatenation util */
107 static char *sb_devpathvar(sb_t *sbh, char *var, int len, const char *name);
109 /* WARs */
110 static void sb_war43448(sb_t *sbh);
111 static void sb_war43448_aspm(sb_t *sbh);
112 static void sb_war32414_forceHT(sb_t *sbh, bool forceHT);
113 static void sb_war30841(sb_info_t *si);
114 static void sb_war42767(sb_t *sbh);
115 static void sb_war42767_clkreq(sb_t *sbh);
117 /* delay needed between the mdio control/ mdiodata register data access */
118 #define PR28829_DELAY() OSL_DELAY(10)
120 /* size that can take bitfielddump */
121 #define BITFIELD_DUMP_SIZE 32
123 /* global variable to indicate reservation/release of gpio's */
124 static uint32 sb_gpioreservation = 0;
126 /* global flag to prevent shared resources from being initialized multiple times in sb_attach() */
127 static bool sb_onetimeinit = FALSE;
129 #define SB_INFO(sbh) (sb_info_t*)(uintptr)sbh
130 #define SET_SBREG(si, r, mask, val) \
131 W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
132 #define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SB_MAXCORES * SB_CORE_SIZE)) && \
133 ISALIGNED((x), SB_CORE_SIZE))
134 #define GOODREGS(regs) ((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
135 #define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
136 #define BADCOREADDR 0
137 #define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
138 #define BADIDX (SB_MAXCORES+1)
139 #define NOREV -1 /* Invalid rev */
141 #define PCI(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
142 #define PCIE(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
143 #define PCMCIA(si) ((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->memseg == TRUE))
145 /* sonicsrev */
146 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
147 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
149 #define R_SBREG(si, sbr) sb_read_sbreg((si), (sbr))
150 #define W_SBREG(si, sbr, v) sb_write_sbreg((si), (sbr), (v))
151 #define AND_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
152 #define OR_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))
155 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
156 * after core switching to avoid invalid register accesss inside ISR.
158 #define INTR_OFF(si, intr_val) \
159 if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
160 intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
161 #define INTR_RESTORE(si, intr_val) \
162 if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
163 (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
165 /* dynamic clock control defines */
166 #define LPOMINFREQ 25000 /* low power oscillator min */
167 #define LPOMAXFREQ 43000 /* low power oscillator max */
168 #define XTALMINFREQ 19800000 /* 20 MHz - 1% */
169 #define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
170 #define PCIMINFREQ 25000000 /* 25 MHz */
171 #define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
173 #define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
174 #define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
176 /* force HT war check on non-mips platforms
177 This WAR seem to introduce a significant slowdon on
178 4704 mips router where the problem itself never shows.
181 #ifndef __mips__
182 #define FORCEHT_WAR32414(si) \
183 (((PCIE(si)) && (si->sb.chip == BCM4311_CHIP_ID) && ((si->sb.chiprev <= 1))) || \
184 ((PCI(si) || PCIE(si)) && (si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3)))
185 #else
186 #define FORCEHT_WAR32414(si) 0
187 #endif /* __mips__ */
189 #define PCIE_ASPMWARS(si) \
190 ((PCIE(si)) && ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)))
192 /* GPIO Based LED powersave defines */
193 #define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
194 #define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */
196 #define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
198 static uint32
199 sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
201 uint8 tmp;
202 uint32 val, intr_val = 0;
206 * compact flash only has 11 bits address, while we needs 12 bits address.
207 * MEM_SEG will be OR'd with other 11 bits address in hardware,
208 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
209 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
211 if (PCMCIA(si)) {
212 INTR_OFF(si, intr_val);
213 tmp = 1;
214 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
215 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
218 val = R_REG(si->osh, sbr);
220 if (PCMCIA(si)) {
221 tmp = 0;
222 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
223 INTR_RESTORE(si, intr_val);
226 return (val);
229 static void
230 sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
232 uint8 tmp;
233 volatile uint32 dummy;
234 uint32 intr_val = 0;
238 * compact flash only has 11 bits address, while we needs 12 bits address.
239 * MEM_SEG will be OR'd with other 11 bits address in hardware,
240 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
241 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
243 if (PCMCIA(si)) {
244 INTR_OFF(si, intr_val);
245 tmp = 1;
246 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
247 sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
250 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
251 #ifdef IL_BIGENDIAN
252 dummy = R_REG(si->osh, sbr);
253 W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
254 dummy = R_REG(si->osh, sbr);
255 W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
256 #else
257 dummy = R_REG(si->osh, sbr);
258 W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
259 dummy = R_REG(si->osh, sbr);
260 W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
261 #endif /* IL_BIGENDIAN */
262 } else
263 W_REG(si->osh, sbr, v);
265 if (PCMCIA(si)) {
266 tmp = 0;
267 OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
268 INTR_RESTORE(si, intr_val);
273 * Allocate a sb handle.
274 * devid - pci device id (used to determine chip#)
275 * osh - opaque OS handle
276 * regs - virtual address of initial core registers
277 * bustype - pci/pcmcia/sb/sdio/etc
278 * vars - pointer to a pointer area for "environment" variables
279 * varsz - pointer to int to return the size of the vars
281 sb_t *
282 BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
283 uint bustype, void *sdh, char **vars, uint *varsz)
285 sb_info_t *si;
287 /* alloc sb_info_t */
288 if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
289 SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
290 return (NULL);
293 if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
294 MFREE(osh, si, sizeof(sb_info_t));
295 return (NULL);
297 si->vars = vars ? *vars : NULL;
298 si->varsz = varsz ? *varsz : 0;
300 return (sb_t *)si;
303 /* Using sb_kattach depends on SB_BUS support, either implicit */
304 /* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
305 #if !defined(CONFIG_BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
307 /* global kernel resource */
308 static sb_info_t ksi;
310 /* generic kernel variant of sb_attach() */
311 sb_t *
312 BCMINITFN(sb_kattach)(osl_t *osh)
314 static bool ksi_attached = FALSE;
316 if (!ksi_attached) {
317 void *regs = (void *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
319 if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, regs,
320 SB_BUS, NULL,
321 osh != SB_OSH ? &ksi.vars : NULL,
322 osh != SB_OSH ? &ksi.varsz : NULL) == NULL) {
323 SB_ERROR(("sb_kattach: sb_doattach failed\n"));
324 return NULL;
327 ksi_attached = TRUE;
330 return &ksi.sb;
332 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
334 static sb_info_t *
335 BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
336 uint bustype, void *sdh, char **vars, uint *varsz)
338 uint origidx;
339 chipcregs_t *cc;
340 sbconfig_t *sb;
341 uint32 w;
342 char *pvars;
344 ASSERT(GOODREGS(regs));
346 bzero((uchar*)si, sizeof(sb_info_t));
348 si->sb.buscoreidx = BADIDX;
350 si->curmap = regs;
351 si->sdh = sdh;
352 si->osh = osh;
354 /* check to see if we are a sb core mimic'ing a pci core */
355 if (bustype == PCI_BUS) {
356 if (OSL_PCI_READ_CONFIG(si->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff) {
357 SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
358 "devid:0x%x\n", __FUNCTION__, devid));
359 bustype = SB_BUS;
362 si->sb.bustype = bustype;
363 if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
364 SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
365 si->sb.bustype, BUSTYPE(si->sb.bustype)));
366 return NULL;
369 /* need to set memseg flag for CF card first before any sb registers access */
370 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
371 si->memseg = TRUE;
373 /* kludge to enable the clock on the 4306 which lacks a slowclock */
374 if (BUSTYPE(si->sb.bustype) == PCI_BUS && !sb_ispcie(si))
375 sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);
377 if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
378 w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
379 if (!GOODCOREADDR(w, SB_ENUM_BASE))
380 OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32), SB_ENUM_BASE);
384 /* get sonics backplane revision */
385 sb = REGS2SB(regs);
386 si->sb.sonicsrev = (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
388 /* scan for cores */
389 sb_scan(si, regs, devid);
391 /* no cores found, bail out */
392 if (si->numcores == 0) {
393 SB_ERROR(("sb_doattach: could not find any cores\n"));
394 return NULL;
397 /* save the current core index */
398 origidx = si->curidx;
400 /* don't go beyond if there is no chipc core in the chip */
401 if (!(cc = sb_setcore(&si->sb, SB_CC, 0)))
402 return si;
404 if (BUSTYPE(si->sb.bustype) == SB_BUS &&
405 (si->sb.chip == BCM4712_CHIP_ID) &&
406 (si->sb.chippkg != BCM4712LARGE_PKG_ID) &&
407 (si->sb.chiprev <= 3))
408 OR_REG(si->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
410 /* fixup necessary chip/core configurations */
411 if (BUSTYPE(si->sb.bustype) == PCI_BUS && sb_pci_fixcfg(si)) {
412 SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
413 return NULL;
417 /* Switch back to the original core, nvram/srom init needs it */
418 sb_setcoreidx(&si->sb, origidx);
420 /* Init nvram from flash if it exists */
421 nvram_init((void *)&si->sb);
423 /* Init nvram from sprom/otp if they exist */
424 if (srom_var_init(&si->sb, BUSTYPE(si->sb.bustype), regs, si->osh, vars, varsz)) {
425 SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
426 return (NULL);
428 pvars = vars ? *vars : NULL;
430 /* PMU specific initializations */
431 if ((si->sb.cccaps & CC_CAP_PMU) && !sb_onetimeinit) {
432 sb_pmu_init(&si->sb, si->osh);
433 /* Find out Crystal frequency and init PLL */
434 sb_pmu_pll_init(&si->sb, si->osh, getintvar(pvars, "xtalfreq"));
435 /* Initialize PMU resources (up/dn timers, dep masks, etc.) */
436 sb_pmu_res_init(&si->sb, si->osh);
439 if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
440 w = getintvar(pvars, "regwindowsz");
441 si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
444 /* get boardtype and boardrev */
445 switch (BUSTYPE(si->sb.bustype)) {
446 case PCI_BUS:
447 /* do a pci config read to get subsystem id and subvendor id */
448 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
449 /* Let nvram variables override subsystem Vend/ID */
450 if ((si->sb.boardvendor = (uint16)sb_getdevpathintvar(&si->sb, "boardvendor")) == 0)
451 si->sb.boardvendor = w & 0xffff;
452 else
453 SB_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
454 si->sb.boardvendor, w & 0xffff));
455 if ((si->sb.boardtype = (uint16)sb_getdevpathintvar(&si->sb, "boardtype")) == 0)
456 si->sb.boardtype = (w >> 16) & 0xffff;
457 else
458 SB_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
459 si->sb.boardtype, (w >> 16) & 0xffff));
460 break;
462 case PCMCIA_BUS:
463 si->sb.boardvendor = getintvar(pvars, "manfid");
464 si->sb.boardtype = getintvar(pvars, "prodid");
465 break;
467 case SB_BUS:
468 case JTAG_BUS:
469 si->sb.boardvendor = VENDOR_BROADCOM;
470 if (pvars == NULL || ((si->sb.boardtype = getintvar(pvars, "prodid")) == 0))
471 if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
472 si->sb.boardtype = 0xffff;
473 break;
476 if (si->sb.boardtype == 0) {
477 SB_ERROR(("sb_doattach: unknown board type\n"));
478 ASSERT(si->sb.boardtype);
481 si->sb.boardflags = getintvar(pvars, "boardflags");
483 /* setup the GPIO based LED powersave register */
484 if (si->sb.ccrev >= 16) {
485 if ((pvars == NULL) || ((w = getintvar(pvars, "leddc")) == 0))
486 w = DEFAULT_GPIOTIMERVAL;
487 sb_corereg(&si->sb, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
490 /* Determine if this board needs override */
491 if (PCIE(si) && (si->sb.chip == BCM4321_CHIP_ID))
492 si->pcie_war_ovr = ((si->sb.boardvendor == VENDOR_APPLE) &&
493 ((uint8)getintvar(pvars, "sromrev") == 4) &&
494 ((uint8)getintvar(pvars, "boardrev") <= 0x71)) ||
495 ((uint32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR);
497 if (PCIE_ASPMWARS(si)) {
498 sb_war43448_aspm((void *)si);
499 sb_war42767_clkreq((void *)si);
502 if (FORCEHT_WAR32414(si)) {
503 si->sb.pr32414 = TRUE;
504 sb_clkctl_init(&si->sb);
505 sb_war32414_forceHT(&si->sb, 1);
508 if (PCIE(si) && ((si->sb.buscorerev == 6) || (si->sb.buscorerev == 7)))
509 si->sb.pr42780 = TRUE;
511 if (PCIE_ASPMWARS(si))
512 sb_pcieclkreq(&si->sb, 1, 0);
514 if (PCIE(si) &&
515 (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
516 ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
517 sb_set_initiator_to(&si->sb, 0x3, sb_findcoreidx(&si->sb, SB_D11, 0));
519 /* Disable gpiopullup and gpiopulldown */
520 if (!sb_onetimeinit && si->sb.ccrev >= 20) {
521 cc = (chipcregs_t *)sb_setcore(&si->sb, SB_CC, 0);
522 W_REG(osh, &cc->gpiopullup, 0);
523 W_REG(osh, &cc->gpiopulldown, 0);
524 sb_setcoreidx(&si->sb, origidx);
528 #ifdef HNDRTE
529 sb_onetimeinit = TRUE;
530 #endif
532 return (si);
535 /* Enable/Disable clkreq for PCIE (4311B0/4321B1) */
536 void
537 BCMINITFN(sb_war42780_clkreq)(sb_t *sbh, bool clkreq)
539 sb_info_t *si;
541 si = SB_INFO(sbh);
543 /* Don't change clkreq value if serdespll war has not yet been applied */
544 if (!si->pr42767_war && PCIE_ASPMWARS(si))
545 return;
547 sb_pcieclkreq(sbh, 1, (int32)clkreq);
550 static void
551 BCMINITFN(sb_war43448)(sb_t *sbh)
553 sb_info_t *si;
555 si = SB_INFO(sbh);
557 /* if not pcie bus, we're done */
558 if (!PCIE(si) || !PCIE_ASPMWARS(si))
559 return;
561 /* Restore the polarity */
562 if (si->pcie_polarity != 0)
563 sb_pcie_mdiowrite((void *)(uintptr)&si->sb, MDIODATA_DEV_RX,
564 SERDES_RX_CTRL, si->pcie_polarity);
567 static void
568 BCMINITFN(sb_war43448_aspm)(sb_t *sbh)
570 uint32 w;
571 uint16 val16, *reg16;
572 sbpcieregs_t *pcieregs;
573 sb_info_t *si;
575 si = SB_INFO(sbh);
577 /* if not pcie bus, we're done */
578 if (!PCIE(si) || !PCIE_ASPMWARS(si))
579 return;
581 /* no ASPM stuff on QT or VSIM */
582 if (si->sb.chippkg == HDLSIM_PKG_ID || si->sb.chippkg == HWSIM_PKG_ID)
583 return;
585 pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
587 /* Enable ASPM in the shadow SROM and Link control */
588 reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
589 val16 = R_REG(si->osh, reg16);
590 if (!si->pcie_war_ovr)
591 val16 |= SRSH_ASPM_ENB;
592 else
593 val16 &= ~SRSH_ASPM_ENB;
594 W_REG(si->osh, reg16, val16);
596 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
597 if (!si->pcie_war_ovr)
598 w |= PCIE_ASPM_ENAB;
599 else
600 w &= ~PCIE_ASPM_ENAB;
601 OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
604 static void
605 BCMINITFN(sb_war32414_forceHT)(sb_t *sbh, bool forceHT)
607 sb_info_t *si;
608 uint32 val = 0;
610 si = SB_INFO(sbh);
612 ASSERT(FORCEHT_WAR32414(si));
615 if (forceHT)
616 val = SYCC_HR;
617 sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
618 SYCC_HR, val);
621 uint
622 sb_coreid(sb_t *sbh)
624 sb_info_t *si;
625 sbconfig_t *sb;
627 si = SB_INFO(sbh);
628 sb = REGS2SB(si->curmap);
630 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
633 uint
634 sb_flag(sb_t *sbh)
636 sb_info_t *si;
637 sbconfig_t *sb;
639 si = SB_INFO(sbh);
640 sb = REGS2SB(si->curmap);
642 return R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
645 uint
646 sb_coreidx(sb_t *sbh)
648 sb_info_t *si;
650 si = SB_INFO(sbh);
651 return (si->curidx);
654 /* return core index of the core with address 'sba' */
655 static uint
656 BCMINITFN(_sb_coreidx)(sb_info_t *si, uint32 sba)
658 uint i;
660 for (i = 0; i < si->numcores; i ++)
661 if (sba == si->coresba[i])
662 return i;
663 return BADIDX;
666 /* return core address of the current core */
667 static uint32
668 BCMINITFN(_sb_coresba)(sb_info_t *si)
670 uint32 sbaddr;
672 switch (BUSTYPE(si->sb.bustype)) {
673 case SB_BUS: {
674 sbconfig_t *sb = REGS2SB(si->curmap);
675 sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
676 break;
679 case PCI_BUS:
680 sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
681 break;
683 case PCMCIA_BUS: {
684 uint8 tmp = 0;
685 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
686 sbaddr = (uint32)tmp << 12;
687 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
688 sbaddr |= (uint32)tmp << 16;
689 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
690 sbaddr |= (uint32)tmp << 24;
691 break;
695 #ifdef BCMJTAG
696 case JTAG_BUS:
697 sbaddr = (uint32)(uintptr)si->curmap;
698 break;
699 #endif /* BCMJTAG */
701 default:
702 sbaddr = BADCOREADDR;
703 break;
706 SB_MSG(("_sb_coresba: current core is 0x%08x\n", sbaddr));
707 return sbaddr;
710 uint
711 sb_corevendor(sb_t *sbh)
713 sb_info_t *si;
714 sbconfig_t *sb;
716 si = SB_INFO(sbh);
717 sb = REGS2SB(si->curmap);
719 return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
722 uint
723 sb_corerev(sb_t *sbh)
725 sb_info_t *si;
726 sbconfig_t *sb;
727 uint sbidh;
729 si = SB_INFO(sbh);
730 sb = REGS2SB(si->curmap);
731 sbidh = R_SBREG(si, &sb->sbidhigh);
733 return (SBCOREREV(sbidh));
736 void *
737 sb_osh(sb_t *sbh)
739 sb_info_t *si;
741 si = SB_INFO(sbh);
742 return si->osh;
745 void
746 sb_setosh(sb_t *sbh, osl_t *osh)
748 sb_info_t *si;
750 si = SB_INFO(sbh);
751 if (si->osh != NULL) {
752 SB_ERROR(("osh is already set....\n"));
753 ASSERT(!si->osh);
755 si->osh = osh;
758 /* set sbtmstatelow core-specific flags */
759 void
760 sb_coreflags_wo(sb_t *sbh, uint32 mask, uint32 val)
762 sb_info_t *si;
763 sbconfig_t *sb;
764 uint32 w;
766 si = SB_INFO(sbh);
767 sb = REGS2SB(si->curmap);
769 ASSERT((val & ~mask) == 0);
771 /* mask and set */
772 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
773 W_SBREG(si, &sb->sbtmstatelow, w);
776 /* set/clear sbtmstatelow core-specific flags */
777 uint32
778 sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
780 sb_info_t *si;
781 sbconfig_t *sb;
782 uint32 w;
784 si = SB_INFO(sbh);
785 sb = REGS2SB(si->curmap);
787 ASSERT((val & ~mask) == 0);
789 /* mask and set */
790 if (mask || val) {
791 w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
792 W_SBREG(si, &sb->sbtmstatelow, w);
795 /* return the new value
796 * for write operation, the following readback ensures the completion of write opration.
798 return (R_SBREG(si, &sb->sbtmstatelow));
801 /* set/clear sbtmstatehigh core-specific flags */
802 uint32
803 sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
805 sb_info_t *si;
806 sbconfig_t *sb;
807 uint32 w;
809 si = SB_INFO(sbh);
810 sb = REGS2SB(si->curmap);
812 ASSERT((val & ~mask) == 0);
813 ASSERT((mask & ~SBTMH_FL_MASK) == 0);
815 /* mask and set */
816 if (mask || val) {
817 w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
818 W_SBREG(si, &sb->sbtmstatehigh, w);
821 /* return the new value */
822 return (R_SBREG(si, &sb->sbtmstatehigh));
825 /* Run bist on current core. Caller needs to take care of core-specific bist hazards */
827 sb_corebist(sb_t *sbh)
829 uint32 sblo;
830 sb_info_t *si;
831 sbconfig_t *sb;
832 int result = 0;
834 si = SB_INFO(sbh);
835 sb = REGS2SB(si->curmap);
837 sblo = R_SBREG(si, &sb->sbtmstatelow);
838 W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));
840 SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0), 100000);
842 if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)
843 result = BCME_ERROR;
845 W_SBREG(si, &sb->sbtmstatelow, sblo);
847 return result;
850 bool
851 sb_iscoreup(sb_t *sbh)
853 sb_info_t *si;
854 sbconfig_t *sb;
856 si = SB_INFO(sbh);
857 sb = REGS2SB(si->curmap);
859 return ((R_SBREG(si, &sb->sbtmstatelow) &
860 (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
864 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
865 * switch back to the original core, and return the new value.
867 * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
869 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
870 * and (on newer pci cores) chipcommon registers.
872 uint
873 sb_corereg(sb_t *sbh, uint coreidx, uint regoff, uint mask, uint val)
875 uint origidx = 0;
876 uint32 *r = NULL;
877 uint w;
878 uint intr_val = 0;
879 bool fast = FALSE;
880 sb_info_t *si;
882 si = SB_INFO(sbh);
884 ASSERT(GOODIDX(coreidx));
885 ASSERT(regoff < SB_CORE_SIZE);
886 ASSERT((val & ~mask) == 0);
888 if (BUSTYPE(si->sb.bustype) == SB_BUS) {
889 /* If internal bus, we can always get at everything */
890 fast = TRUE;
891 /* map if does not exist */
892 if (!si->regs[coreidx]) {
893 si->regs[coreidx] = (void*)REG_MAP(si->coresba[coreidx],
894 SB_CORE_SIZE);
895 ASSERT(GOODREGS(si->regs[coreidx]));
897 r = (uint32 *)((uchar *)si->regs[coreidx] + regoff);
898 } else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
899 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
901 if ((si->coreid[coreidx] == SB_CC) &&
902 ((si->sb.buscoretype == SB_PCIE) ||
903 (si->sb.buscorerev >= 13))) {
904 /* Chipc registers are mapped at 12KB */
906 fast = TRUE;
907 r = (uint32 *)((char *)si->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
908 } else if (si->sb.buscoreidx == coreidx) {
909 /* pci registers are at either in the last 2KB of an 8KB window
910 * or, in pcie and pci rev 13 at 8KB
912 fast = TRUE;
913 if ((si->sb.buscoretype == SB_PCIE) ||
914 (si->sb.buscorerev >= 13))
915 r = (uint32 *)((char *)si->curmap +
916 PCI_16KB0_PCIREGS_OFFSET + regoff);
917 else
918 r = (uint32 *)((char *)si->curmap +
919 ((regoff >= SBCONFIGOFF) ?
920 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
921 regoff);
925 if (!fast) {
926 INTR_OFF(si, intr_val);
928 /* save current core index */
929 origidx = sb_coreidx(&si->sb);
931 /* switch core */
932 r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
934 ASSERT(r);
936 /* mask and set */
937 if (mask || val) {
938 if (regoff >= SBCONFIGOFF) {
939 w = (R_SBREG(si, r) & ~mask) | val;
940 W_SBREG(si, r, w);
941 } else {
942 w = (R_REG(si->osh, r) & ~mask) | val;
943 W_REG(si->osh, r, w);
947 /* readback */
948 if (regoff >= SBCONFIGOFF)
949 w = R_SBREG(si, r);
950 else {
951 #if defined(CONFIG_BCM5354)
952 if ((si->sb.chip == BCM5354_CHIP_ID) &&
953 (coreidx == SB_CC_IDX) &&
954 (regoff == OFFSETOF(chipcregs_t, watchdog))) {
955 w = val;
956 } else
957 #endif /* BCM5354 */
958 w = R_REG(si->osh, r);
961 if (!fast) {
962 /* restore core index */
963 if (origidx != coreidx)
964 sb_setcoreidx(&si->sb, origidx);
966 INTR_RESTORE(si, intr_val);
969 return (w);
/* helpers to pull a byte/word out of a 32-bit PCI config dword read */
#define DWORD_ALIGN(x)  (x & ~(0x03))
#define BYTE_POS(x) (x & 0x3)
#define WORD_POS(x) (x & 0x1)

#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
#define WORD_SHIFT(x)  (16 * WORD_POS(x))

#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)

#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
989 /* return cap_offset if requested capability exists in the PCI config space */
990 static uint8
991 sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
993 uint8 cap_id;
994 uint8 cap_ptr = 0;
995 uint32 bufsize;
996 uint8 byte_val;
998 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
999 goto end;
1001 /* check for Header type 0 */
1002 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
1003 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
1004 goto end;
1006 /* check if the capability pointer field exists */
1007 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
1008 if (!(byte_val & PCI_CAPPTR_PRESENT))
1009 goto end;
1011 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
1012 /* check if the capability pointer is 0x00 */
1013 if (cap_ptr == 0x00)
1014 goto end;
1016 /* loop thr'u the capability list and see if the pcie capabilty exists */
1018 cap_id = read_pci_cfg_byte(cap_ptr);
1020 while (cap_id != req_cap_id) {
1021 cap_ptr = read_pci_cfg_byte((cap_ptr+1));
1022 if (cap_ptr == 0x00) break;
1023 cap_id = read_pci_cfg_byte(cap_ptr);
1025 if (cap_id != req_cap_id) {
1026 goto end;
1028 /* found the caller requested capability */
1029 if ((buf != NULL) && (buflen != NULL)) {
1030 uint8 cap_data;
1032 bufsize = *buflen;
1033 if (!bufsize) goto end;
1034 *buflen = 0;
1035 /* copy the cpability data excluding cap ID and next ptr */
1036 cap_data = cap_ptr + 2;
1037 if ((bufsize + cap_data) > SZPCR)
1038 bufsize = SZPCR - cap_data;
1039 *buflen = bufsize;
1040 while (bufsize--) {
1041 *buf = read_pci_cfg_byte(cap_data);
1042 cap_data++;
1043 buf++;
1046 end:
1047 return cap_ptr;
1050 uint8
1051 sb_pcieclkreq(sb_t *sbh, uint32 mask, uint32 val)
1053 sb_info_t *si;
1054 uint32 reg_val;
1055 uint8 offset;
1057 si = SB_INFO(sbh);
1059 offset = si->pciecap_lcreg_offset;
1060 if (!offset)
1061 return 0;
1063 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1064 /* set operation */
1065 if (mask) {
1066 if (val)
1067 reg_val |= PCIE_CLKREQ_ENAB;
1068 else
1069 reg_val &= ~PCIE_CLKREQ_ENAB;
1070 OSL_PCI_WRITE_CONFIG(si->osh, offset, sizeof(uint32), reg_val);
1071 reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1073 if (reg_val & PCIE_CLKREQ_ENAB)
1074 return 1;
1075 else
1076 return 0;
1081 /* return TRUE if PCIE capability exists in the pci config space */
1082 static bool
1083 sb_ispcie(sb_info_t *si)
1085 uint8 cap_ptr;
1087 cap_ptr = sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL);
1088 if (!cap_ptr)
1089 return FALSE;
1091 si->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
1093 return TRUE;
1096 /* Wake-on-wireless-LAN (WOWL) support functions */
1097 /* return TRUE if PM capability exists in the pci config space */
1098 bool
1099 sb_pci_pmecap(sb_t *sbh)
1101 uint8 cap_ptr;
1102 uint32 pmecap;
1103 sb_info_t *si;
1105 si = SB_INFO(sbh);
1107 if (si == NULL || !(PCI(si) || PCIE(si)))
1108 return FALSE;
1110 if (!si->pmecap_offset) {
1111 cap_ptr = sb_find_pci_capability(si, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
1112 if (!cap_ptr)
1113 return FALSE;
1115 si->pmecap_offset = cap_ptr;
1117 pmecap = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset, sizeof(uint32));
1119 /* At least one state can generate PME */
1120 si->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
1123 return (si->pmecap);
1126 /* Enable PME generation and disable clkreq */
1127 void
1128 sb_pci_pmeen(sb_t *sbh)
1130 sb_info_t *si;
1131 uint32 w;
1132 si = SB_INFO(sbh);
1134 /* if not pmecapable return */
1135 if (!sb_pci_pmecap(sbh))
1136 return;
1138 w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1139 w |= (PME_CSR_PME_EN);
1140 OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
1142 /* Disable clkreq */
1143 if (si->pr42767_war) {
1144 sb_pcieclkreq(sbh, 1, 0);
1145 si->pr42767_war = FALSE;
1146 } else if (si->sb.pr42780) {
1147 sb_pcieclkreq(sbh, 1, 1);
1151 /* Disable PME generation, clear the PME status bit if set and
1152 * return TRUE if PME status set
1154 bool
1155 sb_pci_pmeclr(sb_t *sbh)
1157 sb_info_t *si;
1158 uint32 w;
1159 bool ret = FALSE;
1161 si = SB_INFO(sbh);
1163 if (!sb_pci_pmecap(sbh))
1164 return ret;
1166 w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1168 SB_ERROR(("sb_pci_pmeclr PMECSR : 0x%x\n", w));
1169 ret = (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
1171 /* PMESTAT is cleared by writing 1 to it */
1172 w &= ~(PME_CSR_PME_EN);
1174 OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
1176 return ret;
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 *
 * Recurses (via SB_OCP bridge cores) at most SB_MAXBUSES levels deep.
 * Updates si->numcores, si->coresba[], si->coreid[], si->regs[], and —
 * when chipcommon is found on the root bus — the chip id/rev/caps fields.
 */
#define SB_MAXBUSES	2
static uint
BCMINITFN(_sb_scan)(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;	/* cores found on bridged (child) buses */
	uint i;

	if (bus >= SB_MAXBUSES) {
		SB_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SB_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = si->numcores; i < numcores && next < SB_MAXCORES; i++, next++) {
		si->coresba[next] = sbba + i * SB_CORE_SIZE;

		/* keep and reuse the initial register mapping */
		if (BUSTYPE(si->sb.bustype) == SB_BUS && si->coresba[next] == sba) {
			SB_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			si->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		si->curmap = _sb_setcoreidx(si, next);
		si->curidx = next;

		si->coreid[next] = sb_coreid(&si->sb);

		/* core specific processing... */
		/* chipc on bus SB_ENUM_BASE provides # cores in the chip and lots of
		 * other stuff.
		 */
		if (sbba == SB_ENUM_BASE && si->coreid[next] == SB_CC) {
			chipcregs_t *cc = (chipcregs_t *)si->curmap;

			/* get chip id and rev */
			si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
			si->sb.chiprev = (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >>
			        CID_REV_SHIFT;
			si->sb.chippkg = (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >>
			        CID_PKG_SHIFT;

			/* get chipcommon rev */
			si->sb.ccrev = (int)sb_corerev(&si->sb);

			/* get chipcommon chipstatus */
			if (si->sb.ccrev >= 11)
				si->sb.chipst = R_REG(si->osh, &cc->chipstatus);

			/* get chipcommon capabilites */
			si->sb.cccaps = R_REG(si->osh, &cc->capabilities);

			/* get pmu rev and caps */
			if ((si->sb.cccaps & CC_CAP_PMU)) {
				si->sb.pmucaps = R_REG(si->osh, &cc->pmucapabilities);
				si->sb.pmurev = si->sb.pmucaps & PCAP_REV_MASK;
			}

			/* determine numcores - this is the total # cores in the chip */
			if (((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
				numcores = (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			else
				/* older chipcommon revs don't report a core count */
				numcores = sb_chip2numcores(si->sb.chip);
			SB_MSG(("_sb_scan: there are %u cores in the chip\n", numcores));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (si->coreid[next] == SB_OCP) {
			sbconfig_t *sb = REGS2SB(si->curmap);
			uint32 nsbba = R_SBREG(si, &sb->sbadmatch1);
			uint nsbcc;

			/* commit what we have so far so the recursion appends after us */
			si->numcores = next + 1;

			/* only follow bridges into the standard enumeration window,
			 * and skip buses we have already seen
			 */
			if ((nsbba & 0xfff00000) != SB_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(si, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(si, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(si, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SB_ENUM_BASE)
				numcores -= nsbcc;	/* child cores were counted in the chip total */
			ncc += nsbcc;
		}
	}

	SB_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	si->numcores = i + ncc;
	return si->numcores;
}
/* scan the sb enumerated space to identify all cores.
 * After the recursive _sb_scan, identifies the host-bus core (PCI, PCIE
 * or PCMCIA) and records its type/rev/index in si->sb, then restores the
 * core that was selected on entry.
 */
static void
BCMINITFN(sb_scan)(sb_info_t *si, void *regs, uint devid)
{
	uint origidx;
	uint32 origsba;
	uint i;
	bool pci;
	bool pcie;
	uint pciidx;
	uint pcieidx;
	uint pcirev;
	uint pcierev;
	uint numcores;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(si);
	origidx = BADIDX;

	/* Use devid as initial chipid and we'll update it later in _sb_scan */
	si->sb.chip = devid;

	/* Support chipcommon-less chips for a little while longer so the old
	 * sdio host fpga continues to work until we can get the new one working
	 * reliably. This particular chip has 2 cores - codec/sdio and pci.
	 */
	if (devid == SDIOH_FPGA_ID)
		numcores = 2;
	/* Expect at least one core on 0x18000000 and it must be chipcommon where
	 * the core count for the whole chip is kept.
	 */
	else
		numcores = 1;

	/* scan all SB(s) starting from SB_ENUM_BASE */
	si->numcores = _sb_scan(si, origsba, regs, 0, SB_ENUM_BASE, numcores);
	if (si->numcores == 0)
		return;

	/* figure out bus/orignal core idx */
	si->sb.buscorerev = NOREV;
	si->sb.buscoreidx = BADIDX;

	pci = pcie = FALSE;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	for (i = 0; i < si->numcores; i++) {
		sb_setcoreidx(&si->sb, i);

		if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
			if (si->coreid[i] == SB_PCI) {
				pciidx = i;
				pcirev = sb_corerev(&si->sb);
				pci = TRUE;
			} else if (si->coreid[i] == SB_PCIE) {
				pcieidx = i;
				pcierev = sb_corerev(&si->sb);
				pcie = TRUE;
			}
		} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
			if (si->coreid[i] == SB_PCMCIA) {
				si->sb.buscorerev = sb_corerev(&si->sb);
				si->sb.buscoretype = si->coreid[i];
				si->sb.buscoreidx = i;
			}
		}

		/* find the core idx before entering this func. */
		if (origsba == si->coresba[i])
			origidx = i;
	}

	/* a chip may expose both PCI and PCIE cores; the actual config space
	 * decides which one is wired to the host
	 */
	if (pci && pcie) {
		if (sb_ispcie(si))
			pci = FALSE;
		else
			pcie = FALSE;
	}
	if (pci) {
		si->sb.buscoretype = SB_PCI;
		si->sb.buscorerev = pcirev;
		si->sb.buscoreidx = pciidx;
	} else if (pcie) {
		si->sb.buscoretype = SB_PCIE;
		si->sb.buscorerev = pcierev;
		si->sb.buscoreidx = pcieidx;
	}

	/* return to the original core */
	if (origidx != BADIDX)
		sb_setcoreidx(&si->sb, origidx);
	ASSERT(origidx != BADIDX);
}
/* may be called with core in reset.
 * Unmaps all per-core register windows (SB_BUS only) and frees the
 * sb_info_t unless it is the static kernel instance 'ksi'.
 */
void
sb_detach(sb_t *sbh)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);

	if (si == NULL)
		return;

	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
			if (si->regs[idx]) {
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;
			}

	/* the statically allocated 'ksi' must never be MFREE'd */
#if !defined(CONFIG_BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
	if (si != &ksi)
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
		MFREE(si->osh, si, sizeof(sb_info_t));
}
1405 /* convert chip number to number of i/o cores */
1406 static uint
1407 BCMINITFN(sb_chip2numcores)(uint chip)
1409 if (chip == BCM4306_CHIP_ID) /* < 4306c0 */
1410 return (6);
1411 if (chip == BCM4704_CHIP_ID)
1412 return (9);
1413 if (chip == BCM5365_CHIP_ID)
1414 return (7);
1416 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
1417 ASSERT(0);
1418 return (1);
1421 /* return index of coreid or BADIDX if not found */
1422 uint
1423 sb_findcoreidx(sb_t *sbh, uint coreid, uint coreunit)
1425 sb_info_t *si;
1426 uint found;
1427 uint i;
1429 si = SB_INFO(sbh);
1431 found = 0;
1433 for (i = 0; i < si->numcores; i++)
1434 if (si->coreid[i] == coreid) {
1435 if (found == coreunit)
1436 return (i);
1437 found++;
1440 return (BADIDX);
1444 * this function changes logical "focus" to the indiciated core,
1445 * must be called with interrupt off.
1446 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1448 void*
1449 sb_setcoreidx(sb_t *sbh, uint coreidx)
1451 sb_info_t *si;
1453 si = SB_INFO(sbh);
1455 if (coreidx >= si->numcores)
1456 return (NULL);
1459 * If the user has provided an interrupt mask enabled function,
1460 * then assert interrupts are disabled before switching the core.
1462 ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
1464 si->curmap = _sb_setcoreidx(si, coreidx);
1465 si->curidx = coreidx;
1467 return (si->curmap);
/* This function changes the logical "focus" to the indiciated core.
 * Return the current core's virtual address. Does NOT update
 * si->curidx/si->curmap — callers do that.
 */
static void *
_sb_setcoreidx(sb_info_t *si, uint coreidx)
{
	uint32 sbaddr = si->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS:
		/* map new one; mapping is created lazily and cached in si->regs[] */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window at the core's backplane address */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = si->curmap;
		break;

	case PCMCIA_BUS: {
		/* program the 3-byte backplane address window in attribute space */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = si->curmap;
		break;
	}

#ifdef BCMJTAG
	case JTAG_BUS:
		/* map new one; over JTAG the "mapping" is just the backplane address */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;
#endif	/* BCMJTAG */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
1527 * this function changes logical "focus" to the indiciated core,
1528 * must be called with interrupt off.
1529 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1531 void*
1532 sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
1534 uint idx;
1536 idx = sb_findcoreidx(sbh, coreid, coreunit);
1537 if (!GOODIDX(idx))
1538 return (NULL);
1540 return (sb_setcoreidx(sbh, idx));
1543 /* return chip number */
1544 uint
1545 BCMINITFN(sb_chip)(sb_t *sbh)
1547 sb_info_t *si;
1549 si = SB_INFO(sbh);
1550 return (si->sb.chip);
1553 /* return chip revision number */
1554 uint
1555 BCMINITFN(sb_chiprev)(sb_t *sbh)
1557 sb_info_t *si;
1559 si = SB_INFO(sbh);
1560 return (si->sb.chiprev);
1563 /* return chip common revision number */
1564 uint
1565 BCMINITFN(sb_chipcrev)(sb_t *sbh)
1567 sb_info_t *si;
1569 si = SB_INFO(sbh);
1570 return (si->sb.ccrev);
1573 /* return chip package option */
1574 uint
1575 BCMINITFN(sb_chippkg)(sb_t *sbh)
1577 sb_info_t *si;
1579 si = SB_INFO(sbh);
1580 return (si->sb.chippkg);
1583 /* return PCI core rev. */
1584 uint
1585 BCMINITFN(sb_pcirev)(sb_t *sbh)
1587 sb_info_t *si;
1589 si = SB_INFO(sbh);
1590 return (si->sb.buscorerev);
1593 bool
1594 BCMINITFN(sb_war16165)(sb_t *sbh)
1596 sb_info_t *si;
1598 si = SB_INFO(sbh);
1600 return (PCI(si) && (si->sb.buscorerev <= 10));
/* PR30841 workaround: retune the PCIE SERDES receiver via MDIO.
 * The three values are fixed tuning constants (timer, CDR, CDR bandwidth);
 * NOTE(review): write order presumably matters for the SERDES — keep as-is.
 */
static void
BCMINITFN(sb_war30841)(sb_info_t *si)
{
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
}
1611 /* return PCMCIA core rev. */
1612 uint
1613 BCMINITFN(sb_pcmciarev)(sb_t *sbh)
1615 sb_info_t *si;
1617 si = SB_INFO(sbh);
1618 return (si->sb.buscorerev);
1621 /* return board vendor id */
1622 uint
1623 BCMINITFN(sb_boardvendor)(sb_t *sbh)
1625 sb_info_t *si;
1627 si = SB_INFO(sbh);
1628 return (si->sb.boardvendor);
1631 /* return boardtype */
1632 uint
1633 BCMINITFN(sb_boardtype)(sb_t *sbh)
1635 sb_info_t *si;
1636 char *var;
1638 si = SB_INFO(sbh);
1640 if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
1641 /* boardtype format is a hex string */
1642 si->sb.boardtype = getintvar(NULL, "boardtype");
1644 /* backward compatibility for older boardtype string format */
1645 if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
1646 if (!strcmp(var, "bcm94710dev"))
1647 si->sb.boardtype = BCM94710D_BOARD;
1648 else if (!strcmp(var, "bcm94710ap"))
1649 si->sb.boardtype = BCM94710AP_BOARD;
1650 else if (!strcmp(var, "bu4710"))
1651 si->sb.boardtype = BU4710_BOARD;
1652 else if (!strcmp(var, "bcm94702mn"))
1653 si->sb.boardtype = BCM94702MN_BOARD;
1654 else if (!strcmp(var, "bcm94710r1"))
1655 si->sb.boardtype = BCM94710R1_BOARD;
1656 else if (!strcmp(var, "bcm94710r4"))
1657 si->sb.boardtype = BCM94710R4_BOARD;
1658 else if (!strcmp(var, "bcm94702cpci"))
1659 si->sb.boardtype = BCM94702CPCI_BOARD;
1660 else if (!strcmp(var, "bcm95380_rr"))
1661 si->sb.boardtype = BCM95380RR_BOARD;
1665 return (si->sb.boardtype);
1668 /* return bus type of sbh device */
1669 uint
1670 sb_bus(sb_t *sbh)
1672 sb_info_t *si;
1674 si = SB_INFO(sbh);
1675 return (si->sb.bustype);
1678 /* return bus core type */
1679 uint
1680 sb_buscoretype(sb_t *sbh)
1682 sb_info_t *si;
1684 si = SB_INFO(sbh);
1686 return (si->sb.buscoretype);
1689 /* return bus core revision */
1690 uint
1691 sb_buscorerev(sb_t *sbh)
1693 sb_info_t *si;
1694 si = SB_INFO(sbh);
1696 return (si->sb.buscorerev);
1699 /* return list of found cores */
1700 uint
1701 sb_corelist(sb_t *sbh, uint coreid[])
1703 sb_info_t *si;
1705 si = SB_INFO(sbh);
1707 bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
1708 return (si->numcores);
1711 /* return current register mapping */
1712 void *
1713 sb_coreregs(sb_t *sbh)
1715 sb_info_t *si;
1717 si = SB_INFO(sbh);
1718 ASSERT(GOODREGS(si->curmap));
1720 return (si->curmap);
#if defined(CONFIG_BCMDBG_ASSERT)
/* traverse all cores to find and clear source of serror.
 * Switches through every core, clearing SBTMH_SERR in sbtmstatehigh where
 * set, then restores the originally selected core. Runs with interrupts off.
 */
static void
sb_serr_clear(sb_info_t *si)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void * corereg = NULL;

	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(&si->sb);

	for (i = 0; i < si->numcores; i++) {
		corereg = sb_setcoreidx(&si->sb, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			if ((R_SBREG(si, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				/* clear the sticky serror bit */
				AND_SBREG(si, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SB_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&si->sb)));
			}
		}
	}

	sb_setcoreidx(&si->sb, origidx);
	INTR_RESTORE(si, intr_val);
}
/*
 * Check if any inband, outband or timeout errors has happened and clear them.
 * Must be called with chip clk on !
 * Returns TRUE if any error (target abort, serror, or timeout) was found
 * and cleared. Only PCI and PCMCIA host buses are handled.
 */
bool
sb_taclear(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;
	uint origidx;
	uint intr_val = 0;
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	void *corereg = NULL;
	volatile uint32 imstate, tmstate;

	si = SB_INFO(sbh);

	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		volatile uint32 stcmd;

		/* inband error is Target abort for PCI */
		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32));
		inband = stcmd & PCI_CFG_CMD_STAT_TA;
		if (inband) {
			/* write-1-to-clear the status bits */
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
		}

		/* serror */
		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32));
		serror = stcmd & PCI_SBIM_STATUS_SERR;
		if (serror) {
			sb_serr_clear(si);
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
		}

		/* timeout */
		imstate = sb_corereg(sbh, si->sb.buscoreidx,
		                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
			sb_corereg(sbh, si->sb.buscoreidx,
			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
			           (imstate & ~(SBIM_IBE | SBIM_TO)));
			/* inband = imstate & SBIM_IBE; same as TA above */
			timeout = imstate & SBIM_TO;
			if (timeout) {
				/* timeout is only reported via the summary below */
			}
		}

		if (inband) {
			/* dump errlog for sonics >= 2.3 */
			if (si->sb.sonicsrev == SONICS_2_2)
				;	/* sonics 2.2 has no error log registers */
			else {
				uint32 imerrlog, imerrloga;
				imerrlog = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, 0, 0);
				if (imerrlog & SBTMEL_EC) {
					imerrloga = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOGA,
					                       0, 0);
					/* clear errlog */
					sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, ~0, 0);
					SB_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
					          imerrlog, imerrloga));
				}
			}
		}

	} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {

		INTR_OFF(si, intr_val);
		origidx = sb_coreidx(sbh);

		corereg = sb_setcore(sbh, SB_PCMCIA, 0);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(si, &sb->sbimstate);
			/* handle surprise removal */
			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
				inband = imstate & SBIM_IBE;
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(si, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
				if (!inband) {
					serror = 1;
					sb_serr_clear(si);
				}
				/* pulse the interrupt ack bit */
				OR_SBREG(si, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(si, &sb->sbtmstatelow, ~SBTML_INT_ACK);
			}
		}
		sb_setcoreidx(sbh, origidx);
		INTR_RESTORE(si, intr_val);
	}

	if (inband | timeout | serror) {
		rc = TRUE;
		SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
		          inband, serror, timeout));
	}

	return (rc);
}
#endif
1862 /* do buffered registers update */
1863 void
1864 sb_commit(sb_t *sbh)
1866 sb_info_t *si;
1867 uint origidx;
1868 uint intr_val = 0;
1870 si = SB_INFO(sbh);
1872 origidx = si->curidx;
1873 ASSERT(GOODIDX(origidx));
1875 INTR_OFF(si, intr_val);
1877 /* switch over to chipcommon core if there is one, else use pci */
1878 if (si->sb.ccrev != NOREV) {
1879 chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
1881 /* do the buffer registers update */
1882 W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
1883 W_REG(si->osh, &ccregs->broadcastdata, 0x0);
1884 } else if (PCI(si)) {
1885 sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);
1887 /* do the buffer registers update */
1888 W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
1889 W_REG(si->osh, &pciregs->bcastdata, 0x0);
1890 } else
1891 ASSERT(0);
1893 /* restore core index */
1894 sb_setcoreidx(sbh, origidx);
1895 INTR_RESTORE(si, intr_val);
/* reset and re-enable a core
 * inputs:
 *	bits - core specific bits that are set during and after reset sequence
 *	resetbits - core specific bits that are set only during reset sequence
 *
 * Operates on the currently selected core. Readbacks of sbtmstatelow after
 * each write flush the posted write before the delay.
 */
void
sb_core_reset(sb_t *sbh, uint32 bits, uint32 resetbits)
{
	sb_info_t *si;
	sbconfig_t *sb;
	volatile uint32 dummy;

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* clear any pending serror / inband-error / timeout state */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
/* Fix up the current core's initiator timeout values (sbimconfiglow)
 * for old PCI host cores (rev < 5), then commit.
 */
void
sb_core_tofixup(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* only applies to PCI (not PCIE) host buses with core rev < 5 */
	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* NOTE(review): this branch looks unreachable — the guard above only
	 * lets BUSTYPE == PCI_BUS through — confirm before relying on it.
	 */
	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			/* non-pci cores get their timeouts cleared */
			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	sb_commit(sbh);
}
/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (Because of dma *any* core can access memeory).
 *
 * The routine uses the bus to decide who is the master:
 *	SB_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(sb_t *sbh, uint32 to, uint idx)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == the documented -1 error value */
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* reject timeout values outside the RTO/STO fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(si->sb.bustype)) {
		case PCI_BUS:
			idx = si->sb.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SB_CC_IDX;
			break;
		case PCMCIA_BUS:
			idx = sb_findcoreidx(sbh, SB_PCMCIA, 0);
			break;
		case SB_BUS:
			idx = sb_findcoreidx(sbh, SB_MIPS33, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	/* read-modify-write the timeout fields, returning the old setting */
	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sbh);
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return ret;
}
/* Put the currently selected core into reset (leaving reset and reject
 * asserted). 'bits' are core-specific sbtmstatelow bits to keep set.
 * Follows the Sonics-prescribed reject/spin/reset sequence; safe to call
 * when the core is already in reset or has no clock.
 */
void
sb_core_disable(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	volatile uint32 dummy;
	uint32 rej;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* reject value changed between sonics 2.2 and 2.3 */
	if (si->sb.sonicsrev == SONICS_2_2)
		rej = (1 << SBTML_REJ_SHIFT);
	else
		rej = (2 << SBTML_REJ_SHIFT);

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(si, &sb->sbtmstatelow, rej);
	dummy = R_SBREG(si, &sb->sbtmstatelow);	/* readback flushes posted write */
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SB_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* initiator cores additionally need the initiator reject handshake */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(si, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
	OSL_DELAY(1);
}
/* set chip watchdog reset timer to fire in 'ticks' backplane cycles;
 * ticks == 0 disarms the watchdog; small nonzero values effectively
 * cause an immediate chip reset ("instant NMI").
 */
void
sb_watchdog(sb_t *sbh, uint ticks)
{
	/* make sure we come up in fast clock mode; or if clearing, clear clock */
	if (ticks)
		sb_clkctl_clk(sbh, CLK_FAST);
	else
		sb_clkctl_clk(sbh, CLK_DYNAMIC);

#if defined(CONFIG_BCM4328)
	/* 4328: keep the ROM powered so the chip can reboot from it */
	if (sbh->chip == BCM4328_CHIP_ID && ticks != 0)
		sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, min_res_mask),
		           PMURES_BIT(RES4328_ROM_SWITCH),
		           PMURES_BIT(RES4328_ROM_SWITCH));
#endif

	/* instant NMI */
	sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
}
2131 /* initialize the pcmcia core */
2132 void
2133 sb_pcmcia_init(sb_t *sbh)
2135 sb_info_t *si;
2136 uint8 cor = 0;
2138 si = SB_INFO(sbh);
2140 /* enable d11 mac interrupts */
2141 OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
2142 cor |= COR_IRQEN | COR_FUNEN;
2143 OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
/* Apply various WARs when the device comes up on a PCI/PCIE bus. */
void
BCMINITFN(sb_pci_up)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	if (FORCEHT_WAR32414(si))
		sb_war32414_forceHT(sbh, 1);

	/* turn clkreq off while up */
	if (PCIE_ASPMWARS(si) || si->sb.pr42780)
		sb_pcieclkreq(sbh, 1, 0);

	/* set d11 initiator timeout on affected 4311a2/4312a0 parts;
	 * NOTE(review): (void *)si passed where sb_t* is expected — works only
	 * because sb is the first member of sb_info_t.
	 */
	if (PCIE(si) &&
	    (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
	     ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
		sb_set_initiator_to((void *)si, 0x3, sb_findcoreidx((void *)si, SB_D11, 0));
}
2172 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
2173 void
2174 BCMUNINITFN(sb_pci_sleep)(sb_t *sbh)
2176 sb_info_t *si;
2177 uint32 w;
2178 si = SB_INFO(sbh);
2180 /* if not pci bus, we're done */
2181 if (!PCIE(si) || !PCIE_ASPMWARS(si))
2182 return;
2184 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
2185 w &= ~PCIE_CAP_LCREG_ASPML1;
2186 OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
2189 /* Unconfigure and/or apply various WARs when going down */
2190 void
2191 BCMINITFN(sb_pci_down)(sb_t *sbh)
2193 sb_info_t *si;
2195 si = SB_INFO(sbh);
2197 /* if not pci bus, we're done */
2198 if (BUSTYPE(si->sb.bustype) != PCI_BUS)
2199 return;
2201 if (FORCEHT_WAR32414(si))
2202 sb_war32414_forceHT(sbh, 0);
2204 if (si->pr42767_war) {
2205 sb_pcieclkreq(sbh, 1, 1);
2206 si->pr42767_war = FALSE;
2207 } else if (si->sb.pr42780) {
2208 sb_pcieclkreq(sbh, 1, 1);
/* PR42767 workaround: make sure the clkreq capability is advertised in the
 * PCIE core's SPROM shadow area (or de-advertised when the war is overridden).
 * Side effects: switches the current core to the bus core; may set
 * si->pr42767_war and si->sb.pr42780.
 */
static void
BCMINITFN(sb_war42767_clkreq)(sb_t *sbh)
{
	sbpcieregs_t *pcieregs;
	uint16 val16, *reg16;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
	val16 = R_REG(si->osh, reg16);
	/* if clockreq is not advertized advertize it */
	if (!si->pcie_war_ovr) {
		val16 |= SRSH_CLKREQ_ENB;
		si->pr42767_war = TRUE;

		si->sb.pr42780 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;
	W_REG(si->osh, reg16, val16);
}
2239 static void
2240 BCMINITFN(sb_war42767)(sb_t *sbh)
2242 uint32 w = 0;
2243 sb_info_t *si;
2245 si = SB_INFO(sbh);
2247 /* if not pcie bus, we're done */
2248 if (!PCIE(si) || !PCIE_ASPMWARS(si))
2249 return;
2251 sb_pcie_mdioread(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
2252 if (w & PLL_CTRL_FREQDET_EN) {
2253 w &= ~PLL_CTRL_FREQDET_EN;
2254 sb_pcie_mdiowrite(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
/*
 * Configure the pci core for pci client (NIC) action
 * coremask is the bitvec of cores by index to be enabled.
 *
 * Enables sb->pci interrupt routing for the given cores, sets PCI read
 * prefetch/burst options, and applies the rev-specific PCIE workarounds.
 * Preserves the selected core across the call.
 */
void
BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
{
	sb_info_t *si;
	sbconfig_t *sb;
	sbpciregs_t *pciregs;
	uint32 sbflag;
	uint32 w;
	uint idx;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	ASSERT(PCI(si) || PCIE(si));
	ASSERT(si->sb.buscoreidx != BADIDX);

	/* get current core index */
	idx = si->curidx;

	/* we interrupt on this backplane flag number */
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);
	sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;

	/* switch over to pci core */
	pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
	sb = REGS2SB(pciregs);

	/*
	 * Enable sb->pci interrupts. Assume
	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
	 */
	if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
		/* pci config write to set this core bit in PCIIntMask */
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
		w |= (coremask << PCI_SBIM_SHIFT);
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
	} else {
		/* set sbintvec bit for our flag number */
		OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
	}

	if (PCI(si)) {
		OR_REG(si->osh, &pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
		if (si->sb.buscorerev >= 11)
			OR_REG(si->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
		if (si->sb.buscorerev < 5) {
			/* old pci cores need explicit initiator timeouts */
			SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
			sb_commit(sbh);
		}
	}

	/* PCIE workarounds */
	if (PCIE(si)) {
		if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_TLP_WORKAROUNDSREG);
			w |= 0x8;
			sb_pcie_writereg((void *)(uintptr)sbh,
			                 (void *)(uintptr)PCIE_PCIEREGS,
			                 PCIE_TLP_WORKAROUNDSREG, w);
		}

		if (si->sb.buscorerev == 1) {
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_DLLP_LCREG);
			w |= (0x40);
			sb_pcie_writereg((void *)(uintptr)sbh,
			                 (void *)(uintptr)PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
		}

		if (si->sb.buscorerev == 0)
			sb_war30841(si);

		if ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)) {
			/* raise the L1 entry threshold */
			w = sb_pcie_readreg((void *)(uintptr)sbh,
			                    (void *)(uintptr)PCIE_PCIEREGS,
			                    PCIE_DLLP_PMTHRESHREG);
			w &= ~(PCIE_L1THRESHOLDTIME_MASK);
			w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
			sb_pcie_writereg((void *)(uintptr)sbh, (void *)(uintptr)PCIE_PCIEREGS,
			                 PCIE_DLLP_PMTHRESHREG, w);

			sb_war43448(sbh);

			sb_war42767(sbh);

			sb_war43448_aspm(sbh);
			sb_war42767_clkreq(sbh);
		}
	}

	/* switch back to previous core */
	sb_setcoreidx(sbh, idx);
}
2364 uint32
2365 sb_base(uint32 admatch)
2367 uint32 base;
2368 uint type;
2370 type = admatch & SBAM_TYPE_MASK;
2371 ASSERT(type < 3);
2373 base = 0;
2375 if (type == 0) {
2376 base = admatch & SBAM_BASE0_MASK;
2377 } else if (type == 1) {
2378 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2379 base = admatch & SBAM_BASE1_MASK;
2380 } else if (type == 2) {
2381 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2382 base = admatch & SBAM_BASE2_MASK;
2385 return (base);
2388 uint32
2389 sb_size(uint32 admatch)
2391 uint32 size;
2392 uint type;
2394 type = admatch & SBAM_TYPE_MASK;
2395 ASSERT(type < 3);
2397 size = 0;
2399 if (type == 0) {
2400 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
2401 } else if (type == 1) {
2402 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2403 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
2404 } else if (type == 2) {
2405 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
2406 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
2409 return (size);
2412 /* return the core-type instantiation # of the current core */
2413 uint
2414 sb_coreunit(sb_t *sbh)
2416 sb_info_t *si;
2417 uint idx;
2418 uint coreid;
2419 uint coreunit;
2420 uint i;
2422 si = SB_INFO(sbh);
2423 coreunit = 0;
2425 idx = si->curidx;
2427 ASSERT(GOODREGS(si->curmap));
2428 coreid = sb_coreid(sbh);
2430 /* count the cores of our type */
2431 for (i = 0; i < idx; i++)
2432 if (si->coreid[i] == coreid)
2433 coreunit++;
2435 return (coreunit);
2438 static uint32
2439 BCMINITFN(factor6)(uint32 x)
2441 switch (x) {
2442 case CC_F6_2: return 2;
2443 case CC_F6_3: return 3;
2444 case CC_F6_4: return 4;
2445 case CC_F6_5: return 5;
2446 case CC_F6_6: return 6;
2447 case CC_F6_7: return 7;
2448 default: return 0;
2452 /* calculate the speed the SB would run at given a set of clockcontrol values */
2453 uint32
2454 BCMINITFN(sb_clock_rate)(uint32 pll_type, uint32 n, uint32 m)
2456 uint32 n1, n2, clock, m1, m2, m3, mc;
2458 n1 = n & CN_N1_MASK;
2459 n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
2461 if (pll_type == PLL_TYPE6) {
2462 if (m & CC_T6_MMASK)
2463 return CC_T6_M1;
2464 else
2465 return CC_T6_M0;
2466 } else if ((pll_type == PLL_TYPE1) ||
2467 (pll_type == PLL_TYPE3) ||
2468 (pll_type == PLL_TYPE4) ||
2469 (pll_type == PLL_TYPE7)) {
2470 n1 = factor6(n1);
2471 n2 += CC_F5_BIAS;
2472 } else if (pll_type == PLL_TYPE2) {
2473 n1 += CC_T2_BIAS;
2474 n2 += CC_T2_BIAS;
2475 ASSERT((n1 >= 2) && (n1 <= 7));
2476 ASSERT((n2 >= 5) && (n2 <= 23));
2477 } else if (pll_type == PLL_TYPE5) {
2478 return (100000000);
2479 } else
2480 ASSERT(0);
2481 /* PLL types 3 and 7 use BASE2 (25Mhz) */
2482 if ((pll_type == PLL_TYPE3) ||
2483 (pll_type == PLL_TYPE7)) {
2484 clock = CC_CLOCK_BASE2 * n1 * n2;
2485 } else
2486 clock = CC_CLOCK_BASE1 * n1 * n2;
2488 if (clock == 0)
2489 return 0;
2491 m1 = m & CC_M1_MASK;
2492 m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
2493 m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
2494 mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
2496 if ((pll_type == PLL_TYPE1) ||
2497 (pll_type == PLL_TYPE3) ||
2498 (pll_type == PLL_TYPE4) ||
2499 (pll_type == PLL_TYPE7)) {
2500 m1 = factor6(m1);
2501 if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
2502 m2 += CC_F5_BIAS;
2503 else
2504 m2 = factor6(m2);
2505 m3 = factor6(m3);
2507 switch (mc) {
2508 case CC_MC_BYPASS: return (clock);
2509 case CC_MC_M1: return (clock / m1);
2510 case CC_MC_M1M2: return (clock / (m1 * m2));
2511 case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
2512 case CC_MC_M1M3: return (clock / (m1 * m3));
2513 default: return (0);
2515 } else {
2516 ASSERT(pll_type == PLL_TYPE2);
2518 m1 += CC_T2_BIAS;
2519 m2 += CC_T2M2_BIAS;
2520 m3 += CC_T2_BIAS;
2521 ASSERT((m1 >= 2) && (m1 <= 7));
2522 ASSERT((m2 >= 3) && (m2 <= 10));
2523 ASSERT((m3 >= 2) && (m3 <= 7));
2525 if ((mc & CC_T2MC_M1BYP) == 0)
2526 clock /= m1;
2527 if ((mc & CC_T2MC_M2BYP) == 0)
2528 clock /= m2;
2529 if ((mc & CC_T2MC_M3BYP) == 0)
2530 clock /= m3;
2532 return (clock);
2536 /* returns the current speed the SB is running at */
2537 uint32
2538 BCMINITFN(sb_clock)(sb_t *sbh)
2540 sb_info_t *si;
2541 chipcregs_t *cc;
2542 uint32 n, m;
2543 uint idx;
2544 uint32 pll_type, rate;
2545 uint intr_val = 0;
2547 si = SB_INFO(sbh);
2548 idx = si->curidx;
2549 pll_type = PLL_TYPE1;
2551 INTR_OFF(si, intr_val);
2553 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
2554 ASSERT(cc);
2556 if (sbh->cccaps & CC_CAP_PMU) {
2557 rate = sb_pmu_cpu_clock(sbh, si->osh);
2558 goto exit;
2561 pll_type = sbh->cccaps & CC_CAP_PLL_MASK;
2562 n = R_REG(si->osh, &cc->clockcontrol_n);
2563 if (pll_type == PLL_TYPE6)
2564 m = R_REG(si->osh, &cc->clockcontrol_m3);
2565 else if (pll_type == PLL_TYPE3)
2566 m = R_REG(si->osh, &cc->clockcontrol_m2);
2567 else
2568 m = R_REG(si->osh, &cc->clockcontrol_sb);
2570 if (sb_chip(sbh) == BCM5365_CHIP_ID)
2572 rate = 200000000; /* PLL_TYPE3 */
2573 } else if (sb_chip(sbh) == BCM5354_CHIP_ID)
2575 /* 5354 has a constant sb clock of 120MHz */
2576 rate = 120000000;
2577 } else {
2578 /* calculate rate */
2579 rate = sb_clock_rate(pll_type, n, m);
2582 if (pll_type == PLL_TYPE3)
2583 rate = rate / 2;
2585 exit:
2586 /* switch back to previous core */
2587 sb_setcoreidx(sbh, idx);
2589 INTR_RESTORE(si, intr_val);
2591 return rate;
2594 uint32
2595 BCMINITFN(sb_alp_clock)(sb_t *sbh)
2597 uint32 clock = ALP_CLOCK;
2599 if (sbh->cccaps & CC_CAP_PMU)
2600 clock = sb_pmu_alp_clock(sbh, sb_osh(sbh));
2602 return clock;
2605 /* change logical "focus" to the gpio core for optimized access */
2606 void*
2607 sb_gpiosetcore(sb_t *sbh)
2609 sb_info_t *si;
2611 si = SB_INFO(sbh);
2613 return (sb_setcoreidx(sbh, SB_CC_IDX));
2616 /* mask&set gpiocontrol bits */
2617 uint32
2618 sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2620 sb_info_t *si;
2621 uint regoff;
2623 si = SB_INFO(sbh);
2624 regoff = 0;
2626 /* gpios could be shared on router platforms
2627 * ignore reservation if it's high priority (e.g., test apps)
2629 if ((priority != GPIO_HI_PRIORITY) &&
2630 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2631 mask = priority ? (sb_gpioreservation & mask) :
2632 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2633 val &= mask;
2636 regoff = OFFSETOF(chipcregs_t, gpiocontrol);
2637 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2640 /* mask&set gpio output enable bits */
2641 uint32
2642 sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2644 sb_info_t *si;
2645 uint regoff;
2647 si = SB_INFO(sbh);
2648 regoff = 0;
2650 /* gpios could be shared on router platforms
2651 * ignore reservation if it's high priority (e.g., test apps)
2653 if ((priority != GPIO_HI_PRIORITY) &&
2654 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2655 mask = priority ? (sb_gpioreservation & mask) :
2656 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2657 val &= mask;
2660 regoff = OFFSETOF(chipcregs_t, gpioouten);
2661 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2664 /* mask&set gpio output bits */
2665 uint32
2666 sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2668 sb_info_t *si;
2669 uint regoff;
2671 si = SB_INFO(sbh);
2672 regoff = 0;
2674 /* gpios could be shared on router platforms
2675 * ignore reservation if it's high priority (e.g., test apps)
2677 if ((priority != GPIO_HI_PRIORITY) &&
2678 (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2679 mask = priority ? (sb_gpioreservation & mask) :
2680 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2681 val &= mask;
2684 regoff = OFFSETOF(chipcregs_t, gpioout);
2685 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2688 /* reserve one gpio */
2689 uint32
2690 sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2692 sb_info_t *si;
2694 si = SB_INFO(sbh);
2696 /* only cores on SB_BUS share GPIO's and only applcation users need to
2697 * reserve/release GPIO
2699 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2700 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2701 return -1;
2703 /* make sure only one bit is set */
2704 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2705 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2706 return -1;
2709 /* already reserved */
2710 if (sb_gpioreservation & gpio_bitmask)
2711 return -1;
2712 /* set reservation */
2713 sb_gpioreservation |= gpio_bitmask;
2715 return sb_gpioreservation;
2718 /* release one gpio */
2720 * releasing the gpio doesn't change the current value on the GPIO last write value
2721 * persists till some one overwrites it
2724 uint32
2725 sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2727 sb_info_t *si;
2729 si = SB_INFO(sbh);
2731 /* only cores on SB_BUS share GPIO's and only applcation users need to
2732 * reserve/release GPIO
2734 if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority)) {
2735 ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2736 return -1;
2738 /* make sure only one bit is set */
2739 if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2740 ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2741 return -1;
2744 /* already released */
2745 if (!(sb_gpioreservation & gpio_bitmask))
2746 return -1;
2748 /* clear reservation */
2749 sb_gpioreservation &= ~gpio_bitmask;
2751 return sb_gpioreservation;
2754 /* return the current gpioin register value */
2755 uint32
2756 sb_gpioin(sb_t *sbh)
2758 sb_info_t *si;
2759 uint regoff;
2761 si = SB_INFO(sbh);
2762 regoff = 0;
2764 regoff = OFFSETOF(chipcregs_t, gpioin);
2765 return (sb_corereg(sbh, SB_CC_IDX, regoff, 0, 0));
2768 /* mask&set gpio interrupt polarity bits */
2769 uint32
2770 sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2772 sb_info_t *si;
2773 uint regoff;
2775 si = SB_INFO(sbh);
2776 regoff = 0;
2778 /* gpios could be shared on router platforms */
2779 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2780 mask = priority ? (sb_gpioreservation & mask) :
2781 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2782 val &= mask;
2785 regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
2786 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2789 /* mask&set gpio interrupt mask bits */
2790 uint32
2791 sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2793 sb_info_t *si;
2794 uint regoff;
2796 si = SB_INFO(sbh);
2797 regoff = 0;
2799 /* gpios could be shared on router platforms */
2800 if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2801 mask = priority ? (sb_gpioreservation & mask) :
2802 ((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2803 val &= mask;
2806 regoff = OFFSETOF(chipcregs_t, gpiointmask);
2807 return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2810 /* assign the gpio to an led */
2811 uint32
2812 sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
2814 sb_info_t *si;
2816 si = SB_INFO(sbh);
2817 if (si->sb.ccrev < 16)
2818 return -1;
2820 /* gpio led powersave reg */
2821 return (sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
2824 /* mask&set gpio timer val */
2825 uint32
2826 sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
2828 sb_info_t *si;
2829 si = SB_INFO(sbh);
2831 if (si->sb.ccrev < 16)
2832 return -1;
2834 return (sb_corereg(sbh, SB_CC_IDX,
2835 OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
2838 uint32
2839 sb_gpiopull(sb_t *sbh, bool updown, uint32 mask, uint32 val)
2841 sb_info_t *si;
2842 uint offs;
2844 si = SB_INFO(sbh);
2845 if (si->sb.ccrev < 20)
2846 return -1;
2848 offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
2849 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2852 uint32
2853 sb_gpioevent(sb_t *sbh, uint regtype, uint32 mask, uint32 val)
2855 sb_info_t *si;
2856 uint offs;
2858 si = SB_INFO(sbh);
2859 if (si->sb.ccrev < 11)
2860 return -1;
2862 if (regtype == GPIO_REGEVT)
2863 offs = OFFSETOF(chipcregs_t, gpioevent);
2864 else if (regtype == GPIO_REGEVT_INTMSK)
2865 offs = OFFSETOF(chipcregs_t, gpioeventintmask);
2866 else if (regtype == GPIO_REGEVT_INTPOL)
2867 offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
2868 else
2869 return -1;
2871 return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2874 void*
2875 BCMINITFN(sb_gpio_handler_register)(sb_t *sbh, uint32 event,
2876 bool level, gpio_handler_t cb, void *arg)
2878 sb_info_t *si;
2879 gpioh_item_t *gi;
2881 ASSERT(event);
2882 ASSERT(cb);
2884 si = SB_INFO(sbh);
2885 if (si->sb.ccrev < 11)
2886 return NULL;
2888 if ((gi = MALLOC(si->osh, sizeof(gpioh_item_t))) == NULL)
2889 return NULL;
2891 bzero(gi, sizeof(gpioh_item_t));
2892 gi->event = event;
2893 gi->handler = cb;
2894 gi->arg = arg;
2895 gi->level = level;
2897 gi->next = si->gpioh_head;
2898 si->gpioh_head = gi;
2900 return (void*)(gi);
2903 void
2904 BCMINITFN(sb_gpio_handler_unregister)(sb_t *sbh, void* gpioh)
2906 sb_info_t *si;
2907 gpioh_item_t *p, *n;
2909 si = SB_INFO(sbh);
2910 if (si->sb.ccrev < 11)
2911 return;
2913 ASSERT(si->gpioh_head);
2914 if ((void*)si->gpioh_head == gpioh) {
2915 si->gpioh_head = si->gpioh_head->next;
2916 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
2917 return;
2919 else {
2920 p = si->gpioh_head;
2921 n = p->next;
2922 while (n) {
2923 if ((void*)n == gpioh) {
2924 p->next = n->next;
2925 MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
2926 return;
2928 p = n;
2929 n = n->next;
2933 ASSERT(0); /* Not found in list */
2936 void
2937 sb_gpio_handler_process(sb_t *sbh)
2939 sb_info_t *si;
2940 gpioh_item_t *h;
2941 uint32 status;
2942 uint32 level = sb_gpioin(sbh);
2943 uint32 edge = sb_gpioevent(sbh, GPIO_REGEVT, 0, 0);
2945 si = SB_INFO(sbh);
2946 for (h = si->gpioh_head; h != NULL; h = h->next) {
2947 if (h->handler) {
2948 status = (h->level ? level : edge);
2950 if (status & h->event)
2951 h->handler(status, h->arg);
2955 sb_gpioevent(sbh, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
2958 uint32
2959 sb_gpio_int_enable(sb_t *sbh, bool enable)
2961 sb_info_t *si;
2962 uint offs;
2964 si = SB_INFO(sbh);
2965 if (si->sb.ccrev < 11)
2966 return -1;
2968 offs = OFFSETOF(chipcregs_t, intmask);
2969 return (sb_corereg(sbh, SB_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
2973 /* return the slow clock source - LPO, XTAL, or PCI */
2974 static uint
2975 sb_slowclk_src(sb_info_t *si)
2977 chipcregs_t *cc;
2980 ASSERT(sb_coreid(&si->sb) == SB_CC);
2982 if (si->sb.ccrev < 6) {
2983 if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
2984 (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32)) &
2985 PCI_CFG_GPIO_SCS))
2986 return (SCC_SS_PCI);
2987 else
2988 return (SCC_SS_XTAL);
2989 } else if (si->sb.ccrev < 10) {
2990 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
2991 return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
2992 } else /* Insta-clock */
2993 return (SCC_SS_XTAL);
2996 /* return the ILP (slowclock) min or max frequency */
2997 static uint
2998 sb_slowclk_freq(sb_info_t *si, bool max_freq)
3000 chipcregs_t *cc;
3001 uint32 slowclk;
3002 uint div;
3005 ASSERT(sb_coreid(&si->sb) == SB_CC);
3007 cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
3009 /* shouldn't be here unless we've established the chip has dynamic clk control */
3010 ASSERT(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
3012 slowclk = sb_slowclk_src(si);
3013 if (si->sb.ccrev < 6) {
3014 if (slowclk == SCC_SS_PCI)
3015 return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
3016 else
3017 return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
3018 } else if (si->sb.ccrev < 10) {
3019 div = 4 * (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
3020 if (slowclk == SCC_SS_LPO)
3021 return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
3022 else if (slowclk == SCC_SS_XTAL)
3023 return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
3024 else if (slowclk == SCC_SS_PCI)
3025 return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
3026 else
3027 ASSERT(0);
3028 } else {
3029 /* Chipc rev 10 is InstaClock */
3030 div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
3031 div = 4 * (div + 1);
3032 return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
3034 return (0);
3037 static void
3038 BCMINITFN(sb_clkctl_setdelay)(sb_info_t *si, void *chipcregs)
3040 chipcregs_t * cc;
3041 uint slowmaxfreq, pll_delay, slowclk;
3042 uint pll_on_delay, fref_sel_delay;
3044 pll_delay = PLL_DELAY;
3046 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
3047 * since the xtal will also be powered down by dynamic clk control logic.
3050 slowclk = sb_slowclk_src(si);
3051 if (slowclk != SCC_SS_XTAL)
3052 pll_delay += XTAL_ON_DELAY;
3054 /* Starting with 4318 it is ILP that is used for the delays */
3055 slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
3057 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
3058 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
3060 cc = (chipcregs_t *)chipcregs;
3061 W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
3062 W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
3065 /* initialize power control delay registers */
3066 void
3067 BCMINITFN(sb_clkctl_init)(sb_t *sbh)
3069 sb_info_t *si;
3070 uint origidx;
3071 chipcregs_t *cc;
3073 si = SB_INFO(sbh);
3075 origidx = si->curidx;
3077 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
3078 return;
3080 if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
3081 W_REG(si->osh, &cc->chipcontrol,
3082 (si->sb.chiprev == 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);
3084 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL))
3085 goto done;
3087 /* set all Instaclk chip ILP to 1 MHz */
3088 if (si->sb.ccrev >= 10)
3089 SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
3090 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
3092 sb_clkctl_setdelay(si, (void *)(uintptr)cc);
3094 done:
3095 sb_setcoreidx(sbh, origidx);
3098 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
3099 uint16
3100 BCMINITFN(sb_clkctl_fast_pwrup_delay)(sb_t *sbh)
3102 sb_info_t *si;
3103 uint origidx;
3104 chipcregs_t *cc;
3105 uint slowminfreq;
3106 uint16 fpdelay;
3107 uint intr_val = 0;
3109 si = SB_INFO(sbh);
3110 fpdelay = 0;
3111 origidx = si->curidx;
3113 INTR_OFF(si, intr_val);
3115 if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
3116 goto done;
3118 if (sbh->cccaps & CC_CAP_PMU) {
3119 fpdelay = sb_pmu_fast_pwrup_delay(sbh, si->osh);
3120 goto done;
3123 if (!(sbh->cccaps & CC_CAP_PWR_CTL))
3124 goto done;
3126 slowminfreq = sb_slowclk_freq(si, FALSE);
3127 fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
3128 (slowminfreq - 1)) / slowminfreq;
3130 done:
3131 sb_setcoreidx(sbh, origidx);
3132 INTR_RESTORE(si, intr_val);
3133 return (fpdelay);
3136 /* turn primary xtal and/or pll off/on */
3138 sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
3140 sb_info_t *si;
3141 uint32 in, out, outen;
3143 si = SB_INFO(sbh);
3145 switch (BUSTYPE(si->sb.bustype)) {
3148 case PCMCIA_BUS:
3149 return (0);
3152 case PCI_BUS:
3154 /* pcie core doesn't have any mapping to control the xtal pu */
3155 if (PCIE(si))
3156 return -1;
3158 in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
3159 out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
3160 outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32));
3163 * Avoid glitching the clock if GPRS is already using it.
3164 * We can't actually read the state of the PLLPD so we infer it
3165 * by the value of XTAL_PU which *is* readable via gpioin.
3167 if (on && (in & PCI_CFG_GPIO_XTAL))
3168 return (0);
3170 if (what & XTAL)
3171 outen |= PCI_CFG_GPIO_XTAL;
3172 if (what & PLL)
3173 outen |= PCI_CFG_GPIO_PLL;
3175 if (on) {
3176 /* turn primary xtal on */
3177 if (what & XTAL) {
3178 out |= PCI_CFG_GPIO_XTAL;
3179 if (what & PLL)
3180 out |= PCI_CFG_GPIO_PLL;
3181 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3182 sizeof(uint32), out);
3183 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
3184 sizeof(uint32), outen);
3185 OSL_DELAY(XTAL_ON_DELAY);
3188 /* turn pll on */
3189 if (what & PLL) {
3190 out &= ~PCI_CFG_GPIO_PLL;
3191 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3192 sizeof(uint32), out);
3193 OSL_DELAY(2000);
3195 } else {
3196 if (what & XTAL)
3197 out &= ~PCI_CFG_GPIO_XTAL;
3198 if (what & PLL)
3199 out |= PCI_CFG_GPIO_PLL;
3200 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32), out);
3201 OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32),
3202 outen);
3205 default:
3206 return (-1);
3209 return (0);
3212 /* set dynamic clk control mode (forceslow, forcefast, dynamic) */
3213 /* returns true if we are forcing fast clock */
3214 bool
3215 sb_clkctl_clk(sb_t *sbh, uint mode)
3217 sb_info_t *si;
3218 uint origidx;
3219 chipcregs_t *cc;
3220 uint32 scc;
3221 uint intr_val = 0;
3223 si = SB_INFO(sbh);
3225 /* chipcommon cores prior to rev6 don't support dynamic clock control */
3226 if (si->sb.ccrev < 6)
3227 return (FALSE);
3230 /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
3231 ASSERT(si->sb.ccrev != 10);
3233 INTR_OFF(si, intr_val);
3235 origidx = si->curidx;
3237 if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
3238 (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
3239 goto done;
3241 if (FORCEHT_WAR32414(si))
3242 goto done;
3244 cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
3245 ASSERT(cc != NULL);
3247 if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL) && (si->sb.ccrev < 20))
3248 goto done;
3250 switch (mode) {
3251 case CLK_FAST: /* force fast (pll) clock */
3252 if (si->sb.ccrev < 10) {
3253 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
3254 sb_clkctl_xtal(&si->sb, XTAL, ON);
3256 SET_REG(si->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
3257 } else if (si->sb.ccrev < 20) {
3258 OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
3259 } else {
3260 OR_REG(si->osh, &cc->clk_ctl_st, CCS_FORCEHT);
3263 /* wait for the PLL */
3264 if (R_REG(si->osh, &cc->capabilities) & CC_CAP_PMU) {
3265 SPINWAIT(((R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL) == 0),
3266 PMU_MAX_TRANSITION_DLY);
3267 ASSERT(R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL);
3268 } else {
3269 OSL_DELAY(PLL_DELAY);
3271 break;
3273 case CLK_DYNAMIC: /* enable dynamic clock control */
3274 if (si->sb.ccrev < 10) {
3275 scc = R_REG(si->osh, &cc->slow_clk_ctl);
3276 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
3277 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
3278 scc |= SCC_XC;
3279 W_REG(si->osh, &cc->slow_clk_ctl, scc);
3281 /* for dynamic control, we have to release our xtal_pu "force on" */
3282 if (scc & SCC_XC)
3283 sb_clkctl_xtal(&si->sb, XTAL, OFF);
3284 } else if (si->sb.ccrev < 20) {
3285 /* Instaclock */
3286 AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
3287 } else {
3288 AND_REG(si->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
3290 break;
3292 default:
3293 ASSERT(0);
3296 done:
3297 sb_setcoreidx(sbh, origidx);
3298 INTR_RESTORE(si, intr_val);
3299 return (mode == CLK_FAST);
3302 /* register driver interrupt disabling and restoring callback functions */
3303 void
3304 sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn,
3305 void *intrsenabled_fn, void *intr_arg)
3307 sb_info_t *si;
3309 si = SB_INFO(sbh);
3310 si->intr_arg = intr_arg;
3311 si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
3312 si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
3313 si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
3314 /* save current core id. when this function called, the current core
3315 * must be the core which provides driver functions(il, et, wl, etc.)
3317 si->dev_coreid = si->coreid[si->curidx];
3320 void
3321 sb_deregister_intr_callback(sb_t *sbh)
3323 sb_info_t *si;
3325 si = SB_INFO(sbh);
3326 si->intrsoff_fn = NULL;
3330 uint16
3331 BCMINITFN(sb_d11_devid)(sb_t *sbh)
3333 sb_info_t *si = SB_INFO(sbh);
3334 uint16 device;
3336 #if defined(CONFIG_BCM4328)
3337 /* Fix device id for dual band BCM4328 */
3338 if (sbh->chip == BCM4328_CHIP_ID &&
3339 (sbh->chippkg == BCM4328USBDUAL_PKG_ID || sbh->chippkg == BCM4328SDIODUAL_PKG_ID))
3340 device = BCM4328_D11DUAL_ID;
3341 else
3342 #endif /* BCM4328 */
3343 /* Let an nvram variable with devpath override devid */
3344 if ((device = (uint16)sb_getdevpathintvar(sbh, "devid")) != 0)
3346 /* Get devid from OTP/SPROM depending on where the SROM is read */
3347 else if ((device = (uint16)getintvar(si->vars, "devid")) != 0)
3350 * no longer support wl0id, but keep the code
3351 * here for backward compatibility.
3353 else if ((device = (uint16)getintvar(si->vars, "wl0id")) != 0)
3355 /* Chip specific conversion */
3356 else if (sbh->chip == BCM4712_CHIP_ID) {
3357 if (sbh->chippkg == BCM4712SMALL_PKG_ID)
3358 device = BCM4306_D11G_ID;
3359 else
3360 device = BCM4306_D11DUAL_ID;
3362 /* ignore it */
3363 else
3364 device = 0xffff;
3366 return device;
3370 BCMINITFN(sb_corepciid)(sb_t *sbh, uint func, uint16 *pcivendor, uint16 *pcidevice,
3371 uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
3372 uint8 *pciheader)
3374 uint16 vendor = 0xffff, device = 0xffff;
3375 uint8 class, subclass, progif = 0;
3376 uint8 header = PCI_HEADER_NORMAL;
3377 uint32 core = sb_coreid(sbh);
3379 /* Verify whether the function exists for the core */
3380 if (func >= (uint)(core == SB_USB20H ? 2 : 1))
3381 return BCME_ERROR;
3383 /* Known vendor translations */
3384 switch (sb_corevendor(sbh)) {
3385 case SB_VEND_BCM:
3386 vendor = VENDOR_BROADCOM;
3387 break;
3388 default:
3389 return BCME_ERROR;
3392 /* Determine class based on known core codes */
3393 switch (core) {
3394 case SB_ILINE20:
3395 class = PCI_CLASS_NET;
3396 subclass = PCI_NET_ETHER;
3397 device = BCM47XX_ILINE_ID;
3398 break;
3399 case SB_ENET:
3400 class = PCI_CLASS_NET;
3401 subclass = PCI_NET_ETHER;
3402 device = BCM47XX_ENET_ID;
3403 break;
3404 case SB_GIGETH:
3405 class = PCI_CLASS_NET;
3406 subclass = PCI_NET_ETHER;
3407 device = BCM47XX_GIGETH_ID;
3408 break;
3409 case SB_SDRAM:
3410 case SB_MEMC:
3411 class = PCI_CLASS_MEMORY;
3412 subclass = PCI_MEMORY_RAM;
3413 device = (uint16)core;
3414 break;
3415 case SB_PCI:
3416 case SB_PCIE:
3417 class = PCI_CLASS_BRIDGE;
3418 subclass = PCI_BRIDGE_PCI;
3419 device = (uint16)core;
3420 header = PCI_HEADER_BRIDGE;
3421 break;
3422 case SB_MIPS33:
3423 class = PCI_CLASS_CPU;
3424 subclass = PCI_CPU_MIPS;
3425 device = (uint16)core;
3426 break;
3427 case SB_CODEC:
3428 class = PCI_CLASS_COMM;
3429 subclass = PCI_COMM_MODEM;
3430 device = BCM47XX_V90_ID;
3431 break;
3432 case SB_USB:
3433 class = PCI_CLASS_SERIAL;
3434 subclass = PCI_SERIAL_USB;
3435 progif = 0x10; /* OHCI */
3436 device = BCM47XX_USB_ID;
3437 break;
3438 case SB_USB11H:
3439 class = PCI_CLASS_SERIAL;
3440 subclass = PCI_SERIAL_USB;
3441 progif = 0x10; /* OHCI */
3442 device = BCM47XX_USBH_ID;
3443 break;
3444 case SB_USB20H:
3445 class = PCI_CLASS_SERIAL;
3446 subclass = PCI_SERIAL_USB;
3447 progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
3448 device = BCM47XX_USB20H_ID;
3449 header = 0x80; /* multifunction */
3450 break;
3451 case SB_IPSEC:
3452 class = PCI_CLASS_CRYPT;
3453 subclass = PCI_CRYPT_NETWORK;
3454 device = BCM47XX_IPSEC_ID;
3455 break;
3456 case SB_ROBO:
3457 class = PCI_CLASS_NET;
3458 subclass = PCI_NET_OTHER;
3459 device = BCM47XX_ROBO_ID;
3460 break;
3461 case SB_CC:
3462 class = PCI_CLASS_MEMORY;
3463 subclass = PCI_MEMORY_FLASH;
3464 device = (uint16)core;
3465 break;
3466 case SB_SATAXOR:
3467 class = PCI_CLASS_XOR;
3468 subclass = PCI_XOR_QDMA;
3469 device = BCM47XX_SATAXOR_ID;
3470 break;
3471 case SB_ATA100:
3472 class = PCI_CLASS_DASDI;
3473 subclass = PCI_DASDI_IDE;
3474 device = BCM47XX_ATA100_ID;
3475 break;
3476 case SB_USB11D:
3477 class = PCI_CLASS_SERIAL;
3478 subclass = PCI_SERIAL_USB;
3479 device = BCM47XX_USBD_ID;
3480 break;
3481 case SB_USB20D:
3482 class = PCI_CLASS_SERIAL;
3483 subclass = PCI_SERIAL_USB;
3484 device = BCM47XX_USB20D_ID;
3485 break;
3486 case SB_D11:
3487 class = PCI_CLASS_NET;
3488 subclass = PCI_NET_OTHER;
3489 device = sb_d11_devid(sbh);
3490 break;
3492 default:
3493 class = subclass = progif = 0xff;
3494 device = (uint16)core;
3495 break;
3498 *pcivendor = vendor;
3499 *pcidevice = device;
3500 *pciclass = class;
3501 *pcisubclass = subclass;
3502 *pciprogif = progif;
3503 *pciheader = header;
3505 return 0;
3508 /* use the mdio interface to read from mdio slaves */
3509 static int
3510 sb_pcie_mdioread(sb_info_t *si, uint physmedia, uint regaddr, uint *regval)
3512 uint mdiodata;
3513 uint i = 0;
3514 sbpcieregs_t *pcieregs;
3516 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3517 ASSERT(pcieregs);
3519 /* enable mdio access to SERDES */
3520 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3522 mdiodata = MDIODATA_START | MDIODATA_READ |
3523 (physmedia << MDIODATA_DEVADDR_SHF) |
3524 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA;
3526 W_REG(si->osh, &pcieregs->mdiodata, mdiodata);
3528 PR28829_DELAY();
3530 /* retry till the transaction is complete */
3531 while (i < 10) {
3532 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3533 PR28829_DELAY();
3534 *regval = (R_REG(si->osh, &(pcieregs->mdiodata)) & MDIODATA_MASK);
3535 /* Disable mdio access to SERDES */
3536 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3537 return 0;
3539 OSL_DELAY(1000);
3540 i++;
3543 SB_ERROR(("sb_pcie_mdioread: timed out\n"));
3544 /* Disable mdio access to SERDES */
3545 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3546 return 1;
3550 /* use the mdio interface to write to mdio slaves */
3551 static int
3552 sb_pcie_mdiowrite(sb_info_t *si, uint physmedia, uint regaddr, uint val)
3554 uint mdiodata;
3555 uint i = 0;
3556 sbpcieregs_t *pcieregs;
3558 pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3559 ASSERT(pcieregs);
3561 /* enable mdio access to SERDES */
3562 W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3564 mdiodata = MDIODATA_START | MDIODATA_WRITE |
3565 (physmedia << MDIODATA_DEVADDR_SHF) |
3566 (regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
3568 W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
3570 PR28829_DELAY();
3572 /* retry till the transaction is complete */
3573 while (i < 10) {
3574 if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3575 /* Disable mdio access to SERDES */
3576 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3577 return 0;
3579 OSL_DELAY(1000);
3580 i++;
3583 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
3584 /* Disable mdio access to SERDES */
3585 W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3586 return 1;
3590 /* indirect way to read pcie config regs */
3591 uint
3592 sb_pcie_readreg(void *sb, void* arg1, uint offset)
3594 sb_info_t *si;
3595 sb_t *sbh;
3596 uint retval = 0xFFFFFFFF;
3597 sbpcieregs_t *pcieregs;
3598 uint addrtype;
3600 sbh = (sb_t *)sb;
3601 si = SB_INFO(sbh);
3602 ASSERT(PCIE(si));
3604 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3605 ASSERT(pcieregs);
3607 addrtype = (uint)((uintptr)arg1);
3608 switch (addrtype) {
3609 case PCIE_CONFIGREGS:
3610 W_REG(si->osh, (&pcieregs->configaddr), offset);
3611 retval = R_REG(si->osh, &(pcieregs->configdata));
3612 break;
3613 case PCIE_PCIEREGS:
3614 W_REG(si->osh, &(pcieregs->pcieindaddr), offset);
3615 retval = R_REG(si->osh, &(pcieregs->pcieinddata));
3616 break;
3617 default:
3618 ASSERT(0);
3619 break;
3621 return retval;
3624 /* indirect way to write pcie config/mdio/pciecore regs */
3625 uint
3626 sb_pcie_writereg(sb_t *sbh, void *arg1, uint offset, uint val)
3628 sb_info_t *si;
3629 sbpcieregs_t *pcieregs;
3630 uint addrtype;
3632 si = SB_INFO(sbh);
3633 ASSERT(PCIE(si));
3635 pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3636 ASSERT(pcieregs);
3638 addrtype = (uint)((uintptr)arg1);
3640 switch (addrtype) {
3641 case PCIE_CONFIGREGS:
3642 W_REG(si->osh, (&pcieregs->configaddr), offset);
3643 W_REG(si->osh, (&pcieregs->configdata), val);
3644 break;
3645 case PCIE_PCIEREGS:
3646 W_REG(si->osh, (&pcieregs->pcieindaddr), offset);
3647 W_REG(si->osh, (&pcieregs->pcieinddata), val);
3648 break;
3649 default:
3650 ASSERT(0);
3651 break;
3653 return 0;
3657 /* Build device path. Support SB, PCI, and JTAG for now. */
3659 BCMINITFN(sb_devpath)(sb_t *sbh, char *path, int size)
3661 int slen;
3662 ASSERT(path);
3663 ASSERT(size >= SB_DEVPATH_BUFSZ);
3665 if (!path || size <= 0)
3666 return -1;
3668 switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
3669 case SB_BUS:
3670 case JTAG_BUS:
3671 slen = snprintf(path, (size_t)size, "sb/%u/", sb_coreidx(sbh));
3672 break;
3673 case PCI_BUS:
3674 ASSERT((SB_INFO(sbh))->osh);
3675 slen = snprintf(path, (size_t)size, "pci/%u/%u/",
3676 OSL_PCI_BUS((SB_INFO(sbh))->osh),
3677 OSL_PCI_SLOT((SB_INFO(sbh))->osh));
3678 break;
3679 case PCMCIA_BUS:
3680 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
3681 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
3682 slen = snprintf(path, (size_t)size, "pc/1/1/");
3683 break;
3684 default:
3685 slen = -1;
3686 ASSERT(0);
3687 break;
3690 if (slen < 0 || slen >= size) {
3691 path[0] = '\0';
3692 return -1;
3695 return 0;
3698 /* Get a variable, but only if it has a devpath prefix */
3699 char *
3700 BCMINITFN(sb_getdevpathvar)(sb_t *sbh, const char *name)
3702 char varname[SB_DEVPATH_BUFSZ + 32];
3704 sb_devpathvar(sbh, varname, sizeof(varname), name);
3706 return (getvar(NULL, varname));
3709 /* Get a variable, but only if it has a devpath prefix */
3711 BCMINITFN(sb_getdevpathintvar)(sb_t *sbh, const char *name)
3713 char varname[SB_DEVPATH_BUFSZ + 32];
3715 sb_devpathvar(sbh, varname, sizeof(varname), name);
3717 return (getintvar(NULL, varname));
3720 /* Concatenate the dev path with a varname into the given 'var' buffer
3721 * and return the 'var' pointer.
3722 * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned.
3723 * On overflow, the first char will be set to '\0'.
3725 static char *
3726 BCMINITFN(sb_devpathvar)(sb_t *sbh, char *var, int len, const char *name)
3728 uint path_len;
3730 if (!var || len <= 0)
3731 return var;
3733 if (sb_devpath(sbh, var, len) == 0) {
3734 path_len = strlen(var);
3736 if (strlen(name) + 1 > (uint)(len - path_len))
3737 var[0] = '\0';
3738 else
3739 strncpy(var + path_len, name, len - path_len - 1);
3742 return var;
3747 * Fixup SROMless PCI device's configuration.
3748 * The current core may be changed upon return.
3750 static int
3751 sb_pci_fixcfg(sb_info_t *si)
3753 uint origidx, pciidx;
3754 sbpciregs_t *pciregs;
3755 sbpcieregs_t *pcieregs = NULL;
3756 uint16 val16, *reg16;
3757 uint32 w;
3759 ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
3761 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
3762 /* save the current index */
3763 origidx = sb_coreidx(&si->sb);
3765 /* check 'pi' is correct and fix it if not */
3766 if (si->sb.buscoretype == SB_PCIE) {
3767 pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
3768 ASSERT(pcieregs);
3769 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
3770 } else if (si->sb.buscoretype == SB_PCI) {
3771 pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
3772 ASSERT(pciregs);
3773 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
3774 } else {
3775 ASSERT(0);
3776 return -1;
3778 pciidx = sb_coreidx(&si->sb);
3779 val16 = R_REG(si->osh, reg16);
3780 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
3781 val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
3782 W_REG(si->osh, reg16, val16);
3785 if (PCIE_ASPMWARS(si)) {
3786 w = sb_pcie_readreg((void *)(uintptr)&si->sb, (void *)PCIE_PCIEREGS,
3787 PCIE_PLP_STATUSREG);
3789 /* Detect the current polarity at attach and force that polarity and
3790 * disable changing the polarity
3792 if ((w & PCIE_PLP_POLARITYINV_STAT) == 0) {
3793 si->pcie_polarity = (SERDES_RX_CTRL_FORCE);
3794 } else {
3795 si->pcie_polarity = (SERDES_RX_CTRL_FORCE |
3796 SERDES_RX_CTRL_POLARITY);
3799 w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
3800 if (w & PCIE_CLKREQ_ENAB) {
3801 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
3802 val16 = R_REG(si->osh, reg16);
3803 /* if clockreq is not advertized clkreq should not be enabled */
3804 if (!(val16 & SRSH_CLKREQ_ENB))
3805 SB_ERROR(("WARNING: CLK REQ enabled already 0x%x\n", w));
3808 sb_war43448(&si->sb);
3810 sb_war42767(&si->sb);
3814 /* restore the original index */
3815 sb_setcoreidx(&si->sb, origidx);
3817 return 0;
3820 /* Return ADDR64 capability of the backplane */
3821 bool
3822 sb_backplane64(sb_t *sbh)
3824 sb_info_t *si;
3826 si = SB_INFO(sbh);
3827 return ((si->sb.cccaps & CC_CAP_BKPLN64) != 0);
3830 void
3831 sb_btcgpiowar(sb_t *sbh)
3833 sb_info_t *si;
3834 uint origidx;
3835 uint intr_val = 0;
3836 chipcregs_t *cc;
3837 si = SB_INFO(sbh);
3839 /* Make sure that there is ChipCommon core present &&
3840 * UART_TX is strapped to 1
3842 if (!(si->sb.cccaps & CC_CAP_UARTGPIO))
3843 return;
3845 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3846 INTR_OFF(si, intr_val);
3848 origidx = sb_coreidx(sbh);
3850 cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
3851 ASSERT(cc);
3853 W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
3855 /* restore the original index */
3856 sb_setcoreidx(sbh, origidx);
3858 INTR_RESTORE(si, intr_val);
3861 /* check if the device is removed */
3862 bool
3863 sb_deviceremoved(sb_t *sbh)
3865 uint32 w;
3866 sb_info_t *si;
3868 si = SB_INFO(sbh);
3870 switch (BUSTYPE(si->sb.bustype)) {
3871 case PCI_BUS:
3872 ASSERT(si->osh);
3873 w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
3874 if ((w & 0xFFFF) != VENDOR_BROADCOM)
3875 return TRUE;
3876 else
3877 return FALSE;
3878 default:
3879 return FALSE;
3881 return FALSE;
3884 /* Return the RAM size of the SOCRAM core */
3885 uint32
3886 BCMINITFN(sb_socram_size)(sb_t *sbh)
3888 sb_info_t *si;
3889 uint origidx;
3890 uint intr_val = 0;
3892 sbsocramregs_t *regs;
3893 bool wasup;
3894 uint corerev;
3895 uint32 coreinfo;
3896 uint memsize = 0;
3898 si = SB_INFO(sbh);
3899 ASSERT(si);
3901 /* Block ints and save current core */
3902 INTR_OFF(si, intr_val);
3903 origidx = sb_coreidx(sbh);
3905 /* Switch to SOCRAM core */
3906 if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
3907 goto done;
3909 /* Get info for determining size */
3910 if (!(wasup = sb_iscoreup(sbh)))
3911 sb_core_reset(sbh, 0, 0);
3912 corerev = sb_corerev(sbh);
3913 coreinfo = R_REG(si->osh, &regs->coreinfo);
3915 /* Calculate size from coreinfo based on rev */
3916 if (corerev == 0)
3917 memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
3918 else if (corerev < 3) {
3919 memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
3920 memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3922 else {
3923 uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3924 uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
3925 uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
3926 if (lss != 0)
3927 nb --;
3928 memsize = nb * (1 << (bsz + SR_BSZ_BASE));
3929 if (lss != 0)
3930 memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
3932 /* Return to previous state and core */
3933 if (!wasup)
3934 sb_core_disable(sbh, 0);
3935 sb_setcoreidx(sbh, origidx);
3937 done:
3938 INTR_RESTORE(si, intr_val);
3939 return memsize;