2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright 2007, Broadcom Corporation
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
#define SB_ERROR(args)	/* debug error prints disabled: macro expands to nothing */
/* Callback types the client driver registers so its interrupts can be
 * masked, restored, and queried around backplane core switches
 * (used by the INTR_OFF/INTR_RESTORE macros below).
 */
typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*sb_intrsenabled_t)(void *intr_arg);
41 typedef struct gpioh_item
{
44 gpio_handler_t handler
;
46 struct gpioh_item
*next
;
49 /* misc sb info needed by some of the routines */
50 typedef struct sb_info
{
52 struct sb_pub sb
; /* back plane public state (must be first field) */
54 void *osh
; /* osl os handle */
55 void *sdh
; /* bcmsdh handle */
57 void *curmap
; /* current regs va */
58 void *regs
[SB_MAXCORES
]; /* other regs va */
60 uint curidx
; /* current core index */
61 uint dev_coreid
; /* the core provides driver functions */
63 bool memseg
; /* flag to toggle MEM_SEG register */
65 uint numcores
; /* # discovered cores */
66 uint coreid
[SB_MAXCORES
]; /* id of each core */
67 uint32 coresba
[SB_MAXCORES
]; /* backplane address of each core */
69 void *intr_arg
; /* interrupt callback function arg */
70 sb_intrsoff_t intrsoff_fn
; /* turns chip interrupts off */
71 sb_intrsrestore_t intrsrestore_fn
; /* restore chip interrupts */
72 sb_intrsenabled_t intrsenabled_fn
; /* check if interrupts are enabled */
74 uint8 pciecap_lcreg_offset
; /* PCIE capability LCreg offset in the config space */
77 bool pcie_war_ovr
; /* Override ASPM/Clkreq settings */
79 uint8 pmecap_offset
; /* PM Capability offset in the config space */
80 bool pmecap
; /* Capable of generating PME */
82 gpioh_item_t
*gpioh_head
; /* GPIO event handlers list */
88 /* local prototypes */
89 static sb_info_t
* sb_doattach(sb_info_t
*si
, uint devid
, osl_t
*osh
, void *regs
,
90 uint bustype
, void *sdh
, char **vars
, uint
*varsz
);
91 static void sb_scan(sb_info_t
*si
, void *regs
, uint devid
);
92 static uint
_sb_coreidx(sb_info_t
*si
, uint32 sba
);
93 static uint
_sb_scan(sb_info_t
*si
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
,
95 static uint32
_sb_coresba(sb_info_t
*si
);
96 static void *_sb_setcoreidx(sb_info_t
*si
, uint coreidx
);
97 static uint
sb_chip2numcores(uint chip
);
98 static bool sb_ispcie(sb_info_t
*si
);
99 static uint8
sb_find_pci_capability(sb_info_t
*si
, uint8 req_cap_id
,
100 uchar
*buf
, uint32
*buflen
);
101 static int sb_pci_fixcfg(sb_info_t
*si
);
102 /* routines to access mdio slave device registers */
103 static int sb_pcie_mdiowrite(sb_info_t
*si
, uint physmedia
, uint readdr
, uint val
);
104 static int sb_pcie_mdioread(sb_info_t
*si
, uint physmedia
, uint readdr
, uint
*ret_val
);
106 /* dev path concatenation util */
107 static char *sb_devpathvar(sb_t
*sbh
, char *var
, int len
, const char *name
);
110 static void sb_war43448(sb_t
*sbh
);
111 static void sb_war43448_aspm(sb_t
*sbh
);
112 static void sb_war32414_forceHT(sb_t
*sbh
, bool forceHT
);
113 static void sb_war30841(sb_info_t
*si
);
114 static void sb_war42767(sb_t
*sbh
);
115 static void sb_war42767_clkreq(sb_t
*sbh
);
/* delay needed between the mdio control/ mdiodata register data access */
#define PR28829_DELAY() OSL_DELAY(10)

/* size that can take bitfielddump */
#define BITFIELD_DUMP_SIZE 32

/* global variable to indicate reservation/release of gpio's */
static uint32 sb_gpioreservation = 0;

/* global flag to prevent shared resources from being initialized multiple times in sb_attach() */
static bool sb_onetimeinit = FALSE;

/* Recover the private sb_info_t from a public sb_t handle (sb is its first field) */
#define SB_INFO(sbh) (sb_info_t*)(uintptr)sbh
/* Read-modify-write of a backplane register */
#define SET_SBREG(si, r, mask, val) \
	W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
/* True if core address x lies inside the enumeration window starting at b
 * and is aligned to a core register window */
#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SB_MAXCORES * SB_CORE_SIZE)) && \
	ISALIGNED((x), SB_CORE_SIZE))
#define GOODREGS(regs) ((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
/* sbconfig registers sit at offset SBCONFIGOFF within a core's register window */
#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
#define BADCOREADDR 0
#define GOODIDX(idx) (((uint)idx) < SB_MAXCORES)
#define BADIDX (SB_MAXCORES+1)
#define NOREV -1 /* Invalid rev */

/* Identify which host bus core is in use */
#define PCI(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
#define PCIE(si) ((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
#define PCMCIA(si) ((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->memseg == TRUE))

/* Sonics backplane revision codes */
#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* Backplane register accessors; the functions behind them handle the
 * PCMCIA narrow-address quirk (see sb_read_sbreg/sb_write_sbreg) */
#define R_SBREG(si, sbr) sb_read_sbreg((si), (sbr))
#define W_SBREG(si, sbr, v) sb_write_sbreg((si), (sbr), (v))
#define AND_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
#define OR_SBREG(si, sbr, v) W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))
155 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
* after core switching to avoid invalid register access inside ISR.
/* NOTE(review): these macros expand to a bare 'if' statement (no
 * do { } while (0) wrapper), so an 'else' placed right after an unbraced
 * use would bind to the macro's internal 'if'. Callers must keep this
 * in mind when invoking them.
 */
#define INTR_OFF(si, intr_val) \
	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
#define INTR_RESTORE(si, intr_val) \
	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }

/* dynamic clock control defines */
#define LPOMINFREQ 25000 /* low power oscillator min */
#define LPOMAXFREQ 43000 /* low power oscillator max */
#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
#define PCIMINFREQ 25000000 /* 25 MHz */
#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */

#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
176 /* force HT war check on non-mips platforms
This WAR seems to introduce a significant slowdown on
4704 mips routers, where the problem itself never shows.
/* NOTE(review): two back-to-back definitions of FORCEHT_WAR32414 appear
 * below; the conditional between them (#if / #else, presumably on __mips__
 * judging by the #endif comment) seems to have been lost from this copy --
 * confirm against the original source.
 */
#define FORCEHT_WAR32414(si) \
	(((PCIE(si)) && (si->sb.chip == BCM4311_CHIP_ID) && ((si->sb.chiprev <= 1))) || \
	((PCI(si) || PCIE(si)) && (si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3)))

#define FORCEHT_WAR32414(si) 0
#endif /* __mips__ */

/* PCIE ASPM workarounds apply only to PCIE bus core revs 3..5 */
#define PCIE_ASPMWARS(si) \
	((PCIE(si)) && ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)))

/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */

#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
199 sb_read_sbreg(sb_info_t
*si
, volatile uint32
*sbr
)
202 uint32 val
, intr_val
= 0;
* compact flash only has an 11-bit address, while we need a 12-bit address.
* MEM_SEG will be OR'd with the other 11 address bits in hardware,
* so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
* For normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
212 INTR_OFF(si
, intr_val
);
214 OSL_PCMCIA_WRITE_ATTR(si
->osh
, MEM_SEG
, &tmp
, 1);
215 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
218 val
= R_REG(si
->osh
, sbr
);
222 OSL_PCMCIA_WRITE_ATTR(si
->osh
, MEM_SEG
, &tmp
, 1);
223 INTR_RESTORE(si
, intr_val
);
230 sb_write_sbreg(sb_info_t
*si
, volatile uint32
*sbr
, uint32 v
)
233 volatile uint32 dummy
;
* compact flash only has an 11-bit address, while we need a 12-bit address.
* MEM_SEG will be OR'd with the other 11 address bits in hardware,
* so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
* For normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
244 INTR_OFF(si
, intr_val
);
246 OSL_PCMCIA_WRITE_ATTR(si
->osh
, MEM_SEG
, &tmp
, 1);
247 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
250 if (BUSTYPE(si
->sb
.bustype
) == PCMCIA_BUS
) {
252 dummy
= R_REG(si
->osh
, sbr
);
253 W_REG(si
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
254 dummy
= R_REG(si
->osh
, sbr
);
255 W_REG(si
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
257 dummy
= R_REG(si
->osh
, sbr
);
258 W_REG(si
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
259 dummy
= R_REG(si
->osh
, sbr
);
260 W_REG(si
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
261 #endif /* IL_BIGENDIAN */
263 W_REG(si
->osh
, sbr
, v
);
267 OSL_PCMCIA_WRITE_ATTR(si
->osh
, MEM_SEG
, &tmp
, 1);
268 INTR_RESTORE(si
, intr_val
);
273 * Allocate a sb handle.
274 * devid - pci device id (used to determine chip#)
275 * osh - opaque OS handle
276 * regs - virtual address of initial core registers
277 * bustype - pci/pcmcia/sb/sdio/etc
278 * vars - pointer to a pointer area for "environment" variables
279 * varsz - pointer to int to return the size of the vars
282 BCMINITFN(sb_attach
)(uint devid
, osl_t
*osh
, void *regs
,
283 uint bustype
, void *sdh
, char **vars
, uint
*varsz
)
287 /* alloc sb_info_t */
288 if ((si
= MALLOC(osh
, sizeof (sb_info_t
))) == NULL
) {
289 SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh
)));
293 if (sb_doattach(si
, devid
, osh
, regs
, bustype
, sdh
, vars
, varsz
) == NULL
) {
294 MFREE(osh
, si
, sizeof(sb_info_t
));
297 si
->vars
= vars
? *vars
: NULL
;
298 si
->varsz
= varsz
? *varsz
: 0;
303 /* Using sb_kattach depends on SB_BUS support, either implicit */
304 /* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
305 #if !defined(CONFIG_BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
307 /* global kernel resource */
308 static sb_info_t ksi
;
310 /* generic kernel variant of sb_attach() */
312 BCMINITFN(sb_kattach
)(osl_t
*osh
)
314 static bool ksi_attached
= FALSE
;
317 void *regs
= (void *)REG_MAP(SB_ENUM_BASE
, SB_CORE_SIZE
);
319 if (sb_doattach(&ksi
, BCM4710_DEVICE_ID
, osh
, regs
,
321 osh
!= SB_OSH
? &ksi
.vars
: NULL
,
322 osh
!= SB_OSH
? &ksi
.varsz
: NULL
) == NULL
) {
323 SB_ERROR(("sb_kattach: sb_doattach failed\n"));
332 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
335 BCMINITFN(sb_doattach
)(sb_info_t
*si
, uint devid
, osl_t
*osh
, void *regs
,
336 uint bustype
, void *sdh
, char **vars
, uint
*varsz
)
344 ASSERT(GOODREGS(regs
));
346 bzero((uchar
*)si
, sizeof(sb_info_t
));
348 si
->sb
.buscoreidx
= BADIDX
;
354 /* check to see if we are a sb core mimic'ing a pci core */
355 if (bustype
== PCI_BUS
) {
356 if (OSL_PCI_READ_CONFIG(si
->osh
, PCI_SPROM_CONTROL
, sizeof(uint32
)) == 0xffffffff) {
357 SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
358 "devid:0x%x\n", __FUNCTION__
, devid
));
362 si
->sb
.bustype
= bustype
;
363 if (si
->sb
.bustype
!= BUSTYPE(si
->sb
.bustype
)) {
364 SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
365 si
->sb
.bustype
, BUSTYPE(si
->sb
.bustype
)));
369 /* need to set memseg flag for CF card first before any sb registers access */
370 if (BUSTYPE(si
->sb
.bustype
) == PCMCIA_BUS
)
373 /* kludge to enable the clock on the 4306 which lacks a slowclock */
374 if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
&& !sb_ispcie(si
))
375 sb_clkctl_xtal(&si
->sb
, XTAL
|PLL
, ON
);
377 if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
) {
378 w
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_BAR0_WIN
, sizeof(uint32
));
379 if (!GOODCOREADDR(w
, SB_ENUM_BASE
))
380 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_BAR0_WIN
, sizeof(uint32
), SB_ENUM_BASE
);
384 /* get sonics backplane revision */
386 si
->sb
.sonicsrev
= (R_SBREG(si
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
389 sb_scan(si
, regs
, devid
);
391 /* no cores found, bail out */
392 if (si
->numcores
== 0) {
393 SB_ERROR(("sb_doattach: could not find any cores\n"));
397 /* save the current core index */
398 origidx
= si
->curidx
;
400 /* don't go beyond if there is no chipc core in the chip */
401 if (!(cc
= sb_setcore(&si
->sb
, SB_CC
, 0)))
404 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
&&
405 (si
->sb
.chip
== BCM4712_CHIP_ID
) &&
406 (si
->sb
.chippkg
!= BCM4712LARGE_PKG_ID
) &&
407 (si
->sb
.chiprev
<= 3))
408 OR_REG(si
->osh
, &cc
->slow_clk_ctl
, SCC_SS_XTAL
);
410 /* fixup necessary chip/core configurations */
411 if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
&& sb_pci_fixcfg(si
)) {
412 SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
417 /* Switch back to the original core, nvram/srom init needs it */
418 sb_setcoreidx(&si
->sb
, origidx
);
420 /* Init nvram from flash if it exists */
421 nvram_init((void *)&si
->sb
);
423 /* Init nvram from sprom/otp if they exist */
424 if (srom_var_init(&si
->sb
, BUSTYPE(si
->sb
.bustype
), regs
, si
->osh
, vars
, varsz
)) {
425 SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
428 pvars
= vars
? *vars
: NULL
;
430 /* PMU specific initializations */
431 if ((si
->sb
.cccaps
& CC_CAP_PMU
) && !sb_onetimeinit
) {
432 sb_pmu_init(&si
->sb
, si
->osh
);
433 /* Find out Crystal frequency and init PLL */
434 sb_pmu_pll_init(&si
->sb
, si
->osh
, getintvar(pvars
, "xtalfreq"));
435 /* Initialize PMU resources (up/dn timers, dep masks, etc.) */
436 sb_pmu_res_init(&si
->sb
, si
->osh
);
439 if (BUSTYPE(si
->sb
.bustype
) == PCMCIA_BUS
) {
440 w
= getintvar(pvars
, "regwindowsz");
441 si
->memseg
= (w
<= CFTABLE_REGWIN_2K
) ? TRUE
: FALSE
;
444 /* get boardtype and boardrev */
445 switch (BUSTYPE(si
->sb
.bustype
)) {
447 /* do a pci config read to get subsystem id and subvendor id */
448 w
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_CFG_SVID
, sizeof(uint32
));
449 /* Let nvram variables override subsystem Vend/ID */
450 if ((si
->sb
.boardvendor
= (uint16
)sb_getdevpathintvar(&si
->sb
, "boardvendor")) == 0)
451 si
->sb
.boardvendor
= w
& 0xffff;
453 SB_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
454 si
->sb
.boardvendor
, w
& 0xffff));
455 if ((si
->sb
.boardtype
= (uint16
)sb_getdevpathintvar(&si
->sb
, "boardtype")) == 0)
456 si
->sb
.boardtype
= (w
>> 16) & 0xffff;
458 SB_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
459 si
->sb
.boardtype
, (w
>> 16) & 0xffff));
463 si
->sb
.boardvendor
= getintvar(pvars
, "manfid");
464 si
->sb
.boardtype
= getintvar(pvars
, "prodid");
469 si
->sb
.boardvendor
= VENDOR_BROADCOM
;
470 if (pvars
== NULL
|| ((si
->sb
.boardtype
= getintvar(pvars
, "prodid")) == 0))
471 if ((si
->sb
.boardtype
= getintvar(NULL
, "boardtype")) == 0)
472 si
->sb
.boardtype
= 0xffff;
476 if (si
->sb
.boardtype
== 0) {
477 SB_ERROR(("sb_doattach: unknown board type\n"));
478 ASSERT(si
->sb
.boardtype
);
481 si
->sb
.boardflags
= getintvar(pvars
, "boardflags");
483 /* setup the GPIO based LED powersave register */
484 if (si
->sb
.ccrev
>= 16) {
485 if ((pvars
== NULL
) || ((w
= getintvar(pvars
, "leddc")) == 0))
486 w
= DEFAULT_GPIOTIMERVAL
;
487 sb_corereg(&si
->sb
, SB_CC_IDX
, OFFSETOF(chipcregs_t
, gpiotimerval
), ~0, w
);
490 /* Determine if this board needs override */
491 if (PCIE(si
) && (si
->sb
.chip
== BCM4321_CHIP_ID
))
492 si
->pcie_war_ovr
= ((si
->sb
.boardvendor
== VENDOR_APPLE
) &&
493 ((uint8
)getintvar(pvars
, "sromrev") == 4) &&
494 ((uint8
)getintvar(pvars
, "boardrev") <= 0x71)) ||
495 ((uint32
)getintvar(pvars
, "boardflags2") & BFL2_PCIEWAR_OVR
);
497 if (PCIE_ASPMWARS(si
)) {
498 sb_war43448_aspm((void *)si
);
499 sb_war42767_clkreq((void *)si
);
502 if (FORCEHT_WAR32414(si
)) {
503 si
->sb
.pr32414
= TRUE
;
504 sb_clkctl_init(&si
->sb
);
505 sb_war32414_forceHT(&si
->sb
, 1);
508 if (PCIE(si
) && ((si
->sb
.buscorerev
== 6) || (si
->sb
.buscorerev
== 7)))
509 si
->sb
.pr42780
= TRUE
;
511 if (PCIE_ASPMWARS(si
))
512 sb_pcieclkreq(&si
->sb
, 1, 0);
515 (((si
->sb
.chip
== BCM4311_CHIP_ID
) && (si
->sb
.chiprev
== 2)) ||
516 ((si
->sb
.chip
== BCM4312_CHIP_ID
) && (si
->sb
.chiprev
== 0))))
517 sb_set_initiator_to(&si
->sb
, 0x3, sb_findcoreidx(&si
->sb
, SB_D11
, 0));
519 /* Disable gpiopullup and gpiopulldown */
520 if (!sb_onetimeinit
&& si
->sb
.ccrev
>= 20) {
521 cc
= (chipcregs_t
*)sb_setcore(&si
->sb
, SB_CC
, 0);
522 W_REG(osh
, &cc
->gpiopullup
, 0);
523 W_REG(osh
, &cc
->gpiopulldown
, 0);
524 sb_setcoreidx(&si
->sb
, origidx
);
529 sb_onetimeinit
= TRUE
;
535 /* Enable/Disable clkreq for PCIE (4311B0/4321B1) */
537 BCMINITFN(sb_war42780_clkreq
)(sb_t
*sbh
, bool clkreq
)
543 /* Don't change clkreq value if serdespll war has not yet been applied */
544 if (!si
->pr42767_war
&& PCIE_ASPMWARS(si
))
547 sb_pcieclkreq(sbh
, 1, (int32
)clkreq
);
551 BCMINITFN(sb_war43448
)(sb_t
*sbh
)
557 /* if not pcie bus, we're done */
558 if (!PCIE(si
) || !PCIE_ASPMWARS(si
))
561 /* Restore the polarity */
562 if (si
->pcie_polarity
!= 0)
563 sb_pcie_mdiowrite((void *)(uintptr
)&si
->sb
, MDIODATA_DEV_RX
,
564 SERDES_RX_CTRL
, si
->pcie_polarity
);
568 BCMINITFN(sb_war43448_aspm
)(sb_t
*sbh
)
571 uint16 val16
, *reg16
;
572 sbpcieregs_t
*pcieregs
;
577 /* if not pcie bus, we're done */
578 if (!PCIE(si
) || !PCIE_ASPMWARS(si
))
581 /* no ASPM stuff on QT or VSIM */
582 if (si
->sb
.chippkg
== HDLSIM_PKG_ID
|| si
->sb
.chippkg
== HWSIM_PKG_ID
)
585 pcieregs
= (sbpcieregs_t
*) sb_setcoreidx(sbh
, si
->sb
.buscoreidx
);
587 /* Enable ASPM in the shadow SROM and Link control */
588 reg16
= &pcieregs
->sprom
[SRSH_ASPM_OFFSET
];
589 val16
= R_REG(si
->osh
, reg16
);
590 if (!si
->pcie_war_ovr
)
591 val16
|= SRSH_ASPM_ENB
;
593 val16
&= ~SRSH_ASPM_ENB
;
594 W_REG(si
->osh
, reg16
, val16
);
596 w
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pciecap_lcreg_offset
, sizeof(uint32
));
597 if (!si
->pcie_war_ovr
)
600 w
&= ~PCIE_ASPM_ENAB
;
601 OSL_PCI_WRITE_CONFIG(si
->osh
, si
->pciecap_lcreg_offset
, sizeof(uint32
), w
);
605 BCMINITFN(sb_war32414_forceHT
)(sb_t
*sbh
, bool forceHT
)
612 ASSERT(FORCEHT_WAR32414(si
));
617 sb_corereg(sbh
, SB_CC_IDX
, OFFSETOF(chipcregs_t
, system_clk_ctl
),
628 sb
= REGS2SB(si
->curmap
);
630 return ((R_SBREG(si
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >> SBIDH_CC_SHIFT
);
640 sb
= REGS2SB(si
->curmap
);
642 return R_SBREG(si
, &sb
->sbtpsflag
) & SBTPS_NUM0_MASK
;
646 sb_coreidx(sb_t
*sbh
)
654 /* return core index of the core with address 'sba' */
656 BCMINITFN(_sb_coreidx
)(sb_info_t
*si
, uint32 sba
)
660 for (i
= 0; i
< si
->numcores
; i
++)
661 if (sba
== si
->coresba
[i
])
666 /* return core address of the current core */
668 BCMINITFN(_sb_coresba
)(sb_info_t
*si
)
672 switch (BUSTYPE(si
->sb
.bustype
)) {
674 sbconfig_t
*sb
= REGS2SB(si
->curmap
);
675 sbaddr
= sb_base(R_SBREG(si
, &sb
->sbadmatch0
));
680 sbaddr
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_BAR0_WIN
, sizeof(uint32
));
685 OSL_PCMCIA_READ_ATTR(si
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
686 sbaddr
= (uint32
)tmp
<< 12;
687 OSL_PCMCIA_READ_ATTR(si
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
688 sbaddr
|= (uint32
)tmp
<< 16;
689 OSL_PCMCIA_READ_ATTR(si
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
690 sbaddr
|= (uint32
)tmp
<< 24;
697 sbaddr
= (uint32
)(uintptr
)si
->curmap
;
702 sbaddr
= BADCOREADDR
;
706 SB_MSG(("_sb_coresba: current core is 0x%08x\n", sbaddr
));
711 sb_corevendor(sb_t
*sbh
)
717 sb
= REGS2SB(si
->curmap
);
719 return ((R_SBREG(si
, &sb
->sbidhigh
) & SBIDH_VC_MASK
) >> SBIDH_VC_SHIFT
);
723 sb_corerev(sb_t
*sbh
)
730 sb
= REGS2SB(si
->curmap
);
731 sbidh
= R_SBREG(si
, &sb
->sbidhigh
);
733 return (SBCOREREV(sbidh
));
746 sb_setosh(sb_t
*sbh
, osl_t
*osh
)
751 if (si
->osh
!= NULL
) {
752 SB_ERROR(("osh is already set....\n"));
758 /* set sbtmstatelow core-specific flags */
760 sb_coreflags_wo(sb_t
*sbh
, uint32 mask
, uint32 val
)
767 sb
= REGS2SB(si
->curmap
);
769 ASSERT((val
& ~mask
) == 0);
772 w
= (R_SBREG(si
, &sb
->sbtmstatelow
) & ~mask
) | val
;
773 W_SBREG(si
, &sb
->sbtmstatelow
, w
);
776 /* set/clear sbtmstatelow core-specific flags */
778 sb_coreflags(sb_t
*sbh
, uint32 mask
, uint32 val
)
785 sb
= REGS2SB(si
->curmap
);
787 ASSERT((val
& ~mask
) == 0);
791 w
= (R_SBREG(si
, &sb
->sbtmstatelow
) & ~mask
) | val
;
792 W_SBREG(si
, &sb
->sbtmstatelow
, w
);
795 /* return the new value
* for a write operation, the following readback ensures the completion of the write operation.
798 return (R_SBREG(si
, &sb
->sbtmstatelow
));
801 /* set/clear sbtmstatehigh core-specific flags */
803 sb_coreflagshi(sb_t
*sbh
, uint32 mask
, uint32 val
)
810 sb
= REGS2SB(si
->curmap
);
812 ASSERT((val
& ~mask
) == 0);
813 ASSERT((mask
& ~SBTMH_FL_MASK
) == 0);
817 w
= (R_SBREG(si
, &sb
->sbtmstatehigh
) & ~mask
) | val
;
818 W_SBREG(si
, &sb
->sbtmstatehigh
, w
);
821 /* return the new value */
822 return (R_SBREG(si
, &sb
->sbtmstatehigh
));
825 /* Run bist on current core. Caller needs to take care of core-specific bist hazards */
827 sb_corebist(sb_t
*sbh
)
835 sb
= REGS2SB(si
->curmap
);
837 sblo
= R_SBREG(si
, &sb
->sbtmstatelow
);
838 W_SBREG(si
, &sb
->sbtmstatelow
, (sblo
| SBTML_FGC
| SBTML_BE
));
840 SPINWAIT(((R_SBREG(si
, &sb
->sbtmstatehigh
) & SBTMH_BISTD
) == 0), 100000);
842 if (R_SBREG(si
, &sb
->sbtmstatehigh
) & SBTMH_BISTF
)
845 W_SBREG(si
, &sb
->sbtmstatelow
, sblo
);
851 sb_iscoreup(sb_t
*sbh
)
857 sb
= REGS2SB(si
->curmap
);
859 return ((R_SBREG(si
, &sb
->sbtmstatelow
) &
860 (SBTML_RESET
| SBTML_REJ_MASK
| SBTML_CLK
)) == SBTML_CLK
);
864 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
865 * switch back to the original core, and return the new value.
* When using the silicon backplane, no fiddling with interrupts or core switches is needed.
869 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
870 * and (on newer pci cores) chipcommon registers.
873 sb_corereg(sb_t
*sbh
, uint coreidx
, uint regoff
, uint mask
, uint val
)
884 ASSERT(GOODIDX(coreidx
));
885 ASSERT(regoff
< SB_CORE_SIZE
);
886 ASSERT((val
& ~mask
) == 0);
888 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) {
889 /* If internal bus, we can always get at everything */
891 /* map if does not exist */
892 if (!si
->regs
[coreidx
]) {
893 si
->regs
[coreidx
] = (void*)REG_MAP(si
->coresba
[coreidx
],
895 ASSERT(GOODREGS(si
->regs
[coreidx
]));
897 r
= (uint32
*)((uchar
*)si
->regs
[coreidx
] + regoff
);
898 } else if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
) {
899 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
901 if ((si
->coreid
[coreidx
] == SB_CC
) &&
902 ((si
->sb
.buscoretype
== SB_PCIE
) ||
903 (si
->sb
.buscorerev
>= 13))) {
904 /* Chipc registers are mapped at 12KB */
907 r
= (uint32
*)((char *)si
->curmap
+ PCI_16KB0_CCREGS_OFFSET
+ regoff
);
908 } else if (si
->sb
.buscoreidx
== coreidx
) {
/* pci registers are either in the last 2KB of an 8KB window
910 * or, in pcie and pci rev 13 at 8KB
913 if ((si
->sb
.buscoretype
== SB_PCIE
) ||
914 (si
->sb
.buscorerev
>= 13))
915 r
= (uint32
*)((char *)si
->curmap
+
916 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
918 r
= (uint32
*)((char *)si
->curmap
+
919 ((regoff
>= SBCONFIGOFF
) ?
920 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
926 INTR_OFF(si
, intr_val
);
928 /* save current core index */
929 origidx
= sb_coreidx(&si
->sb
);
932 r
= (uint32
*) ((uchar
*) sb_setcoreidx(&si
->sb
, coreidx
) + regoff
);
938 if (regoff
>= SBCONFIGOFF
) {
939 w
= (R_SBREG(si
, r
) & ~mask
) | val
;
942 w
= (R_REG(si
->osh
, r
) & ~mask
) | val
;
943 W_REG(si
->osh
, r
, w
);
948 if (regoff
>= SBCONFIGOFF
)
951 #if defined(CONFIG_BCM5354)
952 if ((si
->sb
.chip
== BCM5354_CHIP_ID
) &&
953 (coreidx
== SB_CC_IDX
) &&
954 (regoff
== OFFSETOF(chipcregs_t
, watchdog
))) {
958 w
= R_REG(si
->osh
, r
);
962 /* restore core index */
963 if (origidx
!= coreidx
)
964 sb_setcoreidx(&si
->sb
, origidx
);
966 INTR_RESTORE(si
, intr_val
);
/* Helpers for extracting sub-word fields out of 32-bit PCI config dwords.
 * All macro arguments are parenthesized so expressions (e.g. DWORD_ALIGN(off + 1))
 * expand correctly -- the originals used bare 'x'/'a' and would mis-bind
 * around the '&' and '>>' operators.
 */
#define DWORD_ALIGN(x)  ((x) & ~(0x03))		/* round offset down to a dword boundary */
#define BYTE_POS(x)     ((x) & 0x3)		/* byte index within the dword */
#define WORD_POS(x)     ((x) & 0x1)		/* 16-bit word index within the dword */

#define BYTE_SHIFT(x)   (8 * BYTE_POS(x))	/* bit shift that reaches byte (x) */
#define WORD_SHIFT(x)   (16 * WORD_POS(x))	/* bit shift that reaches word (x) */

#define BYTE_VAL(a, x)  (((a) >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x)  (((a) >> WORD_SHIFT(x)) & 0xFFFF)

/* Read one byte/word of PCI config space at offset (a); expects 'si' in scope */
#define read_pci_cfg_byte(a) \
	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)

#define read_pci_cfg_word(a) \
	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
989 /* return cap_offset if requested capability exists in the PCI config space */
991 sb_find_pci_capability(sb_info_t
*si
, uint8 req_cap_id
, uchar
*buf
, uint32
*buflen
)
998 if (BUSTYPE(si
->sb
.bustype
) != PCI_BUS
)
1001 /* check for Header type 0 */
1002 byte_val
= read_pci_cfg_byte(PCI_CFG_HDR
);
1003 if ((byte_val
& 0x7f) != PCI_HEADER_NORMAL
)
1006 /* check if the capability pointer field exists */
1007 byte_val
= read_pci_cfg_byte(PCI_CFG_STAT
);
1008 if (!(byte_val
& PCI_CAPPTR_PRESENT
))
1011 cap_ptr
= read_pci_cfg_byte(PCI_CFG_CAPPTR
);
1012 /* check if the capability pointer is 0x00 */
1013 if (cap_ptr
== 0x00)
/* loop through the capability list and see if the pcie capability exists */
1018 cap_id
= read_pci_cfg_byte(cap_ptr
);
1020 while (cap_id
!= req_cap_id
) {
1021 cap_ptr
= read_pci_cfg_byte((cap_ptr
+1));
1022 if (cap_ptr
== 0x00) break;
1023 cap_id
= read_pci_cfg_byte(cap_ptr
);
1025 if (cap_id
!= req_cap_id
) {
1028 /* found the caller requested capability */
1029 if ((buf
!= NULL
) && (buflen
!= NULL
)) {
1033 if (!bufsize
) goto end
;
/* copy the capability data excluding cap ID and next ptr */
1036 cap_data
= cap_ptr
+ 2;
1037 if ((bufsize
+ cap_data
) > SZPCR
)
1038 bufsize
= SZPCR
- cap_data
;
1041 *buf
= read_pci_cfg_byte(cap_data
);
1051 sb_pcieclkreq(sb_t
*sbh
, uint32 mask
, uint32 val
)
1059 offset
= si
->pciecap_lcreg_offset
;
1063 reg_val
= OSL_PCI_READ_CONFIG(si
->osh
, offset
, sizeof(uint32
));
1067 reg_val
|= PCIE_CLKREQ_ENAB
;
1069 reg_val
&= ~PCIE_CLKREQ_ENAB
;
1070 OSL_PCI_WRITE_CONFIG(si
->osh
, offset
, sizeof(uint32
), reg_val
);
1071 reg_val
= OSL_PCI_READ_CONFIG(si
->osh
, offset
, sizeof(uint32
));
1073 if (reg_val
& PCIE_CLKREQ_ENAB
)
1081 /* return TRUE if PCIE capability exists in the pci config space */
1083 sb_ispcie(sb_info_t
*si
)
1087 cap_ptr
= sb_find_pci_capability(si
, PCI_CAP_PCIECAP_ID
, NULL
, NULL
);
1091 si
->pciecap_lcreg_offset
= cap_ptr
+ PCIE_CAP_LINKCTRL_OFFSET
;
1096 /* Wake-on-wireless-LAN (WOWL) support functions */
1097 /* return TRUE if PM capability exists in the pci config space */
1099 sb_pci_pmecap(sb_t
*sbh
)
1107 if (si
== NULL
|| !(PCI(si
) || PCIE(si
)))
1110 if (!si
->pmecap_offset
) {
1111 cap_ptr
= sb_find_pci_capability(si
, PCI_CAP_POWERMGMTCAP_ID
, NULL
, NULL
);
1115 si
->pmecap_offset
= cap_ptr
;
1117 pmecap
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pmecap_offset
, sizeof(uint32
));
1119 /* At least one state can generate PME */
1120 si
->pmecap
= (pmecap
& PME_CAP_PM_STATES
) != 0;
1123 return (si
->pmecap
);
1126 /* Enable PME generation and disable clkreq */
1128 sb_pci_pmeen(sb_t
*sbh
)
1134 /* if not pmecapable return */
1135 if (!sb_pci_pmecap(sbh
))
1138 w
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pmecap_offset
+ PME_CSR_OFFSET
, sizeof(uint32
));
1139 w
|= (PME_CSR_PME_EN
);
1140 OSL_PCI_WRITE_CONFIG(si
->osh
, si
->pmecap_offset
+ PME_CSR_OFFSET
, sizeof(uint32
), w
);
1142 /* Disable clkreq */
1143 if (si
->pr42767_war
) {
1144 sb_pcieclkreq(sbh
, 1, 0);
1145 si
->pr42767_war
= FALSE
;
1146 } else if (si
->sb
.pr42780
) {
1147 sb_pcieclkreq(sbh
, 1, 1);
1151 /* Disable PME generation, clear the PME status bit if set and
1152 * return TRUE if PME status set
1155 sb_pci_pmeclr(sb_t
*sbh
)
1163 if (!sb_pci_pmecap(sbh
))
1166 w
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pmecap_offset
+ PME_CSR_OFFSET
, sizeof(uint32
));
1168 SB_ERROR(("sb_pci_pmeclr PMECSR : 0x%x\n", w
));
1169 ret
= (w
& PME_CSR_PME_STAT
) == PME_CSR_PME_STAT
;
1171 /* PMESTAT is cleared by writing 1 to it */
1172 w
&= ~(PME_CSR_PME_EN
);
1174 OSL_PCI_WRITE_CONFIG(si
->osh
, si
->pmecap_offset
+ PME_CSR_OFFSET
, sizeof(uint32
), w
);
1179 /* Scan the enumeration space to find all cores starting from the given
1180 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
1181 * is the default core address at chip POR time and 'regs' is the virtual
1182 * address that the default core is mapped at. 'ncores' is the number of
1183 * cores expected on bus 'sbba'. It returns the total number of cores
1184 * starting from bus 'sbba', inclusive.
#define SB_MAXBUSES 2	/* scan depth limit: the root bus plus at most one bridged bus (see _sb_scan) */
1188 BCMINITFN(_sb_scan
)(sb_info_t
*si
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
, uint numcores
)
1194 if (bus
>= SB_MAXBUSES
) {
1195 SB_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba
, bus
));
1198 SB_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba
, numcores
));
1200 /* Scan all cores on the bus starting from core 0.
1201 * Core addresses must be contiguous on each bus.
1203 for (i
= 0, next
= si
->numcores
; i
< numcores
&& next
< SB_MAXCORES
; i
++, next
++) {
1204 si
->coresba
[next
] = sbba
+ i
* SB_CORE_SIZE
;
1206 /* keep and reuse the initial register mapping */
1207 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
&& si
->coresba
[next
] == sba
) {
1208 SB_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs
, next
));
1209 si
->regs
[next
] = regs
;
1212 /* change core to 'next' and read its coreid */
1213 si
->curmap
= _sb_setcoreidx(si
, next
);
1216 si
->coreid
[next
] = sb_coreid(&si
->sb
);
1218 /* core specific processing... */
1219 /* chipc on bus SB_ENUM_BASE provides # cores in the chip and lots of
1222 if (sbba
== SB_ENUM_BASE
&& si
->coreid
[next
] == SB_CC
) {
1223 chipcregs_t
*cc
= (chipcregs_t
*)si
->curmap
;
1225 /* get chip id and rev */
1226 si
->sb
.chip
= R_REG(si
->osh
, &cc
->chipid
) & CID_ID_MASK
;
1227 si
->sb
.chiprev
= (R_REG(si
->osh
, &cc
->chipid
) & CID_REV_MASK
) >>
1229 si
->sb
.chippkg
= (R_REG(si
->osh
, &cc
->chipid
) & CID_PKG_MASK
) >>
1232 /* get chipcommon rev */
1233 si
->sb
.ccrev
= (int)sb_corerev(&si
->sb
);
1235 /* get chipcommon chipstatus */
1236 if (si
->sb
.ccrev
>= 11)
1237 si
->sb
.chipst
= R_REG(si
->osh
, &cc
->chipstatus
);
1239 /* get chipcommon capabilites */
1240 si
->sb
.cccaps
= R_REG(si
->osh
, &cc
->capabilities
);
1242 /* get pmu rev and caps */
1243 if ((si
->sb
.cccaps
& CC_CAP_PMU
)) {
1244 si
->sb
.pmucaps
= R_REG(si
->osh
, &cc
->pmucapabilities
);
1245 si
->sb
.pmurev
= si
->sb
.pmucaps
& PCAP_REV_MASK
;
1248 /* determine numcores - this is the total # cores in the chip */
1249 if (((si
->sb
.ccrev
== 4) || (si
->sb
.ccrev
>= 6)))
1250 numcores
= (R_REG(si
->osh
, &cc
->chipid
) & CID_CC_MASK
) >>
1253 numcores
= sb_chip2numcores(si
->sb
.chip
);
1254 SB_MSG(("_sb_scan: there are %u cores in the chip\n", numcores
));
1256 /* scan bridged SB(s) and add results to the end of the list */
1257 else if (si
->coreid
[next
] == SB_OCP
) {
1258 sbconfig_t
*sb
= REGS2SB(si
->curmap
);
1259 uint32 nsbba
= R_SBREG(si
, &sb
->sbadmatch1
);
1262 si
->numcores
= next
+ 1;
1264 if ((nsbba
& 0xfff00000) != SB_ENUM_BASE
)
1266 nsbba
&= 0xfffff000;
1267 if (_sb_coreidx(si
, nsbba
) != BADIDX
)
1270 nsbcc
= (R_SBREG(si
, &sb
->sbtmstatehigh
) & 0x000f0000) >> 16;
1271 nsbcc
= _sb_scan(si
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
1272 if (sbba
== SB_ENUM_BASE
)
1278 SB_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
1280 si
->numcores
= i
+ ncc
;
1281 return si
->numcores
;
1284 /* scan the sb enumerated space to identify all cores */
1286 BCMINITFN(sb_scan
)(sb_info_t
*si
, void *regs
, uint devid
)
1299 /* Save the current core info and validate it later till we know
1300 * for sure what is good and what is bad.
1302 origsba
= _sb_coresba(si
);
1305 /* Use devid as initial chipid and we'll update it later in _sb_scan */
1306 si
->sb
.chip
= devid
;
1308 /* Support chipcommon-less chips for a little while longer so the old
1309 * sdio host fpga continues to work until we can get the new one working
1310 * reliably. This particular chip has 2 cores - codec/sdio and pci.
1312 if (devid
== SDIOH_FPGA_ID
)
1314 /* Expect at least one core on 0x18000000 and it must be chipcommon where
1315 * the core count for the whole chip is kept.
1320 /* scan all SB(s) starting from SB_ENUM_BASE */
1321 si
->numcores
= _sb_scan(si
, origsba
, regs
, 0, SB_ENUM_BASE
, numcores
);
1322 if (si
->numcores
== 0)
1325 /* figure out bus/orignal core idx */
1326 si
->sb
.buscorerev
= NOREV
;
1327 si
->sb
.buscoreidx
= BADIDX
;
1330 pcirev
= pcierev
= NOREV
;
1331 pciidx
= pcieidx
= BADIDX
;
1333 for (i
= 0; i
< si
->numcores
; i
++) {
1334 sb_setcoreidx(&si
->sb
, i
);
1336 if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
) {
1337 if (si
->coreid
[i
] == SB_PCI
) {
1339 pcirev
= sb_corerev(&si
->sb
);
1341 } else if (si
->coreid
[i
] == SB_PCIE
) {
1343 pcierev
= sb_corerev(&si
->sb
);
1346 } else if (BUSTYPE(si
->sb
.bustype
) == PCMCIA_BUS
) {
1347 if (si
->coreid
[i
] == SB_PCMCIA
) {
1348 si
->sb
.buscorerev
= sb_corerev(&si
->sb
);
1349 si
->sb
.buscoretype
= si
->coreid
[i
];
1350 si
->sb
.buscoreidx
= i
;
1354 /* find the core idx before entering this func. */
1355 if (origsba
== si
->coresba
[i
])
1366 si
->sb
.buscoretype
= SB_PCI
;
1367 si
->sb
.buscorerev
= pcirev
;
1368 si
->sb
.buscoreidx
= pciidx
;
1370 si
->sb
.buscoretype
= SB_PCIE
;
1371 si
->sb
.buscorerev
= pcierev
;
1372 si
->sb
.buscoreidx
= pcieidx
;
1375 /* return to the original core */
1376 if (origidx
!= BADIDX
)
1377 sb_setcoreidx(&si
->sb
, origidx
);
1378 ASSERT(origidx
!= BADIDX
);
1381 /* may be called with core in reset */
1383 sb_detach(sb_t
*sbh
)
1393 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
)
1394 for (idx
= 0; idx
< SB_MAXCORES
; idx
++)
1395 if (si
->regs
[idx
]) {
1396 REG_UNMAP(si
->regs
[idx
]);
1397 si
->regs
[idx
] = NULL
;
1399 #if !defined(CONFIG_BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
1401 #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
1402 MFREE(si
->osh
, si
, sizeof(sb_info_t
));
1405 /* convert chip number to number of i/o cores */
1407 BCMINITFN(sb_chip2numcores
)(uint chip
)
1409 if (chip
== BCM4306_CHIP_ID
) /* < 4306c0 */
1411 if (chip
== BCM4704_CHIP_ID
)
1413 if (chip
== BCM5365_CHIP_ID
)
1416 SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip
));
1421 /* return index of coreid or BADIDX if not found */
1423 sb_findcoreidx(sb_t
*sbh
, uint coreid
, uint coreunit
)
1433 for (i
= 0; i
< si
->numcores
; i
++)
1434 if (si
->coreid
[i
] == coreid
) {
1435 if (found
== coreunit
)
1444 * this function changes logical "focus" to the indiciated core,
1445 * must be called with interrupt off.
1446 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1449 sb_setcoreidx(sb_t
*sbh
, uint coreidx
)
1455 if (coreidx
>= si
->numcores
)
1459 * If the user has provided an interrupt mask enabled function,
1460 * then assert interrupts are disabled before switching the core.
1462 ASSERT((si
->intrsenabled_fn
== NULL
) || !(*(si
)->intrsenabled_fn
)((si
)->intr_arg
));
1464 si
->curmap
= _sb_setcoreidx(si
, coreidx
);
1465 si
->curidx
= coreidx
;
1467 return (si
->curmap
);
1470 /* This function changes the logical "focus" to the indiciated core.
1471 * Return the current core's virtual address.
1474 _sb_setcoreidx(sb_info_t
*si
, uint coreidx
)
1476 uint32 sbaddr
= si
->coresba
[coreidx
];
1479 switch (BUSTYPE(si
->sb
.bustype
)) {
1482 if (!si
->regs
[coreidx
]) {
1483 si
->regs
[coreidx
] = (void*)REG_MAP(sbaddr
, SB_CORE_SIZE
);
1484 ASSERT(GOODREGS(si
->regs
[coreidx
]));
1486 regs
= si
->regs
[coreidx
];
1490 /* point bar0 window */
1491 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_BAR0_WIN
, 4, sbaddr
);
1496 uint8 tmp
= (sbaddr
>> 12) & 0x0f;
1497 OSL_PCMCIA_WRITE_ATTR(si
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
1498 tmp
= (sbaddr
>> 16) & 0xff;
1499 OSL_PCMCIA_WRITE_ATTR(si
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
1500 tmp
= (sbaddr
>> 24) & 0xff;
1501 OSL_PCMCIA_WRITE_ATTR(si
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
1509 if (!si
->regs
[coreidx
]) {
1510 si
->regs
[coreidx
] = (void *)(uintptr
)sbaddr
;
1511 ASSERT(GOODREGS(si
->regs
[coreidx
]));
1513 regs
= si
->regs
[coreidx
];
1515 #endif /* BCMJTAG */
1527 * this function changes logical "focus" to the indiciated core,
1528 * must be called with interrupt off.
1529 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1532 sb_setcore(sb_t
*sbh
, uint coreid
, uint coreunit
)
1536 idx
= sb_findcoreidx(sbh
, coreid
, coreunit
);
1540 return (sb_setcoreidx(sbh
, idx
));
1543 /* return chip number */
1545 BCMINITFN(sb_chip
)(sb_t
*sbh
)
1550 return (si
->sb
.chip
);
1553 /* return chip revision number */
1555 BCMINITFN(sb_chiprev
)(sb_t
*sbh
)
1560 return (si
->sb
.chiprev
);
1563 /* return chip common revision number */
1565 BCMINITFN(sb_chipcrev
)(sb_t
*sbh
)
1570 return (si
->sb
.ccrev
);
1573 /* return chip package option */
1575 BCMINITFN(sb_chippkg
)(sb_t
*sbh
)
1580 return (si
->sb
.chippkg
);
1583 /* return PCI core rev. */
1585 BCMINITFN(sb_pcirev
)(sb_t
*sbh
)
1590 return (si
->sb
.buscorerev
);
1594 BCMINITFN(sb_war16165
)(sb_t
*sbh
)
1600 return (PCI(si
) && (si
->sb
.buscorerev
<= 10));
1604 BCMINITFN(sb_war30841
)(sb_info_t
*si
)
1606 sb_pcie_mdiowrite(si
, MDIODATA_DEV_RX
, SERDES_RX_TIMER1
, 0x8128);
1607 sb_pcie_mdiowrite(si
, MDIODATA_DEV_RX
, SERDES_RX_CDR
, 0x0100);
1608 sb_pcie_mdiowrite(si
, MDIODATA_DEV_RX
, SERDES_RX_CDRBW
, 0x1466);
1611 /* return PCMCIA core rev. */
1613 BCMINITFN(sb_pcmciarev
)(sb_t
*sbh
)
1618 return (si
->sb
.buscorerev
);
1621 /* return board vendor id */
1623 BCMINITFN(sb_boardvendor
)(sb_t
*sbh
)
1628 return (si
->sb
.boardvendor
);
1631 /* return boardtype */
1633 BCMINITFN(sb_boardtype
)(sb_t
*sbh
)
1640 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
&& si
->sb
.boardtype
== 0xffff) {
1641 /* boardtype format is a hex string */
1642 si
->sb
.boardtype
= getintvar(NULL
, "boardtype");
1644 /* backward compatibility for older boardtype string format */
1645 if ((si
->sb
.boardtype
== 0) && (var
= getvar(NULL
, "boardtype"))) {
1646 if (!strcmp(var
, "bcm94710dev"))
1647 si
->sb
.boardtype
= BCM94710D_BOARD
;
1648 else if (!strcmp(var
, "bcm94710ap"))
1649 si
->sb
.boardtype
= BCM94710AP_BOARD
;
1650 else if (!strcmp(var
, "bu4710"))
1651 si
->sb
.boardtype
= BU4710_BOARD
;
1652 else if (!strcmp(var
, "bcm94702mn"))
1653 si
->sb
.boardtype
= BCM94702MN_BOARD
;
1654 else if (!strcmp(var
, "bcm94710r1"))
1655 si
->sb
.boardtype
= BCM94710R1_BOARD
;
1656 else if (!strcmp(var
, "bcm94710r4"))
1657 si
->sb
.boardtype
= BCM94710R4_BOARD
;
1658 else if (!strcmp(var
, "bcm94702cpci"))
1659 si
->sb
.boardtype
= BCM94702CPCI_BOARD
;
1660 else if (!strcmp(var
, "bcm95380_rr"))
1661 si
->sb
.boardtype
= BCM95380RR_BOARD
;
1665 return (si
->sb
.boardtype
);
1668 /* return bus type of sbh device */
1675 return (si
->sb
.bustype
);
1678 /* return bus core type */
1680 sb_buscoretype(sb_t
*sbh
)
1686 return (si
->sb
.buscoretype
);
1689 /* return bus core revision */
1691 sb_buscorerev(sb_t
*sbh
)
1696 return (si
->sb
.buscorerev
);
1699 /* return list of found cores */
1701 sb_corelist(sb_t
*sbh
, uint coreid
[])
1707 bcopy((uchar
*)si
->coreid
, (uchar
*)coreid
, (si
->numcores
* sizeof(uint
)));
1708 return (si
->numcores
);
1711 /* return current register mapping */
1713 sb_coreregs(sb_t
*sbh
)
1718 ASSERT(GOODREGS(si
->curmap
));
1720 return (si
->curmap
);
1723 #if defined(CONFIG_BCMDBG_ASSERT)
1724 /* traverse all cores to find and clear source of serror */
1726 sb_serr_clear(sb_info_t
*si
)
1730 uint i
, intr_val
= 0;
1731 void * corereg
= NULL
;
1733 INTR_OFF(si
, intr_val
);
1734 origidx
= sb_coreidx(&si
->sb
);
1736 for (i
= 0; i
< si
->numcores
; i
++) {
1737 corereg
= sb_setcoreidx(&si
->sb
, i
);
1738 if (NULL
!= corereg
) {
1739 sb
= REGS2SB(corereg
);
1740 if ((R_SBREG(si
, &sb
->sbtmstatehigh
)) & SBTMH_SERR
) {
1741 AND_SBREG(si
, &sb
->sbtmstatehigh
, ~SBTMH_SERR
);
1742 SB_ERROR(("sb_serr_clear: SError at core 0x%x\n",
1743 sb_coreid(&si
->sb
)));
1748 sb_setcoreidx(&si
->sb
, origidx
);
1749 INTR_RESTORE(si
, intr_val
);
1753 * Check if any inband, outband or timeout errors has happened and clear them.
1754 * Must be called with chip clk on !
1757 sb_taclear(sb_t
*sbh
)
1764 uint32 inband
= 0, serror
= 0, timeout
= 0;
1765 void *corereg
= NULL
;
1766 volatile uint32 imstate
, tmstate
;
1770 if (BUSTYPE(si
->sb
.bustype
) == PCI_BUS
) {
1771 volatile uint32 stcmd
;
1773 /* inband error is Target abort for PCI */
1774 stcmd
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_CFG_CMD
, sizeof(uint32
));
1775 inband
= stcmd
& PCI_CFG_CMD_STAT_TA
;
1777 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_CFG_CMD
, sizeof(uint32
), stcmd
);
1781 stcmd
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_INT_STATUS
, sizeof(uint32
));
1782 serror
= stcmd
& PCI_SBIM_STATUS_SERR
;
1785 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_INT_STATUS
, sizeof(uint32
), stcmd
);
1789 imstate
= sb_corereg(sbh
, si
->sb
.buscoreidx
,
1790 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), 0, 0);
1791 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
1792 sb_corereg(sbh
, si
->sb
.buscoreidx
,
1793 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), ~0,
1794 (imstate
& ~(SBIM_IBE
| SBIM_TO
)));
1795 /* inband = imstate & SBIM_IBE; same as TA above */
1796 timeout
= imstate
& SBIM_TO
;
1802 /* dump errlog for sonics >= 2.3 */
1803 if (si
->sb
.sonicsrev
== SONICS_2_2
)
1806 uint32 imerrlog
, imerrloga
;
1807 imerrlog
= sb_corereg(sbh
, si
->sb
.buscoreidx
, SBIMERRLOG
, 0, 0);
1808 if (imerrlog
& SBTMEL_EC
) {
1809 imerrloga
= sb_corereg(sbh
, si
->sb
.buscoreidx
, SBIMERRLOGA
,
1812 sb_corereg(sbh
, si
->sb
.buscoreidx
, SBIMERRLOG
, ~0, 0);
1813 SB_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
1814 imerrlog
, imerrloga
));
1820 } else if (BUSTYPE(si
->sb
.bustype
) == PCMCIA_BUS
) {
1822 INTR_OFF(si
, intr_val
);
1823 origidx
= sb_coreidx(sbh
);
1825 corereg
= sb_setcore(sbh
, SB_PCMCIA
, 0);
1826 if (NULL
!= corereg
) {
1827 sb
= REGS2SB(corereg
);
1829 imstate
= R_SBREG(si
, &sb
->sbimstate
);
1830 /* handle surprise removal */
1831 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
1832 AND_SBREG(si
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
1833 inband
= imstate
& SBIM_IBE
;
1834 timeout
= imstate
& SBIM_TO
;
1836 tmstate
= R_SBREG(si
, &sb
->sbtmstatehigh
);
1837 if ((tmstate
!= 0xffffffff) && (tmstate
& SBTMH_INT_STATUS
)) {
1842 OR_SBREG(si
, &sb
->sbtmstatelow
, SBTML_INT_ACK
);
1843 AND_SBREG(si
, &sb
->sbtmstatelow
, ~SBTML_INT_ACK
);
1846 sb_setcoreidx(sbh
, origidx
);
1847 INTR_RESTORE(si
, intr_val
);
1852 if (inband
| timeout
| serror
) {
1854 SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
1855 inband
, serror
, timeout
));
1862 /* do buffered registers update */
1864 sb_commit(sb_t
*sbh
)
1872 origidx
= si
->curidx
;
1873 ASSERT(GOODIDX(origidx
));
1875 INTR_OFF(si
, intr_val
);
1877 /* switch over to chipcommon core if there is one, else use pci */
1878 if (si
->sb
.ccrev
!= NOREV
) {
1879 chipcregs_t
*ccregs
= (chipcregs_t
*)sb_setcore(sbh
, SB_CC
, 0);
1881 /* do the buffer registers update */
1882 W_REG(si
->osh
, &ccregs
->broadcastaddress
, SB_COMMIT
);
1883 W_REG(si
->osh
, &ccregs
->broadcastdata
, 0x0);
1884 } else if (PCI(si
)) {
1885 sbpciregs_t
*pciregs
= (sbpciregs_t
*)sb_setcore(sbh
, SB_PCI
, 0);
1887 /* do the buffer registers update */
1888 W_REG(si
->osh
, &pciregs
->bcastaddr
, SB_COMMIT
);
1889 W_REG(si
->osh
, &pciregs
->bcastdata
, 0x0);
1893 /* restore core index */
1894 sb_setcoreidx(sbh
, origidx
);
1895 INTR_RESTORE(si
, intr_val
);
1898 /* reset and re-enable a core
1900 * bits - core specific bits that are set during and after reset sequence
1901 * resetbits - core specific bits that are set only during reset sequence
1904 sb_core_reset(sb_t
*sbh
, uint32 bits
, uint32 resetbits
)
1908 volatile uint32 dummy
;
1911 ASSERT(GOODREGS(si
->curmap
));
1912 sb
= REGS2SB(si
->curmap
);
1915 * Must do the disable sequence first to work for arbitrary current core state.
1917 sb_core_disable(sbh
, (bits
| resetbits
));
1920 * Now do the initialization sequence.
1923 /* set reset while enabling the clock and forcing them on throughout the core */
1924 W_SBREG(si
, &sb
->sbtmstatelow
, (SBTML_FGC
| SBTML_CLK
| SBTML_RESET
| bits
| resetbits
));
1925 dummy
= R_SBREG(si
, &sb
->sbtmstatelow
);
1928 if (R_SBREG(si
, &sb
->sbtmstatehigh
) & SBTMH_SERR
) {
1929 W_SBREG(si
, &sb
->sbtmstatehigh
, 0);
1931 if ((dummy
= R_SBREG(si
, &sb
->sbimstate
)) & (SBIM_IBE
| SBIM_TO
)) {
1932 AND_SBREG(si
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
1935 /* clear reset and allow it to propagate throughout the core */
1936 W_SBREG(si
, &sb
->sbtmstatelow
, (SBTML_FGC
| SBTML_CLK
| bits
));
1937 dummy
= R_SBREG(si
, &sb
->sbtmstatelow
);
1940 /* leave clock enabled */
1941 W_SBREG(si
, &sb
->sbtmstatelow
, (SBTML_CLK
| bits
));
1942 dummy
= R_SBREG(si
, &sb
->sbtmstatelow
);
1947 sb_core_tofixup(sb_t
*sbh
)
1954 if ((BUSTYPE(si
->sb
.bustype
) != PCI_BUS
) || PCIE(si
) ||
1955 (PCI(si
) && (si
->sb
.buscorerev
>= 5)))
1958 ASSERT(GOODREGS(si
->curmap
));
1959 sb
= REGS2SB(si
->curmap
);
1961 if (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) {
1962 SET_SBREG(si
, &sb
->sbimconfiglow
,
1963 SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
,
1964 (0x5 << SBIMCL_RTO_SHIFT
) | 0x3);
1966 if (sb_coreid(sbh
) == SB_PCI
) {
1967 SET_SBREG(si
, &sb
->sbimconfiglow
,
1968 SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
,
1969 (0x3 << SBIMCL_RTO_SHIFT
) | 0x2);
1971 SET_SBREG(si
, &sb
->sbimconfiglow
, (SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
), 0);
1979 * Set the initiator timeout for the "master core".
1980 * The master core is defined to be the core in control
1981 * of the chip and so it issues accesses to non-memory
1982 * locations (Because of dma *any* core can access memeory).
1984 * The routine uses the bus to decide who is the master:
1987 * PCI_BUS => pci or pcie
1988 * PCMCIA_BUS => pcmcia
1989 * SDIO_BUS => pcmcia
1991 * This routine exists so callers can disable initiator
1992 * timeouts so accesses to very slow devices like otp
1993 * won't cause an abort. The routine allows arbitrary
1994 * settings of the service and request timeouts, though.
1996 * Returns the timeout state before changing it or -1
2000 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
2003 sb_set_initiator_to(sb_t
*sbh
, uint32 to
, uint idx
)
2008 uint32 tmp
, ret
= 0xffffffff;
2013 if ((to
& ~TO_MASK
) != 0)
2016 /* Figure out the master core */
2017 if (idx
== BADIDX
) {
2018 switch (BUSTYPE(si
->sb
.bustype
)) {
2020 idx
= si
->sb
.buscoreidx
;
2026 idx
= sb_findcoreidx(sbh
, SB_PCMCIA
, 0);
2029 idx
= sb_findcoreidx(sbh
, SB_MIPS33
, 0);
2038 INTR_OFF(si
, intr_val
);
2039 origidx
= sb_coreidx(sbh
);
2041 sb
= REGS2SB(sb_setcoreidx(sbh
, idx
));
2043 tmp
= R_SBREG(si
, &sb
->sbimconfiglow
);
2044 ret
= tmp
& TO_MASK
;
2045 W_SBREG(si
, &sb
->sbimconfiglow
, (tmp
& ~TO_MASK
) | to
);
2048 sb_setcoreidx(sbh
, origidx
);
2049 INTR_RESTORE(si
, intr_val
);
2054 sb_core_disable(sb_t
*sbh
, uint32 bits
)
2057 volatile uint32 dummy
;
2063 ASSERT(GOODREGS(si
->curmap
));
2064 sb
= REGS2SB(si
->curmap
);
2066 /* if core is already in reset, just return */
2067 if (R_SBREG(si
, &sb
->sbtmstatelow
) & SBTML_RESET
)
2070 /* reject value changed between sonics 2.2 and 2.3 */
2071 if (si
->sb
.sonicsrev
== SONICS_2_2
)
2072 rej
= (1 << SBTML_REJ_SHIFT
);
2074 rej
= (2 << SBTML_REJ_SHIFT
);
2076 /* if clocks are not enabled, put into reset and return */
2077 if ((R_SBREG(si
, &sb
->sbtmstatelow
) & SBTML_CLK
) == 0)
2080 /* set target reject and spin until busy is clear (preserve core-specific bits) */
2081 OR_SBREG(si
, &sb
->sbtmstatelow
, rej
);
2082 dummy
= R_SBREG(si
, &sb
->sbtmstatelow
);
2084 SPINWAIT((R_SBREG(si
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
2085 if (R_SBREG(si
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
2086 SB_ERROR(("%s: target state still busy\n", __FUNCTION__
));
2088 if (R_SBREG(si
, &sb
->sbidlow
) & SBIDL_INIT
) {
2089 OR_SBREG(si
, &sb
->sbimstate
, SBIM_RJ
);
2090 dummy
= R_SBREG(si
, &sb
->sbimstate
);
2092 SPINWAIT((R_SBREG(si
, &sb
->sbimstate
) & SBIM_BY
), 100000);
2095 /* set reset and reject while enabling the clocks */
2096 W_SBREG(si
, &sb
->sbtmstatelow
, (bits
| SBTML_FGC
| SBTML_CLK
| rej
| SBTML_RESET
));
2097 dummy
= R_SBREG(si
, &sb
->sbtmstatelow
);
2100 /* don't forget to clear the initiator reject bit */
2101 if (R_SBREG(si
, &sb
->sbidlow
) & SBIDL_INIT
)
2102 AND_SBREG(si
, &sb
->sbimstate
, ~SBIM_RJ
);
2105 /* leave reset and reject asserted */
2106 W_SBREG(si
, &sb
->sbtmstatelow
, (bits
| rej
| SBTML_RESET
));
2110 /* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
2112 sb_watchdog(sb_t
*sbh
, uint ticks
)
2114 /* make sure we come up in fast clock mode; or if clearing, clear clock */
2116 sb_clkctl_clk(sbh
, CLK_FAST
);
2118 sb_clkctl_clk(sbh
, CLK_DYNAMIC
);
2120 #if defined(CONFIG_BCM4328)
2121 if (sbh
->chip
== BCM4328_CHIP_ID
&& ticks
!= 0)
2122 sb_corereg(sbh
, SB_CC_IDX
, OFFSETOF(chipcregs_t
, min_res_mask
),
2123 PMURES_BIT(RES4328_ROM_SWITCH
),
2124 PMURES_BIT(RES4328_ROM_SWITCH
));
2128 sb_corereg(sbh
, SB_CC_IDX
, OFFSETOF(chipcregs_t
, watchdog
), ~0, ticks
);
2131 /* initialize the pcmcia core */
2133 sb_pcmcia_init(sb_t
*sbh
)
2140 /* enable d11 mac interrupts */
2141 OSL_PCMCIA_READ_ATTR(si
->osh
, PCMCIA_FCR0
+ PCMCIA_COR
, &cor
, 1);
2142 cor
|= COR_IRQEN
| COR_FUNEN
;
2143 OSL_PCMCIA_WRITE_ATTR(si
->osh
, PCMCIA_FCR0
+ PCMCIA_COR
, &cor
, 1);
2149 BCMINITFN(sb_pci_up
)(sb_t
*sbh
)
2155 /* if not pci bus, we're done */
2156 if (BUSTYPE(si
->sb
.bustype
) != PCI_BUS
)
2159 if (FORCEHT_WAR32414(si
))
2160 sb_war32414_forceHT(sbh
, 1);
2162 if (PCIE_ASPMWARS(si
) || si
->sb
.pr42780
)
2163 sb_pcieclkreq(sbh
, 1, 0);
2166 (((si
->sb
.chip
== BCM4311_CHIP_ID
) && (si
->sb
.chiprev
== 2)) ||
2167 ((si
->sb
.chip
== BCM4312_CHIP_ID
) && (si
->sb
.chiprev
== 0))))
2168 sb_set_initiator_to((void *)si
, 0x3, sb_findcoreidx((void *)si
, SB_D11
, 0));
2172 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
2174 BCMUNINITFN(sb_pci_sleep
)(sb_t
*sbh
)
2180 /* if not pci bus, we're done */
2181 if (!PCIE(si
) || !PCIE_ASPMWARS(si
))
2184 w
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pciecap_lcreg_offset
, sizeof(uint32
));
2185 w
&= ~PCIE_CAP_LCREG_ASPML1
;
2186 OSL_PCI_WRITE_CONFIG(si
->osh
, si
->pciecap_lcreg_offset
, sizeof(uint32
), w
);
2189 /* Unconfigure and/or apply various WARs when going down */
2191 BCMINITFN(sb_pci_down
)(sb_t
*sbh
)
2197 /* if not pci bus, we're done */
2198 if (BUSTYPE(si
->sb
.bustype
) != PCI_BUS
)
2201 if (FORCEHT_WAR32414(si
))
2202 sb_war32414_forceHT(sbh
, 0);
2204 if (si
->pr42767_war
) {
2205 sb_pcieclkreq(sbh
, 1, 1);
2206 si
->pr42767_war
= FALSE
;
2207 } else if (si
->sb
.pr42780
) {
2208 sb_pcieclkreq(sbh
, 1, 1);
2213 BCMINITFN(sb_war42767_clkreq
)(sb_t
*sbh
)
2215 sbpcieregs_t
*pcieregs
;
2216 uint16 val16
, *reg16
;
2221 /* if not pcie bus, we're done */
2222 if (!PCIE(si
) || !PCIE_ASPMWARS(si
))
2225 pcieregs
= (sbpcieregs_t
*) sb_setcoreidx(sbh
, si
->sb
.buscoreidx
);
2226 reg16
= &pcieregs
->sprom
[SRSH_CLKREQ_OFFSET
];
2227 val16
= R_REG(si
->osh
, reg16
);
2228 /* if clockreq is not advertized advertize it */
2229 if (!si
->pcie_war_ovr
) {
2230 val16
|= SRSH_CLKREQ_ENB
;
2231 si
->pr42767_war
= TRUE
;
2233 si
->sb
.pr42780
= TRUE
;
2235 val16
&= ~SRSH_CLKREQ_ENB
;
2236 W_REG(si
->osh
, reg16
, val16
);
2240 BCMINITFN(sb_war42767
)(sb_t
*sbh
)
2247 /* if not pcie bus, we're done */
2248 if (!PCIE(si
) || !PCIE_ASPMWARS(si
))
2251 sb_pcie_mdioread(si
, MDIODATA_DEV_PLL
, SERDES_PLL_CTRL
, &w
);
2252 if (w
& PLL_CTRL_FREQDET_EN
) {
2253 w
&= ~PLL_CTRL_FREQDET_EN
;
2254 sb_pcie_mdiowrite(si
, MDIODATA_DEV_PLL
, SERDES_PLL_CTRL
, w
);
2259 * Configure the pci core for pci client (NIC) action
2260 * coremask is the bitvec of cores by index to be enabled.
2263 BCMINITFN(sb_pci_setup
)(sb_t
*sbh
, uint coremask
)
2267 sbpciregs_t
*pciregs
;
2274 /* if not pci bus, we're done */
2275 if (BUSTYPE(si
->sb
.bustype
) != PCI_BUS
)
2278 ASSERT(PCI(si
) || PCIE(si
));
2279 ASSERT(si
->sb
.buscoreidx
!= BADIDX
);
2281 /* get current core index */
2284 /* we interrupt on this backplane flag number */
2285 ASSERT(GOODREGS(si
->curmap
));
2286 sb
= REGS2SB(si
->curmap
);
2287 sbflag
= R_SBREG(si
, &sb
->sbtpsflag
) & SBTPS_NUM0_MASK
;
2289 /* switch over to pci core */
2290 pciregs
= (sbpciregs_t
*) sb_setcoreidx(sbh
, si
->sb
.buscoreidx
);
2291 sb
= REGS2SB(pciregs
);
2294 * Enable sb->pci interrupts. Assume
2295 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
2297 if (PCIE(si
) || (PCI(si
) && ((si
->sb
.buscorerev
) >= 6))) {
2298 /* pci config write to set this core bit in PCIIntMask */
2299 w
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_INT_MASK
, sizeof(uint32
));
2300 w
|= (coremask
<< PCI_SBIM_SHIFT
);
2301 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_INT_MASK
, sizeof(uint32
), w
);
2303 /* set sbintvec bit for our flag number */
2304 OR_SBREG(si
, &sb
->sbintvec
, (1 << sbflag
));
2308 OR_REG(si
->osh
, &pciregs
->sbtopci2
, (SBTOPCI_PREF
|SBTOPCI_BURST
));
2309 if (si
->sb
.buscorerev
>= 11)
2310 OR_REG(si
->osh
, &pciregs
->sbtopci2
, SBTOPCI_RC_READMULTI
);
2311 if (si
->sb
.buscorerev
< 5) {
2312 SET_SBREG(si
, &sb
->sbimconfiglow
, SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
,
2313 (0x3 << SBIMCL_RTO_SHIFT
) | 0x2);
2318 /* PCIE workarounds */
2320 if ((si
->sb
.buscorerev
== 0) || (si
->sb
.buscorerev
== 1)) {
2321 w
= sb_pcie_readreg((void *)(uintptr
)sbh
,
2322 (void *)(uintptr
)PCIE_PCIEREGS
,
2323 PCIE_TLP_WORKAROUNDSREG
);
2325 sb_pcie_writereg((void *)(uintptr
)sbh
,
2326 (void *)(uintptr
)PCIE_PCIEREGS
,
2327 PCIE_TLP_WORKAROUNDSREG
, w
);
2330 if (si
->sb
.buscorerev
== 1) {
2331 w
= sb_pcie_readreg((void *)(uintptr
)sbh
,
2332 (void *)(uintptr
)PCIE_PCIEREGS
,
2335 sb_pcie_writereg((void *)(uintptr
)sbh
,
2336 (void *)(uintptr
)PCIE_PCIEREGS
, PCIE_DLLP_LCREG
, w
);
2339 if (si
->sb
.buscorerev
== 0)
2342 if ((si
->sb
.buscorerev
>= 3) && (si
->sb
.buscorerev
<= 5)) {
2343 w
= sb_pcie_readreg((void *)(uintptr
)sbh
,
2344 (void *)(uintptr
)PCIE_PCIEREGS
,
2345 PCIE_DLLP_PMTHRESHREG
);
2346 w
&= ~(PCIE_L1THRESHOLDTIME_MASK
);
2347 w
|= (PCIE_L1THRESHOLD_WARVAL
<< PCIE_L1THRESHOLDTIME_SHIFT
);
2348 sb_pcie_writereg((void *)(uintptr
)sbh
, (void *)(uintptr
)PCIE_PCIEREGS
,
2349 PCIE_DLLP_PMTHRESHREG
, w
);
2355 sb_war43448_aspm(sbh
);
2356 sb_war42767_clkreq(sbh
);
2360 /* switch back to previous core */
2361 sb_setcoreidx(sbh
, idx
);
2365 sb_base(uint32 admatch
)
2370 type
= admatch
& SBAM_TYPE_MASK
;
2376 base
= admatch
& SBAM_BASE0_MASK
;
2377 } else if (type
== 1) {
2378 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
2379 base
= admatch
& SBAM_BASE1_MASK
;
2380 } else if (type
== 2) {
2381 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
2382 base
= admatch
& SBAM_BASE2_MASK
;
2389 sb_size(uint32 admatch
)
2394 type
= admatch
& SBAM_TYPE_MASK
;
2400 size
= 1 << (((admatch
& SBAM_ADINT0_MASK
) >> SBAM_ADINT0_SHIFT
) + 1);
2401 } else if (type
== 1) {
2402 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
2403 size
= 1 << (((admatch
& SBAM_ADINT1_MASK
) >> SBAM_ADINT1_SHIFT
) + 1);
2404 } else if (type
== 2) {
2405 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
2406 size
= 1 << (((admatch
& SBAM_ADINT2_MASK
) >> SBAM_ADINT2_SHIFT
) + 1);
2412 /* return the core-type instantiation # of the current core */
2414 sb_coreunit(sb_t
*sbh
)
2427 ASSERT(GOODREGS(si
->curmap
));
2428 coreid
= sb_coreid(sbh
);
2430 /* count the cores of our type */
2431 for (i
= 0; i
< idx
; i
++)
2432 if (si
->coreid
[i
] == coreid
)
2439 BCMINITFN(factor6
)(uint32 x
)
2442 case CC_F6_2
: return 2;
2443 case CC_F6_3
: return 3;
2444 case CC_F6_4
: return 4;
2445 case CC_F6_5
: return 5;
2446 case CC_F6_6
: return 6;
2447 case CC_F6_7
: return 7;
2452 /* calculate the speed the SB would run at given a set of clockcontrol values */
2454 BCMINITFN(sb_clock_rate
)(uint32 pll_type
, uint32 n
, uint32 m
)
2456 uint32 n1
, n2
, clock
, m1
, m2
, m3
, mc
;
2458 n1
= n
& CN_N1_MASK
;
2459 n2
= (n
& CN_N2_MASK
) >> CN_N2_SHIFT
;
2461 if (pll_type
== PLL_TYPE6
) {
2462 if (m
& CC_T6_MMASK
)
2466 } else if ((pll_type
== PLL_TYPE1
) ||
2467 (pll_type
== PLL_TYPE3
) ||
2468 (pll_type
== PLL_TYPE4
) ||
2469 (pll_type
== PLL_TYPE7
)) {
2472 } else if (pll_type
== PLL_TYPE2
) {
2475 ASSERT((n1
>= 2) && (n1
<= 7));
2476 ASSERT((n2
>= 5) && (n2
<= 23));
2477 } else if (pll_type
== PLL_TYPE5
) {
2481 /* PLL types 3 and 7 use BASE2 (25Mhz) */
2482 if ((pll_type
== PLL_TYPE3
) ||
2483 (pll_type
== PLL_TYPE7
)) {
2484 clock
= CC_CLOCK_BASE2
* n1
* n2
;
2486 clock
= CC_CLOCK_BASE1
* n1
* n2
;
2491 m1
= m
& CC_M1_MASK
;
2492 m2
= (m
& CC_M2_MASK
) >> CC_M2_SHIFT
;
2493 m3
= (m
& CC_M3_MASK
) >> CC_M3_SHIFT
;
2494 mc
= (m
& CC_MC_MASK
) >> CC_MC_SHIFT
;
2496 if ((pll_type
== PLL_TYPE1
) ||
2497 (pll_type
== PLL_TYPE3
) ||
2498 (pll_type
== PLL_TYPE4
) ||
2499 (pll_type
== PLL_TYPE7
)) {
2501 if ((pll_type
== PLL_TYPE1
) || (pll_type
== PLL_TYPE3
))
2508 case CC_MC_BYPASS
: return (clock
);
2509 case CC_MC_M1
: return (clock
/ m1
);
2510 case CC_MC_M1M2
: return (clock
/ (m1
* m2
));
2511 case CC_MC_M1M2M3
: return (clock
/ (m1
* m2
* m3
));
2512 case CC_MC_M1M3
: return (clock
/ (m1
* m3
));
2513 default: return (0);
2516 ASSERT(pll_type
== PLL_TYPE2
);
2521 ASSERT((m1
>= 2) && (m1
<= 7));
2522 ASSERT((m2
>= 3) && (m2
<= 10));
2523 ASSERT((m3
>= 2) && (m3
<= 7));
2525 if ((mc
& CC_T2MC_M1BYP
) == 0)
2527 if ((mc
& CC_T2MC_M2BYP
) == 0)
2529 if ((mc
& CC_T2MC_M3BYP
) == 0)
2536 /* returns the current speed the SB is running at */
2538 BCMINITFN(sb_clock
)(sb_t
*sbh
)
2544 uint32 pll_type
, rate
;
2549 pll_type
= PLL_TYPE1
;
2551 INTR_OFF(si
, intr_val
);
2553 cc
= (chipcregs_t
*)sb_setcore(sbh
, SB_CC
, 0);
2556 if (sbh
->cccaps
& CC_CAP_PMU
) {
2557 rate
= sb_pmu_cpu_clock(sbh
, si
->osh
);
2561 pll_type
= sbh
->cccaps
& CC_CAP_PLL_MASK
;
2562 n
= R_REG(si
->osh
, &cc
->clockcontrol_n
);
2563 if (pll_type
== PLL_TYPE6
)
2564 m
= R_REG(si
->osh
, &cc
->clockcontrol_m3
);
2565 else if (pll_type
== PLL_TYPE3
)
2566 m
= R_REG(si
->osh
, &cc
->clockcontrol_m2
);
2568 m
= R_REG(si
->osh
, &cc
->clockcontrol_sb
);
2570 if (sb_chip(sbh
) == BCM5365_CHIP_ID
)
2572 rate
= 200000000; /* PLL_TYPE3 */
2573 } else if (sb_chip(sbh
) == BCM5354_CHIP_ID
)
2575 /* 5354 has a constant sb clock of 120MHz */
2578 /* calculate rate */
2579 rate
= sb_clock_rate(pll_type
, n
, m
);
2582 if (pll_type
== PLL_TYPE3
)
2586 /* switch back to previous core */
2587 sb_setcoreidx(sbh
, idx
);
2589 INTR_RESTORE(si
, intr_val
);
2595 BCMINITFN(sb_alp_clock
)(sb_t
*sbh
)
2597 uint32 clock
= ALP_CLOCK
;
2599 if (sbh
->cccaps
& CC_CAP_PMU
)
2600 clock
= sb_pmu_alp_clock(sbh
, sb_osh(sbh
));
2605 /* change logical "focus" to the gpio core for optimized access */
2607 sb_gpiosetcore(sb_t
*sbh
)
2613 return (sb_setcoreidx(sbh
, SB_CC_IDX
));
2616 /* mask&set gpiocontrol bits */
2618 sb_gpiocontrol(sb_t
*sbh
, uint32 mask
, uint32 val
, uint8 priority
)
2626 /* gpios could be shared on router platforms
2627 * ignore reservation if it's high priority (e.g., test apps)
2629 if ((priority
!= GPIO_HI_PRIORITY
) &&
2630 (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (val
|| mask
)) {
2631 mask
= priority
? (sb_gpioreservation
& mask
) :
2632 ((sb_gpioreservation
| mask
) & ~(sb_gpioreservation
));
2636 regoff
= OFFSETOF(chipcregs_t
, gpiocontrol
);
2637 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, mask
, val
));
2640 /* mask&set gpio output enable bits */
2642 sb_gpioouten(sb_t
*sbh
, uint32 mask
, uint32 val
, uint8 priority
)
2650 /* gpios could be shared on router platforms
2651 * ignore reservation if it's high priority (e.g., test apps)
2653 if ((priority
!= GPIO_HI_PRIORITY
) &&
2654 (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (val
|| mask
)) {
2655 mask
= priority
? (sb_gpioreservation
& mask
) :
2656 ((sb_gpioreservation
| mask
) & ~(sb_gpioreservation
));
2660 regoff
= OFFSETOF(chipcregs_t
, gpioouten
);
2661 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, mask
, val
));
2664 /* mask&set gpio output bits */
2666 sb_gpioout(sb_t
*sbh
, uint32 mask
, uint32 val
, uint8 priority
)
2674 /* gpios could be shared on router platforms
2675 * ignore reservation if it's high priority (e.g., test apps)
2677 if ((priority
!= GPIO_HI_PRIORITY
) &&
2678 (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (val
|| mask
)) {
2679 mask
= priority
? (sb_gpioreservation
& mask
) :
2680 ((sb_gpioreservation
| mask
) & ~(sb_gpioreservation
));
2684 regoff
= OFFSETOF(chipcregs_t
, gpioout
);
2685 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, mask
, val
));
2688 /* reserve one gpio */
2690 sb_gpioreserve(sb_t
*sbh
, uint32 gpio_bitmask
, uint8 priority
)
2696 /* only cores on SB_BUS share GPIO's and only applcation users need to
2697 * reserve/release GPIO
2699 if ((BUSTYPE(si
->sb
.bustype
) != SB_BUS
) || (!priority
)) {
2700 ASSERT((BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (priority
));
2703 /* make sure only one bit is set */
2704 if ((!gpio_bitmask
) || ((gpio_bitmask
) & (gpio_bitmask
- 1))) {
2705 ASSERT((gpio_bitmask
) && !((gpio_bitmask
) & (gpio_bitmask
- 1)));
2709 /* already reserved */
2710 if (sb_gpioreservation
& gpio_bitmask
)
2712 /* set reservation */
2713 sb_gpioreservation
|= gpio_bitmask
;
2715 return sb_gpioreservation
;
2718 /* release one gpio */
2720 * releasing the gpio doesn't change the current value on the GPIO last write value
2721 * persists till some one overwrites it
2725 sb_gpiorelease(sb_t
*sbh
, uint32 gpio_bitmask
, uint8 priority
)
2731 /* only cores on SB_BUS share GPIO's and only applcation users need to
2732 * reserve/release GPIO
2734 if ((BUSTYPE(si
->sb
.bustype
) != SB_BUS
) || (!priority
)) {
2735 ASSERT((BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (priority
));
2738 /* make sure only one bit is set */
2739 if ((!gpio_bitmask
) || ((gpio_bitmask
) & (gpio_bitmask
- 1))) {
2740 ASSERT((gpio_bitmask
) && !((gpio_bitmask
) & (gpio_bitmask
- 1)));
2744 /* already released */
2745 if (!(sb_gpioreservation
& gpio_bitmask
))
2748 /* clear reservation */
2749 sb_gpioreservation
&= ~gpio_bitmask
;
2751 return sb_gpioreservation
;
2754 /* return the current gpioin register value */
2756 sb_gpioin(sb_t
*sbh
)
2764 regoff
= OFFSETOF(chipcregs_t
, gpioin
);
2765 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, 0, 0));
2768 /* mask&set gpio interrupt polarity bits */
2770 sb_gpiointpolarity(sb_t
*sbh
, uint32 mask
, uint32 val
, uint8 priority
)
2778 /* gpios could be shared on router platforms */
2779 if ((BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (val
|| mask
)) {
2780 mask
= priority
? (sb_gpioreservation
& mask
) :
2781 ((sb_gpioreservation
| mask
) & ~(sb_gpioreservation
));
2785 regoff
= OFFSETOF(chipcregs_t
, gpiointpolarity
);
2786 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, mask
, val
));
2789 /* mask&set gpio interrupt mask bits */
2791 sb_gpiointmask(sb_t
*sbh
, uint32 mask
, uint32 val
, uint8 priority
)
2799 /* gpios could be shared on router platforms */
2800 if ((BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (val
|| mask
)) {
2801 mask
= priority
? (sb_gpioreservation
& mask
) :
2802 ((sb_gpioreservation
| mask
) & ~(sb_gpioreservation
));
2806 regoff
= OFFSETOF(chipcregs_t
, gpiointmask
);
2807 return (sb_corereg(sbh
, SB_CC_IDX
, regoff
, mask
, val
));
2810 /* assign the gpio to an led */
2812 sb_gpioled(sb_t
*sbh
, uint32 mask
, uint32 val
)
2817 if (si
->sb
.ccrev
< 16)
2820 /* gpio led powersave reg */
2821 return (sb_corereg(sbh
, SB_CC_IDX
, OFFSETOF(chipcregs_t
, gpiotimeroutmask
), mask
, val
));
2824 /* mask&set gpio timer val */
2826 sb_gpiotimerval(sb_t
*sbh
, uint32 mask
, uint32 gpiotimerval
)
2831 if (si
->sb
.ccrev
< 16)
2834 return (sb_corereg(sbh
, SB_CC_IDX
,
2835 OFFSETOF(chipcregs_t
, gpiotimerval
), mask
, gpiotimerval
));
2839 sb_gpiopull(sb_t
*sbh
, bool updown
, uint32 mask
, uint32 val
)
2845 if (si
->sb
.ccrev
< 20)
2848 offs
= (updown
? OFFSETOF(chipcregs_t
, gpiopulldown
) : OFFSETOF(chipcregs_t
, gpiopullup
));
2849 return (sb_corereg(sbh
, SB_CC_IDX
, offs
, mask
, val
));
2853 sb_gpioevent(sb_t
*sbh
, uint regtype
, uint32 mask
, uint32 val
)
2859 if (si
->sb
.ccrev
< 11)
2862 if (regtype
== GPIO_REGEVT
)
2863 offs
= OFFSETOF(chipcregs_t
, gpioevent
);
2864 else if (regtype
== GPIO_REGEVT_INTMSK
)
2865 offs
= OFFSETOF(chipcregs_t
, gpioeventintmask
);
2866 else if (regtype
== GPIO_REGEVT_INTPOL
)
2867 offs
= OFFSETOF(chipcregs_t
, gpioeventintpolarity
);
2871 return (sb_corereg(sbh
, SB_CC_IDX
, offs
, mask
, val
));
2875 BCMINITFN(sb_gpio_handler_register
)(sb_t
*sbh
, uint32 event
,
2876 bool level
, gpio_handler_t cb
, void *arg
)
2885 if (si
->sb
.ccrev
< 11)
2888 if ((gi
= MALLOC(si
->osh
, sizeof(gpioh_item_t
))) == NULL
)
2891 bzero(gi
, sizeof(gpioh_item_t
));
2897 gi
->next
= si
->gpioh_head
;
2898 si
->gpioh_head
= gi
;
2904 BCMINITFN(sb_gpio_handler_unregister
)(sb_t
*sbh
, void* gpioh
)
2907 gpioh_item_t
*p
, *n
;
2910 if (si
->sb
.ccrev
< 11)
2913 ASSERT(si
->gpioh_head
);
2914 if ((void*)si
->gpioh_head
== gpioh
) {
2915 si
->gpioh_head
= si
->gpioh_head
->next
;
2916 MFREE(si
->osh
, gpioh
, sizeof(gpioh_item_t
));
2923 if ((void*)n
== gpioh
) {
2925 MFREE(si
->osh
, gpioh
, sizeof(gpioh_item_t
));
2933 ASSERT(0); /* Not found in list */
2937 sb_gpio_handler_process(sb_t
*sbh
)
2942 uint32 level
= sb_gpioin(sbh
);
2943 uint32 edge
= sb_gpioevent(sbh
, GPIO_REGEVT
, 0, 0);
2946 for (h
= si
->gpioh_head
; h
!= NULL
; h
= h
->next
) {
2948 status
= (h
->level
? level
: edge
);
2950 if (status
& h
->event
)
2951 h
->handler(status
, h
->arg
);
2955 sb_gpioevent(sbh
, GPIO_REGEVT
, edge
, edge
); /* clear edge-trigger status */
2959 sb_gpio_int_enable(sb_t
*sbh
, bool enable
)
2965 if (si
->sb
.ccrev
< 11)
2968 offs
= OFFSETOF(chipcregs_t
, intmask
);
2969 return (sb_corereg(sbh
, SB_CC_IDX
, offs
, CI_GPIO
, (enable
? CI_GPIO
: 0)));
2973 /* return the slow clock source - LPO, XTAL, or PCI */
2975 sb_slowclk_src(sb_info_t
*si
)
2980 ASSERT(sb_coreid(&si
->sb
) == SB_CC
);
2982 if (si
->sb
.ccrev
< 6) {
2983 if ((BUSTYPE(si
->sb
.bustype
) == PCI_BUS
) &&
2984 (OSL_PCI_READ_CONFIG(si
->osh
, PCI_GPIO_OUT
, sizeof(uint32
)) &
2986 return (SCC_SS_PCI
);
2988 return (SCC_SS_XTAL
);
2989 } else if (si
->sb
.ccrev
< 10) {
2990 cc
= (chipcregs_t
*) sb_setcoreidx(&si
->sb
, si
->curidx
);
2991 return (R_REG(si
->osh
, &cc
->slow_clk_ctl
) & SCC_SS_MASK
);
2992 } else /* Insta-clock */
2993 return (SCC_SS_XTAL
);
2996 /* return the ILP (slowclock) min or max frequency */
2998 sb_slowclk_freq(sb_info_t
*si
, bool max_freq
)
3005 ASSERT(sb_coreid(&si
->sb
) == SB_CC
);
3007 cc
= (chipcregs_t
*) sb_setcoreidx(&si
->sb
, si
->curidx
);
3009 /* shouldn't be here unless we've established the chip has dynamic clk control */
3010 ASSERT(R_REG(si
->osh
, &cc
->capabilities
) & CC_CAP_PWR_CTL
);
3012 slowclk
= sb_slowclk_src(si
);
3013 if (si
->sb
.ccrev
< 6) {
3014 if (slowclk
== SCC_SS_PCI
)
3015 return (max_freq
? (PCIMAXFREQ
/ 64) : (PCIMINFREQ
/ 64));
3017 return (max_freq
? (XTALMAXFREQ
/ 32) : (XTALMINFREQ
/ 32));
3018 } else if (si
->sb
.ccrev
< 10) {
3019 div
= 4 * (((R_REG(si
->osh
, &cc
->slow_clk_ctl
) & SCC_CD_MASK
) >> SCC_CD_SHIFT
) + 1);
3020 if (slowclk
== SCC_SS_LPO
)
3021 return (max_freq
? LPOMAXFREQ
: LPOMINFREQ
);
3022 else if (slowclk
== SCC_SS_XTAL
)
3023 return (max_freq
? (XTALMAXFREQ
/ div
) : (XTALMINFREQ
/ div
));
3024 else if (slowclk
== SCC_SS_PCI
)
3025 return (max_freq
? (PCIMAXFREQ
/ div
) : (PCIMINFREQ
/ div
));
3029 /* Chipc rev 10 is InstaClock */
3030 div
= R_REG(si
->osh
, &cc
->system_clk_ctl
) >> SYCC_CD_SHIFT
;
3031 div
= 4 * (div
+ 1);
3032 return (max_freq
? XTALMAXFREQ
: (XTALMINFREQ
/ div
));
3038 BCMINITFN(sb_clkctl_setdelay
)(sb_info_t
*si
, void *chipcregs
)
3041 uint slowmaxfreq
, pll_delay
, slowclk
;
3042 uint pll_on_delay
, fref_sel_delay
;
3044 pll_delay
= PLL_DELAY
;
3046 /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
3047 * since the xtal will also be powered down by dynamic clk control logic.
3050 slowclk
= sb_slowclk_src(si
);
3051 if (slowclk
!= SCC_SS_XTAL
)
3052 pll_delay
+= XTAL_ON_DELAY
;
3054 /* Starting with 4318 it is ILP that is used for the delays */
3055 slowmaxfreq
= sb_slowclk_freq(si
, (si
->sb
.ccrev
>= 10) ? FALSE
: TRUE
);
3057 pll_on_delay
= ((slowmaxfreq
* pll_delay
) + 999999) / 1000000;
3058 fref_sel_delay
= ((slowmaxfreq
* FREF_DELAY
) + 999999) / 1000000;
3060 cc
= (chipcregs_t
*)chipcregs
;
3061 W_REG(si
->osh
, &cc
->pll_on_delay
, pll_on_delay
);
3062 W_REG(si
->osh
, &cc
->fref_sel_delay
, fref_sel_delay
);
3065 /* initialize power control delay registers */
3067 BCMINITFN(sb_clkctl_init
)(sb_t
*sbh
)
3075 origidx
= si
->curidx
;
3077 if ((cc
= (chipcregs_t
*) sb_setcore(sbh
, SB_CC
, 0)) == NULL
)
3080 if ((si
->sb
.chip
== BCM4321_CHIP_ID
) && (si
->sb
.chiprev
< 2))
3081 W_REG(si
->osh
, &cc
->chipcontrol
,
3082 (si
->sb
.chiprev
== 0) ? CHIPCTRL_4321A0_DEFAULT
: CHIPCTRL_4321A1_DEFAULT
);
3084 if (!(R_REG(si
->osh
, &cc
->capabilities
) & CC_CAP_PWR_CTL
))
3087 /* set all Instaclk chip ILP to 1 MHz */
3088 if (si
->sb
.ccrev
>= 10)
3089 SET_REG(si
->osh
, &cc
->system_clk_ctl
, SYCC_CD_MASK
,
3090 (ILP_DIV_1MHZ
<< SYCC_CD_SHIFT
));
3092 sb_clkctl_setdelay(si
, (void *)(uintptr
)cc
);
3095 sb_setcoreidx(sbh
, origidx
);
3098 /* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
3100 BCMINITFN(sb_clkctl_fast_pwrup_delay
)(sb_t
*sbh
)
3111 origidx
= si
->curidx
;
3113 INTR_OFF(si
, intr_val
);
3115 if ((cc
= (chipcregs_t
*) sb_setcore(sbh
, SB_CC
, 0)) == NULL
)
3118 if (sbh
->cccaps
& CC_CAP_PMU
) {
3119 fpdelay
= sb_pmu_fast_pwrup_delay(sbh
, si
->osh
);
3123 if (!(sbh
->cccaps
& CC_CAP_PWR_CTL
))
3126 slowminfreq
= sb_slowclk_freq(si
, FALSE
);
3127 fpdelay
= (((R_REG(si
->osh
, &cc
->pll_on_delay
) + 2) * 1000000) +
3128 (slowminfreq
- 1)) / slowminfreq
;
3131 sb_setcoreidx(sbh
, origidx
);
3132 INTR_RESTORE(si
, intr_val
);
3136 /* turn primary xtal and/or pll off/on */
3138 sb_clkctl_xtal(sb_t
*sbh
, uint what
, bool on
)
3141 uint32 in
, out
, outen
;
3145 switch (BUSTYPE(si
->sb
.bustype
)) {
3154 /* pcie core doesn't have any mapping to control the xtal pu */
3158 in
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_GPIO_IN
, sizeof(uint32
));
3159 out
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_GPIO_OUT
, sizeof(uint32
));
3160 outen
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_GPIO_OUTEN
, sizeof(uint32
));
3163 * Avoid glitching the clock if GPRS is already using it.
3164 * We can't actually read the state of the PLLPD so we infer it
3165 * by the value of XTAL_PU which *is* readable via gpioin.
3167 if (on
&& (in
& PCI_CFG_GPIO_XTAL
))
3171 outen
|= PCI_CFG_GPIO_XTAL
;
3173 outen
|= PCI_CFG_GPIO_PLL
;
3176 /* turn primary xtal on */
3178 out
|= PCI_CFG_GPIO_XTAL
;
3180 out
|= PCI_CFG_GPIO_PLL
;
3181 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_GPIO_OUT
,
3182 sizeof(uint32
), out
);
3183 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_GPIO_OUTEN
,
3184 sizeof(uint32
), outen
);
3185 OSL_DELAY(XTAL_ON_DELAY
);
3190 out
&= ~PCI_CFG_GPIO_PLL
;
3191 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_GPIO_OUT
,
3192 sizeof(uint32
), out
);
3197 out
&= ~PCI_CFG_GPIO_XTAL
;
3199 out
|= PCI_CFG_GPIO_PLL
;
3200 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_GPIO_OUT
, sizeof(uint32
), out
);
3201 OSL_PCI_WRITE_CONFIG(si
->osh
, PCI_GPIO_OUTEN
, sizeof(uint32
),
3212 /* set dynamic clk control mode (forceslow, forcefast, dynamic) */
3213 /* returns true if we are forcing fast clock */
3215 sb_clkctl_clk(sb_t
*sbh
, uint mode
)
3225 /* chipcommon cores prior to rev6 don't support dynamic clock control */
3226 if (si
->sb
.ccrev
< 6)
3230 /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
3231 ASSERT(si
->sb
.ccrev
!= 10);
3233 INTR_OFF(si
, intr_val
);
3235 origidx
= si
->curidx
;
3237 if (sb_setcore(sbh
, SB_MIPS33
, 0) && (sb_corerev(&si
->sb
) <= 7) &&
3238 (BUSTYPE(si
->sb
.bustype
) == SB_BUS
) && (si
->sb
.ccrev
>= 10))
3241 if (FORCEHT_WAR32414(si
))
3244 cc
= (chipcregs_t
*) sb_setcore(sbh
, SB_CC
, 0);
3247 if (!(R_REG(si
->osh
, &cc
->capabilities
) & CC_CAP_PWR_CTL
) && (si
->sb
.ccrev
< 20))
3251 case CLK_FAST
: /* force fast (pll) clock */
3252 if (si
->sb
.ccrev
< 10) {
3253 /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
3254 sb_clkctl_xtal(&si
->sb
, XTAL
, ON
);
3256 SET_REG(si
->osh
, &cc
->slow_clk_ctl
, (SCC_XC
| SCC_FS
| SCC_IP
), SCC_IP
);
3257 } else if (si
->sb
.ccrev
< 20) {
3258 OR_REG(si
->osh
, &cc
->system_clk_ctl
, SYCC_HR
);
3260 OR_REG(si
->osh
, &cc
->clk_ctl_st
, CCS_FORCEHT
);
3263 /* wait for the PLL */
3264 if (R_REG(si
->osh
, &cc
->capabilities
) & CC_CAP_PMU
) {
3265 SPINWAIT(((R_REG(si
->osh
, &cc
->clk_ctl_st
) & CCS_HTAVAIL
) == 0),
3266 PMU_MAX_TRANSITION_DLY
);
3267 ASSERT(R_REG(si
->osh
, &cc
->clk_ctl_st
) & CCS_HTAVAIL
);
3269 OSL_DELAY(PLL_DELAY
);
3273 case CLK_DYNAMIC
: /* enable dynamic clock control */
3274 if (si
->sb
.ccrev
< 10) {
3275 scc
= R_REG(si
->osh
, &cc
->slow_clk_ctl
);
3276 scc
&= ~(SCC_FS
| SCC_IP
| SCC_XC
);
3277 if ((scc
& SCC_SS_MASK
) != SCC_SS_XTAL
)
3279 W_REG(si
->osh
, &cc
->slow_clk_ctl
, scc
);
3281 /* for dynamic control, we have to release our xtal_pu "force on" */
3283 sb_clkctl_xtal(&si
->sb
, XTAL
, OFF
);
3284 } else if (si
->sb
.ccrev
< 20) {
3286 AND_REG(si
->osh
, &cc
->system_clk_ctl
, ~SYCC_HR
);
3288 AND_REG(si
->osh
, &cc
->clk_ctl_st
, ~CCS_FORCEHT
);
3297 sb_setcoreidx(sbh
, origidx
);
3298 INTR_RESTORE(si
, intr_val
);
3299 return (mode
== CLK_FAST
);
3302 /* register driver interrupt disabling and restoring callback functions */
3304 sb_register_intr_callback(sb_t
*sbh
, void *intrsoff_fn
, void *intrsrestore_fn
,
3305 void *intrsenabled_fn
, void *intr_arg
)
3310 si
->intr_arg
= intr_arg
;
3311 si
->intrsoff_fn
= (sb_intrsoff_t
)intrsoff_fn
;
3312 si
->intrsrestore_fn
= (sb_intrsrestore_t
)intrsrestore_fn
;
3313 si
->intrsenabled_fn
= (sb_intrsenabled_t
)intrsenabled_fn
;
3314 /* save current core id. when this function called, the current core
3315 * must be the core which provides driver functions(il, et, wl, etc.)
3317 si
->dev_coreid
= si
->coreid
[si
->curidx
];
3321 sb_deregister_intr_callback(sb_t
*sbh
)
3326 si
->intrsoff_fn
= NULL
;
3331 BCMINITFN(sb_d11_devid
)(sb_t
*sbh
)
3333 sb_info_t
*si
= SB_INFO(sbh
);
3336 #if defined(CONFIG_BCM4328)
3337 /* Fix device id for dual band BCM4328 */
3338 if (sbh
->chip
== BCM4328_CHIP_ID
&&
3339 (sbh
->chippkg
== BCM4328USBDUAL_PKG_ID
|| sbh
->chippkg
== BCM4328SDIODUAL_PKG_ID
))
3340 device
= BCM4328_D11DUAL_ID
;
3342 #endif /* BCM4328 */
3343 /* Let an nvram variable with devpath override devid */
3344 if ((device
= (uint16
)sb_getdevpathintvar(sbh
, "devid")) != 0)
3346 /* Get devid from OTP/SPROM depending on where the SROM is read */
3347 else if ((device
= (uint16
)getintvar(si
->vars
, "devid")) != 0)
3350 * no longer support wl0id, but keep the code
3351 * here for backward compatibility.
3353 else if ((device
= (uint16
)getintvar(si
->vars
, "wl0id")) != 0)
3355 /* Chip specific conversion */
3356 else if (sbh
->chip
== BCM4712_CHIP_ID
) {
3357 if (sbh
->chippkg
== BCM4712SMALL_PKG_ID
)
3358 device
= BCM4306_D11G_ID
;
3360 device
= BCM4306_D11DUAL_ID
;
3370 BCMINITFN(sb_corepciid
)(sb_t
*sbh
, uint func
, uint16
*pcivendor
, uint16
*pcidevice
,
3371 uint8
*pciclass
, uint8
*pcisubclass
, uint8
*pciprogif
,
3374 uint16 vendor
= 0xffff, device
= 0xffff;
3375 uint8
class, subclass
, progif
= 0;
3376 uint8 header
= PCI_HEADER_NORMAL
;
3377 uint32 core
= sb_coreid(sbh
);
3379 /* Verify whether the function exists for the core */
3380 if (func
>= (uint
)(core
== SB_USB20H
? 2 : 1))
3383 /* Known vendor translations */
3384 switch (sb_corevendor(sbh
)) {
3386 vendor
= VENDOR_BROADCOM
;
3392 /* Determine class based on known core codes */
3395 class = PCI_CLASS_NET
;
3396 subclass
= PCI_NET_ETHER
;
3397 device
= BCM47XX_ILINE_ID
;
3400 class = PCI_CLASS_NET
;
3401 subclass
= PCI_NET_ETHER
;
3402 device
= BCM47XX_ENET_ID
;
3405 class = PCI_CLASS_NET
;
3406 subclass
= PCI_NET_ETHER
;
3407 device
= BCM47XX_GIGETH_ID
;
3411 class = PCI_CLASS_MEMORY
;
3412 subclass
= PCI_MEMORY_RAM
;
3413 device
= (uint16
)core
;
3417 class = PCI_CLASS_BRIDGE
;
3418 subclass
= PCI_BRIDGE_PCI
;
3419 device
= (uint16
)core
;
3420 header
= PCI_HEADER_BRIDGE
;
3423 class = PCI_CLASS_CPU
;
3424 subclass
= PCI_CPU_MIPS
;
3425 device
= (uint16
)core
;
3428 class = PCI_CLASS_COMM
;
3429 subclass
= PCI_COMM_MODEM
;
3430 device
= BCM47XX_V90_ID
;
3433 class = PCI_CLASS_SERIAL
;
3434 subclass
= PCI_SERIAL_USB
;
3435 progif
= 0x10; /* OHCI */
3436 device
= BCM47XX_USB_ID
;
3439 class = PCI_CLASS_SERIAL
;
3440 subclass
= PCI_SERIAL_USB
;
3441 progif
= 0x10; /* OHCI */
3442 device
= BCM47XX_USBH_ID
;
3445 class = PCI_CLASS_SERIAL
;
3446 subclass
= PCI_SERIAL_USB
;
3447 progif
= func
== 0 ? 0x10 : 0x20; /* OHCI/EHCI */
3448 device
= BCM47XX_USB20H_ID
;
3449 header
= 0x80; /* multifunction */
3452 class = PCI_CLASS_CRYPT
;
3453 subclass
= PCI_CRYPT_NETWORK
;
3454 device
= BCM47XX_IPSEC_ID
;
3457 class = PCI_CLASS_NET
;
3458 subclass
= PCI_NET_OTHER
;
3459 device
= BCM47XX_ROBO_ID
;
3462 class = PCI_CLASS_MEMORY
;
3463 subclass
= PCI_MEMORY_FLASH
;
3464 device
= (uint16
)core
;
3467 class = PCI_CLASS_XOR
;
3468 subclass
= PCI_XOR_QDMA
;
3469 device
= BCM47XX_SATAXOR_ID
;
3472 class = PCI_CLASS_DASDI
;
3473 subclass
= PCI_DASDI_IDE
;
3474 device
= BCM47XX_ATA100_ID
;
3477 class = PCI_CLASS_SERIAL
;
3478 subclass
= PCI_SERIAL_USB
;
3479 device
= BCM47XX_USBD_ID
;
3482 class = PCI_CLASS_SERIAL
;
3483 subclass
= PCI_SERIAL_USB
;
3484 device
= BCM47XX_USB20D_ID
;
3487 class = PCI_CLASS_NET
;
3488 subclass
= PCI_NET_OTHER
;
3489 device
= sb_d11_devid(sbh
);
3493 class = subclass
= progif
= 0xff;
3494 device
= (uint16
)core
;
3498 *pcivendor
= vendor
;
3499 *pcidevice
= device
;
3501 *pcisubclass
= subclass
;
3502 *pciprogif
= progif
;
3503 *pciheader
= header
;
3508 /* use the mdio interface to read from mdio slaves */
3510 sb_pcie_mdioread(sb_info_t
*si
, uint physmedia
, uint regaddr
, uint
*regval
)
3514 sbpcieregs_t
*pcieregs
;
3516 pcieregs
= (sbpcieregs_t
*) sb_setcoreidx(&si
->sb
, si
->sb
.buscoreidx
);
3519 /* enable mdio access to SERDES */
3520 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), MDIOCTL_PREAM_EN
| MDIOCTL_DIVISOR_VAL
);
3522 mdiodata
= MDIODATA_START
| MDIODATA_READ
|
3523 (physmedia
<< MDIODATA_DEVADDR_SHF
) |
3524 (regaddr
<< MDIODATA_REGADDR_SHF
) | MDIODATA_TA
;
3526 W_REG(si
->osh
, &pcieregs
->mdiodata
, mdiodata
);
3530 /* retry till the transaction is complete */
3532 if (R_REG(si
->osh
, &(pcieregs
->mdiocontrol
)) & MDIOCTL_ACCESS_DONE
) {
3534 *regval
= (R_REG(si
->osh
, &(pcieregs
->mdiodata
)) & MDIODATA_MASK
);
3535 /* Disable mdio access to SERDES */
3536 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), 0);
3543 SB_ERROR(("sb_pcie_mdioread: timed out\n"));
3544 /* Disable mdio access to SERDES */
3545 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), 0);
3550 /* use the mdio interface to write to mdio slaves */
3552 sb_pcie_mdiowrite(sb_info_t
*si
, uint physmedia
, uint regaddr
, uint val
)
3556 sbpcieregs_t
*pcieregs
;
3558 pcieregs
= (sbpcieregs_t
*) sb_setcoreidx(&si
->sb
, si
->sb
.buscoreidx
);
3561 /* enable mdio access to SERDES */
3562 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), MDIOCTL_PREAM_EN
| MDIOCTL_DIVISOR_VAL
);
3564 mdiodata
= MDIODATA_START
| MDIODATA_WRITE
|
3565 (physmedia
<< MDIODATA_DEVADDR_SHF
) |
3566 (regaddr
<< MDIODATA_REGADDR_SHF
) | MDIODATA_TA
| val
;
3568 W_REG(si
->osh
, (&pcieregs
->mdiodata
), mdiodata
);
3572 /* retry till the transaction is complete */
3574 if (R_REG(si
->osh
, &(pcieregs
->mdiocontrol
)) & MDIOCTL_ACCESS_DONE
) {
3575 /* Disable mdio access to SERDES */
3576 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), 0);
3583 SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
3584 /* Disable mdio access to SERDES */
3585 W_REG(si
->osh
, (&pcieregs
->mdiocontrol
), 0);
3590 /* indirect way to read pcie config regs */
3592 sb_pcie_readreg(void *sb
, void* arg1
, uint offset
)
3596 uint retval
= 0xFFFFFFFF;
3597 sbpcieregs_t
*pcieregs
;
3604 pcieregs
= (sbpcieregs_t
*)sb_setcore(sbh
, SB_PCIE
, 0);
3607 addrtype
= (uint
)((uintptr
)arg1
);
3609 case PCIE_CONFIGREGS
:
3610 W_REG(si
->osh
, (&pcieregs
->configaddr
), offset
);
3611 retval
= R_REG(si
->osh
, &(pcieregs
->configdata
));
3614 W_REG(si
->osh
, &(pcieregs
->pcieindaddr
), offset
);
3615 retval
= R_REG(si
->osh
, &(pcieregs
->pcieinddata
));
3624 /* indirect way to write pcie config/mdio/pciecore regs */
3626 sb_pcie_writereg(sb_t
*sbh
, void *arg1
, uint offset
, uint val
)
3629 sbpcieregs_t
*pcieregs
;
3635 pcieregs
= (sbpcieregs_t
*)sb_setcore(sbh
, SB_PCIE
, 0);
3638 addrtype
= (uint
)((uintptr
)arg1
);
3641 case PCIE_CONFIGREGS
:
3642 W_REG(si
->osh
, (&pcieregs
->configaddr
), offset
);
3643 W_REG(si
->osh
, (&pcieregs
->configdata
), val
);
3646 W_REG(si
->osh
, (&pcieregs
->pcieindaddr
), offset
);
3647 W_REG(si
->osh
, (&pcieregs
->pcieinddata
), val
);
3657 /* Build device path. Support SB, PCI, and JTAG for now. */
3659 BCMINITFN(sb_devpath
)(sb_t
*sbh
, char *path
, int size
)
3663 ASSERT(size
>= SB_DEVPATH_BUFSZ
);
3665 if (!path
|| size
<= 0)
3668 switch (BUSTYPE((SB_INFO(sbh
))->sb
.bustype
)) {
3671 slen
= snprintf(path
, (size_t)size
, "sb/%u/", sb_coreidx(sbh
));
3674 ASSERT((SB_INFO(sbh
))->osh
);
3675 slen
= snprintf(path
, (size_t)size
, "pci/%u/%u/",
3676 OSL_PCI_BUS((SB_INFO(sbh
))->osh
),
3677 OSL_PCI_SLOT((SB_INFO(sbh
))->osh
));
3680 SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
3681 SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
3682 slen
= snprintf(path
, (size_t)size
, "pc/1/1/");
3690 if (slen
< 0 || slen
>= size
) {
3698 /* Get a variable, but only if it has a devpath prefix */
3700 BCMINITFN(sb_getdevpathvar
)(sb_t
*sbh
, const char *name
)
3702 char varname
[SB_DEVPATH_BUFSZ
+ 32];
3704 sb_devpathvar(sbh
, varname
, sizeof(varname
), name
);
3706 return (getvar(NULL
, varname
));
3709 /* Get a variable, but only if it has a devpath prefix */
3711 BCMINITFN(sb_getdevpathintvar
)(sb_t
*sbh
, const char *name
)
3713 char varname
[SB_DEVPATH_BUFSZ
+ 32];
3715 sb_devpathvar(sbh
, varname
, sizeof(varname
), name
);
3717 return (getintvar(NULL
, varname
));
3720 /* Concatenate the dev path with a varname into the given 'var' buffer
3721 * and return the 'var' pointer.
3722 * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned.
3723 * On overflow, the first char will be set to '\0'.
3726 BCMINITFN(sb_devpathvar
)(sb_t
*sbh
, char *var
, int len
, const char *name
)
3730 if (!var
|| len
<= 0)
3733 if (sb_devpath(sbh
, var
, len
) == 0) {
3734 path_len
= strlen(var
);
3736 if (strlen(name
) + 1 > (uint
)(len
- path_len
))
3739 strncpy(var
+ path_len
, name
, len
- path_len
- 1);
3747 * Fixup SROMless PCI device's configuration.
3748 * The current core may be changed upon return.
3751 sb_pci_fixcfg(sb_info_t
*si
)
3753 uint origidx
, pciidx
;
3754 sbpciregs_t
*pciregs
;
3755 sbpcieregs_t
*pcieregs
= NULL
;
3756 uint16 val16
, *reg16
;
3759 ASSERT(BUSTYPE(si
->sb
.bustype
) == PCI_BUS
);
3761 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
3762 /* save the current index */
3763 origidx
= sb_coreidx(&si
->sb
);
3765 /* check 'pi' is correct and fix it if not */
3766 if (si
->sb
.buscoretype
== SB_PCIE
) {
3767 pcieregs
= (sbpcieregs_t
*)sb_setcore(&si
->sb
, SB_PCIE
, 0);
3769 reg16
= &pcieregs
->sprom
[SRSH_PI_OFFSET
];
3770 } else if (si
->sb
.buscoretype
== SB_PCI
) {
3771 pciregs
= (sbpciregs_t
*)sb_setcore(&si
->sb
, SB_PCI
, 0);
3773 reg16
= &pciregs
->sprom
[SRSH_PI_OFFSET
];
3778 pciidx
= sb_coreidx(&si
->sb
);
3779 val16
= R_REG(si
->osh
, reg16
);
3780 if (((val16
& SRSH_PI_MASK
) >> SRSH_PI_SHIFT
) != (uint16
)pciidx
) {
3781 val16
= (uint16
)(pciidx
<< SRSH_PI_SHIFT
) | (val16
& ~SRSH_PI_MASK
);
3782 W_REG(si
->osh
, reg16
, val16
);
3785 if (PCIE_ASPMWARS(si
)) {
3786 w
= sb_pcie_readreg((void *)(uintptr
)&si
->sb
, (void *)PCIE_PCIEREGS
,
3787 PCIE_PLP_STATUSREG
);
3789 /* Detect the current polarity at attach and force that polarity and
3790 * disable changing the polarity
3792 if ((w
& PCIE_PLP_POLARITYINV_STAT
) == 0) {
3793 si
->pcie_polarity
= (SERDES_RX_CTRL_FORCE
);
3795 si
->pcie_polarity
= (SERDES_RX_CTRL_FORCE
|
3796 SERDES_RX_CTRL_POLARITY
);
3799 w
= OSL_PCI_READ_CONFIG(si
->osh
, si
->pciecap_lcreg_offset
, sizeof(uint32
));
3800 if (w
& PCIE_CLKREQ_ENAB
) {
3801 reg16
= &pcieregs
->sprom
[SRSH_CLKREQ_OFFSET
];
3802 val16
= R_REG(si
->osh
, reg16
);
3803 /* if clockreq is not advertized clkreq should not be enabled */
3804 if (!(val16
& SRSH_CLKREQ_ENB
))
3805 SB_ERROR(("WARNING: CLK REQ enabled already 0x%x\n", w
));
3808 sb_war43448(&si
->sb
);
3810 sb_war42767(&si
->sb
);
3814 /* restore the original index */
3815 sb_setcoreidx(&si
->sb
, origidx
);
3820 /* Return ADDR64 capability of the backplane */
3822 sb_backplane64(sb_t
*sbh
)
3827 return ((si
->sb
.cccaps
& CC_CAP_BKPLN64
) != 0);
3831 sb_btcgpiowar(sb_t
*sbh
)
3839 /* Make sure that there is ChipCommon core present &&
3840 * UART_TX is strapped to 1
3842 if (!(si
->sb
.cccaps
& CC_CAP_UARTGPIO
))
3845 /* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3846 INTR_OFF(si
, intr_val
);
3848 origidx
= sb_coreidx(sbh
);
3850 cc
= (chipcregs_t
*)sb_setcore(sbh
, SB_CC
, 0);
3853 W_REG(si
->osh
, &cc
->uart0mcr
, R_REG(si
->osh
, &cc
->uart0mcr
) | 0x04);
3855 /* restore the original index */
3856 sb_setcoreidx(sbh
, origidx
);
3858 INTR_RESTORE(si
, intr_val
);
3861 /* check if the device is removed */
3863 sb_deviceremoved(sb_t
*sbh
)
3870 switch (BUSTYPE(si
->sb
.bustype
)) {
3873 w
= OSL_PCI_READ_CONFIG(si
->osh
, PCI_CFG_VID
, sizeof(uint32
));
3874 if ((w
& 0xFFFF) != VENDOR_BROADCOM
)
3884 /* Return the RAM size of the SOCRAM core */
3886 BCMINITFN(sb_socram_size
)(sb_t
*sbh
)
3892 sbsocramregs_t
*regs
;
3901 /* Block ints and save current core */
3902 INTR_OFF(si
, intr_val
);
3903 origidx
= sb_coreidx(sbh
);
3905 /* Switch to SOCRAM core */
3906 if (!(regs
= sb_setcore(sbh
, SB_SOCRAM
, 0)))
3909 /* Get info for determining size */
3910 if (!(wasup
= sb_iscoreup(sbh
)))
3911 sb_core_reset(sbh
, 0, 0);
3912 corerev
= sb_corerev(sbh
);
3913 coreinfo
= R_REG(si
->osh
, ®s
->coreinfo
);
3915 /* Calculate size from coreinfo based on rev */
3917 memsize
= 1 << (16 + (coreinfo
& SRCI_MS0_MASK
));
3918 else if (corerev
< 3) {
3919 memsize
= 1 << (SR_BSZ_BASE
+ (coreinfo
& SRCI_SRBSZ_MASK
));
3920 memsize
*= (coreinfo
& SRCI_SRNB_MASK
) >> SRCI_SRNB_SHIFT
;
3923 uint nb
= (coreinfo
& SRCI_SRNB_MASK
) >> SRCI_SRNB_SHIFT
;
3924 uint bsz
= (coreinfo
& SRCI_SRBSZ_MASK
);
3925 uint lss
= (coreinfo
& SRCI_LSS_MASK
) >> SRCI_LSS_SHIFT
;
3928 memsize
= nb
* (1 << (bsz
+ SR_BSZ_BASE
));
3930 memsize
+= (1 << ((lss
- 1) + SR_BSZ_BASE
));
3932 /* Return to previous state and core */
3934 sb_core_disable(sbh
, 0);
3935 sb_setcoreidx(sbh
, origidx
);
3938 INTR_RESTORE(si
, intr_val
);