2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright (C) 2009, Broadcom Corporation
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
13 * $Id: sbutils.c,v 1.673.2.4 2008/12/03 00:11:18 Exp $
25 #include <pcie_core.h>
38 #include "siutils_priv.h"
40 /* local prototypes */
41 static uint
_sb_coreidx(si_info_t
*sii
, uint32 sba
);
42 static uint
_sb_scan(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
,
44 static uint32
_sb_coresba(si_info_t
*sii
);
45 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
);
/* Read-modify-write helper: update only the bits selected by 'mask' in sb register 'r'. */
#define SET_SBREG(sii, r, mask, val)	\
	W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))

/* Map a core's register base virtual address to its sbconfig register block. */
#define REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)

/* Sonics backplane revision codes, as extracted from the sbidlow register */
#define SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* sbconfig register accessors; the underlying functions handle PCMCIA quirks */
#define R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
61 sb_read_sbreg(si_info_t
*sii
, volatile uint32
*sbr
)
64 uint32 val
, intr_val
= 0;
68 * compact flash only has an 11-bit address, while we need a 12-bit address.
69 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
70 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
71 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
74 INTR_OFF(sii
, intr_val
);
76 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
77 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
80 val
= R_REG(sii
->osh
, sbr
);
84 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
85 INTR_RESTORE(sii
, intr_val
);
92 sb_write_sbreg(si_info_t
*sii
, volatile uint32
*sbr
, uint32 v
)
95 volatile uint32 dummy
;
100 * compact flash only has an 11-bit address, while we need a 12-bit address.
101 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
102 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
103 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
106 INTR_OFF(sii
, intr_val
);
108 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
109 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
112 if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
114 dummy
= R_REG(sii
->osh
, sbr
);
115 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
116 dummy
= R_REG(sii
->osh
, sbr
);
117 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
119 dummy
= R_REG(sii
->osh
, sbr
);
120 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
121 dummy
= R_REG(sii
->osh
, sbr
);
122 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
123 #endif /* IL_BIGENDIAN */
125 W_REG(sii
->osh
, sbr
, v
);
129 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
130 INTR_RESTORE(sii
, intr_val
);
141 sb
= REGS2SB(sii
->curmap
);
143 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >> SBIDH_CC_SHIFT
);
153 sb
= REGS2SB(sii
->curmap
);
155 return R_SBREG(sii
, &sb
->sbtpsflag
) & SBTPS_NUM0_MASK
;
159 sb_setint(si_t
*sih
, int siflag
)
166 sb
= REGS2SB(sii
->curmap
);
172 W_SBREG(sii
, &sb
->sbintvec
, vec
);
175 /* return core index of the core with address 'sba' */
177 BCMATTACHFN(_sb_coreidx
)(si_info_t
*sii
, uint32 sba
)
181 for (i
= 0; i
< sii
->numcores
; i
++)
182 if (sba
== sii
->coresba
[i
])
187 /* return core address of the current core */
189 BCMATTACHFN(_sb_coresba
)(si_info_t
*sii
)
194 switch (BUSTYPE(sii
->pub
.bustype
)) {
196 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
197 sbaddr
= sb_base(R_SBREG(sii
, &sb
->sbadmatch0
));
202 sbaddr
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_BAR0_WIN
, sizeof(uint32
));
207 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
208 sbaddr
= (uint32
)tmp
<< 12;
209 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
210 sbaddr
|= (uint32
)tmp
<< 16;
211 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
212 sbaddr
|= (uint32
)tmp
<< 24;
219 sbaddr
= (uint32
)(uintptr
)sii
->curmap
;
224 sbaddr
= BADCOREADDR
;
232 sb_corevendor(si_t
*sih
)
238 sb
= REGS2SB(sii
->curmap
);
240 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_VC_MASK
) >> SBIDH_VC_SHIFT
);
244 sb_corerev(si_t
*sih
)
251 sb
= REGS2SB(sii
->curmap
);
252 sbidh
= R_SBREG(sii
, &sb
->sbidhigh
);
254 return (SBCOREREV(sbidh
));
257 /* set core-specific control flags */
259 sb_core_cflags_wo(si_t
*sih
, uint32 mask
, uint32 val
)
266 sb
= REGS2SB(sii
->curmap
);
268 ASSERT((val
& ~mask
) == 0);
271 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
272 (val
<< SBTML_SICF_SHIFT
);
273 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
276 /* set/clear core-specific control flags */
278 sb_core_cflags(si_t
*sih
, uint32 mask
, uint32 val
)
285 sb
= REGS2SB(sii
->curmap
);
287 ASSERT((val
& ~mask
) == 0);
291 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
292 (val
<< SBTML_SICF_SHIFT
);
293 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
296 /* return the new value
297 * for a write operation, the following readback ensures the completion of the write operation.
299 return (R_SBREG(sii
, &sb
->sbtmstatelow
) >> SBTML_SICF_SHIFT
);
302 /* set/clear core-specific status flags */
304 sb_core_sflags(si_t
*sih
, uint32 mask
, uint32 val
)
311 sb
= REGS2SB(sii
->curmap
);
313 ASSERT((val
& ~mask
) == 0);
314 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
318 w
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & ~(mask
<< SBTMH_SISF_SHIFT
)) |
319 (val
<< SBTMH_SISF_SHIFT
);
320 W_SBREG(sii
, &sb
->sbtmstatehigh
, w
);
323 /* return the new value */
324 return (R_SBREG(sii
, &sb
->sbtmstatehigh
) >> SBTMH_SISF_SHIFT
);
328 sb_iscoreup(si_t
*sih
)
334 sb
= REGS2SB(sii
->curmap
);
336 return ((R_SBREG(sii
, &sb
->sbtmstatelow
) &
337 (SBTML_RESET
| SBTML_REJ_MASK
| (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
))) ==
338 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
));
342 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
343 * switch back to the original core, and return the new value.
345 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
347 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
348 * and (on newer pci cores) chipcommon registers.
351 sb_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
362 ASSERT(GOODIDX(coreidx
));
363 ASSERT(regoff
< SI_CORE_SIZE
);
364 ASSERT((val
& ~mask
) == 0);
366 if (BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) {
367 /* If internal bus, we can always get at everything */
369 /* map if does not exist */
370 if (!sii
->regs
[coreidx
]) {
371 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
373 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
375 r
= (uint32
*)((uchar
*)sii
->regs
[coreidx
] + regoff
);
376 } else if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
377 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
379 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
380 /* Chipc registers are mapped at 12KB */
383 r
= (uint32
*)((char *)sii
->curmap
+ PCI_16KB0_CCREGS_OFFSET
+ regoff
);
384 } else if (sii
->pub
.buscoreidx
== coreidx
) {
385 /* pci registers are at either in the last 2KB of an 8KB window
386 * or, in pcie and pci rev 13 at 8KB
390 r
= (uint32
*)((char *)sii
->curmap
+
391 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
393 r
= (uint32
*)((char *)sii
->curmap
+
394 ((regoff
>= SBCONFIGOFF
) ?
395 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
401 INTR_OFF(sii
, intr_val
);
403 /* save current core index */
404 origidx
= si_coreidx(&sii
->pub
);
407 r
= (uint32
*) ((uchar
*)sb_setcoreidx(&sii
->pub
, coreidx
) + regoff
);
413 if (regoff
>= SBCONFIGOFF
) {
414 w
= (R_SBREG(sii
, r
) & ~mask
) | val
;
417 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
418 W_REG(sii
->osh
, r
, w
);
423 if (regoff
>= SBCONFIGOFF
)
426 if ((CHIPID(sii
->pub
.chip
) == BCM5354_CHIP_ID
) &&
427 (coreidx
== SI_CC_IDX
) &&
428 (regoff
== OFFSETOF(chipcregs_t
, watchdog
))) {
431 w
= R_REG(sii
->osh
, r
);
435 /* restore core index */
436 if (origidx
!= coreidx
)
437 sb_setcoreidx(&sii
->pub
, origidx
);
439 INTR_RESTORE(sii
, intr_val
);
445 /* Scan the enumeration space to find all cores starting from the given
446 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
447 * is the default core address at chip POR time and 'regs' is the virtual
448 * address that the default core is mapped at. 'ncores' is the number of
449 * cores expected on bus 'sbba'. It returns the total number of cores
450 * starting from bus 'sbba', inclusive.
452 #define SB_MAXBUSES 2
454 BCMATTACHFN(_sb_scan
)(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
, uint numcores
)
460 if (bus
>= SB_MAXBUSES
) {
461 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba
, bus
));
464 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba
, numcores
));
466 /* Scan all cores on the bus starting from core 0.
467 * Core addresses must be contiguous on each bus.
469 for (i
= 0, next
= sii
->numcores
; i
< numcores
&& next
< SB_BUS_MAXCORES
; i
++, next
++) {
470 sii
->coresba
[next
] = sbba
+ (i
* SI_CORE_SIZE
);
472 /* keep and reuse the initial register mapping */
473 if ((BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) && (sii
->coresba
[next
] == sba
)) {
474 SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs
, next
));
475 sii
->regs
[next
] = regs
;
478 /* change core to 'next' and read its coreid */
479 sii
->curmap
= _sb_setcoreidx(sii
, next
);
482 sii
->coreid
[next
] = sb_coreid(&sii
->pub
);
484 /* core specific processing... */
485 /* chipc provides # cores */
486 if (sii
->coreid
[next
] == CC_CORE_ID
) {
487 chipcregs_t
*cc
= (chipcregs_t
*)sii
->curmap
;
488 uint32 ccrev
= sb_corerev(&sii
->pub
);
490 /* determine numcores - this is the total # cores in the chip */
491 if (((ccrev
== 4) || (ccrev
>= 6)))
492 numcores
= (R_REG(sii
->osh
, &cc
->chipid
) & CID_CC_MASK
) >>
496 uint chip
= sii
->pub
.chip
;
498 if (chip
== BCM4306_CHIP_ID
) /* < 4306c0 */
500 else if (chip
== BCM4704_CHIP_ID
)
502 else if (chip
== BCM5365_CHIP_ID
)
505 SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
511 SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores
,
512 sii
->pub
.issim
? "QT" : ""));
514 /* scan bridged SB(s) and add results to the end of the list */
515 else if (sii
->coreid
[next
] == OCP_CORE_ID
) {
516 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
517 uint32 nsbba
= R_SBREG(sii
, &sb
->sbadmatch1
);
520 sii
->numcores
= next
+ 1;
522 if ((nsbba
& 0xfff00000) != SI_ENUM_BASE
)
525 if (_sb_coreidx(sii
, nsbba
) != BADIDX
)
528 nsbcc
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & 0x000f0000) >> 16;
529 nsbcc
= _sb_scan(sii
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
530 if (sbba
== SI_ENUM_BASE
)
536 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
538 sii
->numcores
= i
+ ncc
;
539 return sii
->numcores
;
542 /* scan the sb enumerated space to identify all cores */
544 BCMATTACHFN(sb_scan
)(si_t
*sih
, void *regs
, uint devid
)
551 sb
= REGS2SB(sii
->curmap
);
553 sii
->pub
.socirev
= (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
555 /* Save the current core info and validate it later till we know
556 * for sure what is good and what is bad.
558 origsba
= _sb_coresba(sii
);
560 /* scan all SB(s) starting from SI_ENUM_BASE */
561 sii
->numcores
= _sb_scan(sii
, origsba
, regs
, 0, SI_ENUM_BASE
, 1);
565 * This function changes logical "focus" to the indicated core;
566 * must be called with interrupts off.
567 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
570 sb_setcoreidx(si_t
*sih
, uint coreidx
)
576 if (coreidx
>= sii
->numcores
)
580 * If the user has provided an interrupt mask enabled function,
581 * then assert interrupts are disabled before switching the core.
583 ASSERT((sii
->intrsenabled_fn
== NULL
) || !(*(sii
)->intrsenabled_fn
)((sii
)->intr_arg
));
585 sii
->curmap
= _sb_setcoreidx(sii
, coreidx
);
586 sii
->curidx
= coreidx
;
588 return (sii
->curmap
);
591 /* This function changes the logical "focus" to the indicated core.
592 * Return the current core's virtual address.
595 _sb_setcoreidx(si_info_t
*sii
, uint coreidx
)
597 uint32 sbaddr
= sii
->coresba
[coreidx
];
600 switch (BUSTYPE(sii
->pub
.bustype
)) {
603 if (!sii
->regs
[coreidx
]) {
604 sii
->regs
[coreidx
] = REG_MAP(sbaddr
, SI_CORE_SIZE
);
605 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
607 regs
= sii
->regs
[coreidx
];
611 /* point bar0 window */
612 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN
, 4, sbaddr
);
617 uint8 tmp
= (sbaddr
>> 12) & 0x0f;
618 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
619 tmp
= (sbaddr
>> 16) & 0xff;
620 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
621 tmp
= (sbaddr
>> 24) & 0xff;
622 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
630 if (!sii
->regs
[coreidx
]) {
631 sii
->regs
[coreidx
] = (void *)(uintptr
)sbaddr
;
632 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
634 regs
= sii
->regs
[coreidx
];
647 /* Return the address of sbadmatch0/1/2/3 register */
648 static volatile uint32
*
649 sb_admatch(si_info_t
*sii
, uint asidx
)
652 volatile uint32
*addrm
;
654 sb
= REGS2SB(sii
->curmap
);
658 addrm
= &sb
->sbadmatch0
;
662 addrm
= &sb
->sbadmatch1
;
666 addrm
= &sb
->sbadmatch2
;
670 addrm
= &sb
->sbadmatch3
;
674 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__
, asidx
));
681 /* Return the number of address spaces in current core */
683 sb_numaddrspaces(si_t
*sih
)
689 sb
= REGS2SB(sii
->curmap
);
691 /* + 1 because of enumeration space */
692 return ((R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_AR_MASK
) >> SBIDL_AR_SHIFT
) + 1;
695 /* Return the address of the nth address space in the current core */
697 sb_addrspace(si_t
*sih
, uint asidx
)
703 return (sb_base(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
706 /* Return the size of the nth address space in the current core */
708 sb_addrspacesize(si_t
*sih
, uint asidx
)
714 return (sb_size(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
717 #if defined(BCMASSERT_SUPPORT) || defined(BCMDBG_DUMP)
718 /* traverse all cores to find and clear source of serror */
720 sb_serr_clear(si_info_t
*sii
)
724 uint i
, intr_val
= 0;
725 void *corereg
= NULL
;
727 INTR_OFF(sii
, intr_val
);
728 origidx
= si_coreidx(&sii
->pub
);
730 for (i
= 0; i
< sii
->numcores
; i
++) {
731 corereg
= sb_setcoreidx(&sii
->pub
, i
);
732 if (NULL
!= corereg
) {
733 sb
= REGS2SB(corereg
);
734 if ((R_SBREG(sii
, &sb
->sbtmstatehigh
)) & SBTMH_SERR
) {
735 AND_SBREG(sii
, &sb
->sbtmstatehigh
, ~SBTMH_SERR
);
736 SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
737 sb_coreid(&sii
->pub
)));
742 sb_setcoreidx(&sii
->pub
, origidx
);
743 INTR_RESTORE(sii
, intr_val
);
747 * Check if any inband, outband or timeout errors has happened and clear them.
748 * Must be called with chip clk on !
751 sb_taclear(si_t
*sih
, bool details
)
758 uint32 inband
= 0, serror
= 0, timeout
= 0;
759 void *corereg
= NULL
;
760 volatile uint32 imstate
, tmstate
;
764 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
765 volatile uint32 stcmd
;
767 /* inband error is Target abort for PCI */
768 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
));
769 inband
= stcmd
& PCI_STAT_TA
;
771 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
), stcmd
);
775 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
));
776 serror
= stcmd
& PCI_SBIM_STATUS_SERR
;
779 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
), stcmd
);
783 imstate
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
784 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), 0, 0);
785 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
786 sb_corereg(sih
, sii
->pub
.buscoreidx
,
787 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), ~0,
788 (imstate
& ~(SBIM_IBE
| SBIM_TO
)));
789 /* inband = imstate & SBIM_IBE; same as TA above */
790 timeout
= imstate
& SBIM_TO
;
796 /* dump errlog for sonics >= 2.3 */
797 if (sii
->pub
.socirev
== SONICS_2_2
)
800 uint32 imerrlog
, imerrloga
;
801 imerrlog
= sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, 0, 0);
802 if (imerrlog
& SBTMEL_EC
) {
803 imerrloga
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
806 sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, ~0, 0);
807 SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
808 imerrlog
, imerrloga
));
814 } else if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
816 INTR_OFF(sii
, intr_val
);
817 origidx
= si_coreidx(sih
);
819 corereg
= si_setcore(sih
, PCMCIA_CORE_ID
, 0);
820 if (NULL
!= corereg
) {
821 sb
= REGS2SB(corereg
);
823 imstate
= R_SBREG(sii
, &sb
->sbimstate
);
824 /* handle surprise removal */
825 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
826 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
827 inband
= imstate
& SBIM_IBE
;
828 timeout
= imstate
& SBIM_TO
;
830 tmstate
= R_SBREG(sii
, &sb
->sbtmstatehigh
);
831 if ((tmstate
!= 0xffffffff) && (tmstate
& SBTMH_INT_STATUS
)) {
836 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_INT_ACK
);
837 AND_SBREG(sii
, &sb
->sbtmstatelow
, ~SBTML_INT_ACK
);
840 sb_setcoreidx(sih
, origidx
);
841 INTR_RESTORE(sii
, intr_val
);
846 if (inband
| timeout
| serror
) {
848 SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
849 inband
, serror
, timeout
));
856 /* do buffered registers update */
866 origidx
= sii
->curidx
;
867 ASSERT(GOODIDX(origidx
));
869 INTR_OFF(sii
, intr_val
);
871 /* switch over to chipcommon core if there is one, else use pci */
872 if (sii
->pub
.ccrev
!= NOREV
) {
873 chipcregs_t
*ccregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
875 /* do the buffer registers update */
876 W_REG(sii
->osh
, &ccregs
->broadcastaddress
, SB_COMMIT
);
877 W_REG(sii
->osh
, &ccregs
->broadcastdata
, 0x0);
878 } else if (PCI(sii
)) {
879 sbpciregs_t
*pciregs
= (sbpciregs_t
*)si_setcore(sih
, PCI_CORE_ID
, 0);
881 /* do the buffer registers update */
882 W_REG(sii
->osh
, &pciregs
->bcastaddr
, SB_COMMIT
);
883 W_REG(sii
->osh
, &pciregs
->bcastdata
, 0x0);
887 /* restore core index */
888 sb_setcoreidx(sih
, origidx
);
889 INTR_RESTORE(sii
, intr_val
);
893 sb_core_disable(si_t
*sih
, uint32 bits
)
896 volatile uint32 dummy
;
901 ASSERT(GOODREGS(sii
->curmap
));
902 sb
= REGS2SB(sii
->curmap
);
904 /* if core is already in reset, just return */
905 if (R_SBREG(sii
, &sb
->sbtmstatelow
) & SBTML_RESET
)
908 /* if clocks are not enabled, put into reset and return */
909 if ((R_SBREG(sii
, &sb
->sbtmstatelow
) & (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
)) == 0)
912 /* set target reject and spin until busy is clear (preserve core-specific bits) */
913 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_REJ
);
914 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
916 SPINWAIT((R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
917 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
918 SI_ERROR(("%s: target state still busy\n", __FUNCTION__
));
920 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
) {
921 OR_SBREG(sii
, &sb
->sbimstate
, SBIM_RJ
);
922 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
924 SPINWAIT((R_SBREG(sii
, &sb
->sbimstate
) & SBIM_BY
), 100000);
927 /* set reset and reject while enabling the clocks */
928 W_SBREG(sii
, &sb
->sbtmstatelow
,
929 (((bits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
930 SBTML_REJ
| SBTML_RESET
));
931 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
934 /* don't forget to clear the initiator reject bit */
935 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
)
936 AND_SBREG(sii
, &sb
->sbimstate
, ~SBIM_RJ
);
939 /* leave reset and reject asserted */
940 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
<< SBTML_SICF_SHIFT
) | SBTML_REJ
| SBTML_RESET
));
944 /* reset and re-enable a core
946 * bits - core specific bits that are set during and after reset sequence
947 * resetbits - core specific bits that are set only during reset sequence
950 sb_core_reset(si_t
*sih
, uint32 bits
, uint32 resetbits
)
954 volatile uint32 dummy
;
957 ASSERT(GOODREGS(sii
->curmap
));
958 sb
= REGS2SB(sii
->curmap
);
961 * Must do the disable sequence first to work for arbitrary current core state.
963 sb_core_disable(sih
, (bits
| resetbits
));
966 * Now do the initialization sequence.
969 /* set reset while enabling the clock and forcing them on throughout the core */
970 W_SBREG(sii
, &sb
->sbtmstatelow
,
971 (((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
973 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
976 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_SERR
) {
977 W_SBREG(sii
, &sb
->sbtmstatehigh
, 0);
979 if ((dummy
= R_SBREG(sii
, &sb
->sbimstate
)) & (SBIM_IBE
| SBIM_TO
)) {
980 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
983 /* clear reset and allow it to propagate throughout the core */
984 W_SBREG(sii
, &sb
->sbtmstatelow
,
985 ((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
986 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
989 /* leave clock enabled */
990 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
991 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
996 sb_core_tofixup(si_t
*sih
)
1003 if ((BUSTYPE(sii
->pub
.bustype
) != PCI_BUS
) || PCIE(sii
) ||
1004 (PCI(sii
) && (sii
->pub
.buscorerev
>= 5)))
1007 ASSERT(GOODREGS(sii
->curmap
));
1008 sb
= REGS2SB(sii
->curmap
);
1010 if (BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) {
1011 SET_SBREG(sii
, &sb
->sbimconfiglow
,
1012 SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
,
1013 (0x5 << SBIMCL_RTO_SHIFT
) | 0x3);
1015 if (sb_coreid(sih
) == PCI_CORE_ID
) {
1016 SET_SBREG(sii
, &sb
->sbimconfiglow
,
1017 SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
,
1018 (0x3 << SBIMCL_RTO_SHIFT
) | 0x2);
1020 SET_SBREG(sii
, &sb
->sbimconfiglow
, (SBIMCL_RTO_MASK
| SBIMCL_STO_MASK
), 0);
1028 * Set the initiator timeout for the "master core".
1029 * The master core is defined to be the core in control
1030 * of the chip and so it issues accesses to non-memory
1031 * locations (Because of dma *any* core can access memory).
1033 * The routine uses the bus to decide who is the master:
1036 * PCI_BUS => pci or pcie
1037 * PCMCIA_BUS => pcmcia
1038 * SDIO_BUS => pcmcia
1040 * This routine exists so callers can disable initiator
1041 * timeouts so accesses to very slow devices like otp
1042 * won't cause an abort. The routine allows arbitrary
1043 * settings of the service and request timeouts, though.
1045 * Returns the timeout state before changing it or -1
1049 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
1052 sb_set_initiator_to(si_t
*sih
, uint32 to
, uint idx
)
1057 uint32 tmp
, ret
= 0xffffffff;
1062 if ((to
& ~TO_MASK
) != 0)
1065 /* Figure out the master core */
1066 if (idx
== BADIDX
) {
1067 switch (BUSTYPE(sii
->pub
.bustype
)) {
1069 idx
= sii
->pub
.buscoreidx
;
1075 idx
= si_findcoreidx(sih
, PCMCIA_CORE_ID
, 0);
1078 idx
= si_findcoreidx(sih
, MIPS33_CORE_ID
, 0);
1087 INTR_OFF(sii
, intr_val
);
1088 origidx
= si_coreidx(sih
);
1090 sb
= REGS2SB(sb_setcoreidx(sih
, idx
));
1092 tmp
= R_SBREG(sii
, &sb
->sbimconfiglow
);
1093 ret
= tmp
& TO_MASK
;
1094 W_SBREG(sii
, &sb
->sbimconfiglow
, (tmp
& ~TO_MASK
) | to
);
1097 sb_setcoreidx(sih
, origidx
);
1098 INTR_RESTORE(sii
, intr_val
);
1103 sb_base(uint32 admatch
)
1108 type
= admatch
& SBAM_TYPE_MASK
;
1114 base
= admatch
& SBAM_BASE0_MASK
;
1115 } else if (type
== 1) {
1116 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1117 base
= admatch
& SBAM_BASE1_MASK
;
1118 } else if (type
== 2) {
1119 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1120 base
= admatch
& SBAM_BASE2_MASK
;
1127 sb_size(uint32 admatch
)
1132 type
= admatch
& SBAM_TYPE_MASK
;
1138 size
= 1 << (((admatch
& SBAM_ADINT0_MASK
) >> SBAM_ADINT0_SHIFT
) + 1);
1139 } else if (type
== 1) {
1140 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1141 size
= 1 << (((admatch
& SBAM_ADINT1_MASK
) >> SBAM_ADINT1_SHIFT
) + 1);
1142 } else if (type
== 2) {
1143 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1144 size
= 1 << (((admatch
& SBAM_ADINT2_MASK
) >> SBAM_ADINT2_SHIFT
) + 1);
1150 #if defined(BCMDBG_DUMP)
1151 /* print interesting sbconfig registers */
1153 sb_dumpregs(si_t
*sih
, struct bcmstrbuf
*b
)
1157 uint origidx
, i
, intr_val
= 0;
1160 origidx
= sii
->curidx
;
1162 INTR_OFF(sii
, intr_val
);
1164 for (i
= 0; i
< sii
->numcores
; i
++) {
1165 sb
= REGS2SB(sb_setcoreidx(sih
, i
));
1167 bcm_bprintf(b
, "core 0x%x: \n", sii
->coreid
[i
]);
1169 if (sii
->pub
.socirev
> SONICS_2_2
)
1170 bcm_bprintf(b
, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
1171 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOG
, 0, 0),
1172 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOGA
, 0, 0));
1174 bcm_bprintf(b
, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
1175 "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
1176 R_SBREG(sii
, &sb
->sbtmstatelow
), R_SBREG(sii
, &sb
->sbtmstatehigh
),
1177 R_SBREG(sii
, &sb
->sbidhigh
), R_SBREG(sii
, &sb
->sbimstate
),
1178 R_SBREG(sii
, &sb
->sbimconfiglow
), R_SBREG(sii
, &sb
->sbimconfighigh
));
1181 sb_setcoreidx(sih
, origidx
);
1182 INTR_RESTORE(sii
, intr_val
);