/*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright (C) 2011, Broadcom Corporation. All Rights Reserved.
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
14 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
16 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
17 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 * $Id: sbutils.c 300516 2011-12-04 17:39:44Z $
*/
35 #include "siutils_priv.h"
38 /* local prototypes */
39 static uint
_sb_coreidx(si_info_t
*sii
, uint32 sba
);
40 static uint
_sb_scan(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
,
42 static uint32
_sb_coresba(si_info_t
*sii
);
43 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
);
45 #define SET_SBREG(sii, r, mask, val) \
46 W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
47 #define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
50 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
51 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
53 #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
54 #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
55 #define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
56 #define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
59 sb_read_sbreg(si_info_t
*sii
, volatile uint32
*sbr
)
62 uint32 val
, intr_val
= 0;
66 * compact flash only has 11 bits address, while we need 12 bits address.
67 * MEM_SEG will be OR'd with other 11 bits address in hardware,
68 * so we program MEM_SEG with 12th bit when necessary(access sb registers).
69 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
72 INTR_OFF(sii
, intr_val
);
74 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
75 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
78 val
= R_REG(sii
->osh
, sbr
);
82 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
83 INTR_RESTORE(sii
, intr_val
);
90 sb_write_sbreg(si_info_t
*sii
, volatile uint32
*sbr
, uint32 v
)
93 volatile uint32 dummy
;
98 * compact flash only has 11 bits address, while we need 12 bits address.
99 * MEM_SEG will be OR'd with other 11 bits address in hardware,
100 * so we program MEM_SEG with 12th bit when necessary(access sb registers).
101 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
104 INTR_OFF(sii
, intr_val
);
106 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
107 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
110 if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
112 dummy
= R_REG(sii
->osh
, sbr
);
113 BCM_REFERENCE(dummy
);
114 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
115 dummy
= R_REG(sii
->osh
, sbr
);
116 BCM_REFERENCE(dummy
);
117 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
119 dummy
= R_REG(sii
->osh
, sbr
);
120 BCM_REFERENCE(dummy
);
121 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
122 dummy
= R_REG(sii
->osh
, sbr
);
123 BCM_REFERENCE(dummy
);
124 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
125 #endif /* IL_BIGENDIAN */
127 W_REG(sii
->osh
, sbr
, v
);
131 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
132 INTR_RESTORE(sii
, intr_val
);
143 sb
= REGS2SB(sii
->curmap
);
145 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >> SBIDH_CC_SHIFT
);
149 sb_intflag(si_t
*sih
)
154 uint origidx
, intflag
, intr_val
= 0;
158 INTR_OFF(sii
, intr_val
);
159 origidx
= si_coreidx(sih
);
160 corereg
= si_setcore(sih
, CC_CORE_ID
, 0);
161 ASSERT(corereg
!= NULL
);
162 sb
= REGS2SB(corereg
);
163 intflag
= R_SBREG(sii
, &sb
->sbflagst
);
164 sb_setcoreidx(sih
, origidx
);
165 INTR_RESTORE(sii
, intr_val
);
177 sb
= REGS2SB(sii
->curmap
);
179 return R_SBREG(sii
, &sb
->sbtpsflag
) & SBTPS_NUM0_MASK
;
183 sb_setint(si_t
*sih
, int siflag
)
190 sb
= REGS2SB(sii
->curmap
);
196 W_SBREG(sii
, &sb
->sbintvec
, vec
);
199 /* return core index of the core with address 'sba' */
201 BCMATTACHFN(_sb_coreidx
)(si_info_t
*sii
, uint32 sba
)
205 for (i
= 0; i
< sii
->numcores
; i
++)
206 if (sba
== sii
->coresba
[i
])
211 /* return core address of the current core */
213 BCMATTACHFN(_sb_coresba
)(si_info_t
*sii
)
218 switch (BUSTYPE(sii
->pub
.bustype
)) {
220 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
221 sbaddr
= sb_base(R_SBREG(sii
, &sb
->sbadmatch0
));
226 sbaddr
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_BAR0_WIN
, sizeof(uint32
));
231 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
232 sbaddr
= (uint32
)tmp
<< 12;
233 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
234 sbaddr
|= (uint32
)tmp
<< 16;
235 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
236 sbaddr
|= (uint32
)tmp
<< 24;
243 sbaddr
= (uint32
)(uintptr
)sii
->curmap
;
248 sbaddr
= BADCOREADDR
;
256 sb_corevendor(si_t
*sih
)
262 sb
= REGS2SB(sii
->curmap
);
264 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_VC_MASK
) >> SBIDH_VC_SHIFT
);
268 sb_corerev(si_t
*sih
)
275 sb
= REGS2SB(sii
->curmap
);
276 sbidh
= R_SBREG(sii
, &sb
->sbidhigh
);
278 return (SBCOREREV(sbidh
));
281 /* set core-specific control flags */
283 sb_core_cflags_wo(si_t
*sih
, uint32 mask
, uint32 val
)
290 sb
= REGS2SB(sii
->curmap
);
292 ASSERT((val
& ~mask
) == 0);
295 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
296 (val
<< SBTML_SICF_SHIFT
);
297 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
300 /* set/clear core-specific control flags */
302 sb_core_cflags(si_t
*sih
, uint32 mask
, uint32 val
)
309 sb
= REGS2SB(sii
->curmap
);
311 ASSERT((val
& ~mask
) == 0);
315 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
316 (val
<< SBTML_SICF_SHIFT
);
317 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
320 /* return the new value
321 * for write operation, the following readback ensures the completion of write opration.
323 return (R_SBREG(sii
, &sb
->sbtmstatelow
) >> SBTML_SICF_SHIFT
);
326 /* set/clear core-specific status flags */
328 sb_core_sflags(si_t
*sih
, uint32 mask
, uint32 val
)
335 sb
= REGS2SB(sii
->curmap
);
337 ASSERT((val
& ~mask
) == 0);
338 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
342 w
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & ~(mask
<< SBTMH_SISF_SHIFT
)) |
343 (val
<< SBTMH_SISF_SHIFT
);
344 W_SBREG(sii
, &sb
->sbtmstatehigh
, w
);
347 /* return the new value */
348 return (R_SBREG(sii
, &sb
->sbtmstatehigh
) >> SBTMH_SISF_SHIFT
);
352 sb_iscoreup(si_t
*sih
)
358 sb
= REGS2SB(sii
->curmap
);
360 return ((R_SBREG(sii
, &sb
->sbtmstatelow
) &
361 (SBTML_RESET
| SBTML_REJ_MASK
| (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
))) ==
362 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
));
/*
366 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
367 * switch back to the original core, and return the new value.
369 * When using the silicon backplane, no fiddling with interrupts or core switches are needed.
371 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
372 * and (on newer pci cores) chipcommon registers.
*/
375 sb_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
386 ASSERT(GOODIDX(coreidx
));
387 ASSERT(regoff
< SI_CORE_SIZE
);
388 ASSERT((val
& ~mask
) == 0);
390 if (coreidx
>= SI_MAXCORES
)
393 if (BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) {
394 /* If internal bus, we can always get at everything */
396 /* map if does not exist */
397 if (!sii
->regs
[coreidx
]) {
398 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
400 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
402 r
= (uint32
*)((uchar
*)sii
->regs
[coreidx
] + regoff
);
403 } else if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
404 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
406 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
407 /* Chipc registers are mapped at 12KB */
410 r
= (uint32
*)((char *)sii
->curmap
+ PCI_16KB0_CCREGS_OFFSET
+ regoff
);
411 } else if (sii
->pub
.buscoreidx
== coreidx
) {
412 /* pci registers are at either in the last 2KB of an 8KB window
413 * or, in pcie and pci rev 13 at 8KB
417 r
= (uint32
*)((char *)sii
->curmap
+
418 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
420 r
= (uint32
*)((char *)sii
->curmap
+
421 ((regoff
>= SBCONFIGOFF
) ?
422 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
428 INTR_OFF(sii
, intr_val
);
430 /* save current core index */
431 origidx
= si_coreidx(&sii
->pub
);
434 r
= (uint32
*) ((uchar
*)sb_setcoreidx(&sii
->pub
, coreidx
) + regoff
);
440 if (regoff
>= SBCONFIGOFF
) {
441 w
= (R_SBREG(sii
, r
) & ~mask
) | val
;
444 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
445 W_REG(sii
->osh
, r
, w
);
450 if (regoff
>= SBCONFIGOFF
)
453 if ((CHIPID(sii
->pub
.chip
) == BCM5354_CHIP_ID
) &&
454 (coreidx
== SI_CC_IDX
) &&
455 (regoff
== OFFSETOF(chipcregs_t
, watchdog
))) {
458 w
= R_REG(sii
->osh
, r
);
462 /* restore core index */
463 if (origidx
!= coreidx
)
464 sb_setcoreidx(&sii
->pub
, origidx
);
466 INTR_RESTORE(sii
, intr_val
);
472 /* Scan the enumeration space to find all cores starting from the given
473 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
474 * is the default core address at chip POR time and 'regs' is the virtual
475 * address that the default core is mapped at. 'ncores' is the number of
476 * cores expected on bus 'sbba'. It returns the total number of cores
477 * starting from bus 'sbba', inclusive.
479 #define SB_MAXBUSES 2
481 BCMATTACHFN(_sb_scan
)(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
, uint numcores
)
487 if (bus
>= SB_MAXBUSES
) {
488 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba
, bus
));
491 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba
, numcores
));
493 /* Scan all cores on the bus starting from core 0.
494 * Core addresses must be contiguous on each bus.
496 for (i
= 0, next
= sii
->numcores
; i
< numcores
&& next
< SB_BUS_MAXCORES
; i
++, next
++) {
497 sii
->coresba
[next
] = sbba
+ (i
* SI_CORE_SIZE
);
499 /* keep and reuse the initial register mapping */
500 if ((BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) && (sii
->coresba
[next
] == sba
)) {
501 SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs
, next
));
502 sii
->regs
[next
] = regs
;
505 /* change core to 'next' and read its coreid */
506 sii
->curmap
= _sb_setcoreidx(sii
, next
);
509 sii
->coreid
[next
] = sb_coreid(&sii
->pub
);
511 /* core specific processing... */
512 /* chipc provides # cores */
513 if (sii
->coreid
[next
] == CC_CORE_ID
) {
514 chipcregs_t
*cc
= (chipcregs_t
*)sii
->curmap
;
515 uint32 ccrev
= sb_corerev(&sii
->pub
);
517 /* determine numcores - this is the total # cores in the chip */
518 if (((ccrev
== 4) || (ccrev
>= 6)))
519 numcores
= (R_REG(sii
->osh
, &cc
->chipid
) & CID_CC_MASK
) >>
523 uint chip
= CHIPID(sii
->pub
.chip
);
525 if (chip
== BCM4306_CHIP_ID
) /* < 4306c0 */
527 else if (chip
== BCM4704_CHIP_ID
)
529 else if (chip
== BCM5365_CHIP_ID
)
532 SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
538 SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores
,
539 sii
->pub
.issim
? "QT" : ""));
541 /* scan bridged SB(s) and add results to the end of the list */
542 else if (sii
->coreid
[next
] == OCP_CORE_ID
) {
543 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
544 uint32 nsbba
= R_SBREG(sii
, &sb
->sbadmatch1
);
547 sii
->numcores
= next
+ 1;
549 if ((nsbba
& 0xfff00000) != SI_ENUM_BASE
)
552 if (_sb_coreidx(sii
, nsbba
) != BADIDX
)
555 nsbcc
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & 0x000f0000) >> 16;
556 nsbcc
= _sb_scan(sii
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
557 if (sbba
== SI_ENUM_BASE
)
563 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
565 sii
->numcores
= i
+ ncc
;
566 return sii
->numcores
;
569 /* scan the sb enumerated space to identify all cores */
571 BCMATTACHFN(sb_scan
)(si_t
*sih
, void *regs
, uint devid
)
578 sb
= REGS2SB(sii
->curmap
);
580 sii
->pub
.socirev
= (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
582 /* Save the current core info and validate it later till we know
583 * for sure what is good and what is bad.
585 origsba
= _sb_coresba(sii
);
587 /* scan all SB(s) starting from SI_ENUM_BASE */
588 sii
->numcores
= _sb_scan(sii
, origsba
, regs
, 0, SI_ENUM_BASE
, 1);
592 * This function changes logical "focus" to the indicated core;
593 * must be called with interrupts off.
594 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
597 sb_setcoreidx(si_t
*sih
, uint coreidx
)
603 if (coreidx
>= sii
->numcores
)
607 * If the user has provided an interrupt mask enabled function,
608 * then assert interrupts are disabled before switching the core.
610 ASSERT((sii
->intrsenabled_fn
== NULL
) || !(*(sii
)->intrsenabled_fn
)((sii
)->intr_arg
));
612 sii
->curmap
= _sb_setcoreidx(sii
, coreidx
);
613 sii
->curidx
= coreidx
;
615 return (sii
->curmap
);
618 /* This function changes the logical "focus" to the indicated core.
619 * Return the current core's virtual address.
622 _sb_setcoreidx(si_info_t
*sii
, uint coreidx
)
624 uint32 sbaddr
= sii
->coresba
[coreidx
];
627 switch (BUSTYPE(sii
->pub
.bustype
)) {
630 if (!sii
->regs
[coreidx
]) {
631 sii
->regs
[coreidx
] = REG_MAP(sbaddr
, SI_CORE_SIZE
);
632 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
634 regs
= sii
->regs
[coreidx
];
638 /* point bar0 window */
639 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN
, 4, sbaddr
);
644 uint8 tmp
= (sbaddr
>> 12) & 0x0f;
645 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
646 tmp
= (sbaddr
>> 16) & 0xff;
647 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
648 tmp
= (sbaddr
>> 24) & 0xff;
649 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
657 if (!sii
->regs
[coreidx
]) {
658 sii
->regs
[coreidx
] = (void *)(uintptr
)sbaddr
;
659 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
661 regs
= sii
->regs
[coreidx
];
674 /* Return the address of sbadmatch0/1/2/3 register */
675 static volatile uint32
*
676 sb_admatch(si_info_t
*sii
, uint asidx
)
679 volatile uint32
*addrm
;
681 sb
= REGS2SB(sii
->curmap
);
685 addrm
= &sb
->sbadmatch0
;
689 addrm
= &sb
->sbadmatch1
;
693 addrm
= &sb
->sbadmatch2
;
697 addrm
= &sb
->sbadmatch3
;
701 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__
, asidx
));
708 /* Return the number of address spaces in current core */
710 sb_numaddrspaces(si_t
*sih
)
716 sb
= REGS2SB(sii
->curmap
);
718 /* + 1 because of enumeration space */
719 return ((R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_AR_MASK
) >> SBIDL_AR_SHIFT
) + 1;
722 /* Return the address of the nth address space in the current core */
724 sb_addrspace(si_t
*sih
, uint asidx
)
730 return (sb_base(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
733 /* Return the size of the nth address space in the current core */
735 sb_addrspacesize(si_t
*sih
, uint asidx
)
741 return (sb_size(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
744 #if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT)
745 /* traverse all cores to find and clear source of serror */
747 sb_serr_clear(si_info_t
*sii
)
751 uint i
, intr_val
= 0;
752 void *corereg
= NULL
;
754 INTR_OFF(sii
, intr_val
);
755 origidx
= si_coreidx(&sii
->pub
);
757 for (i
= 0; i
< sii
->numcores
; i
++) {
758 corereg
= sb_setcoreidx(&sii
->pub
, i
);
759 if (NULL
!= corereg
) {
760 sb
= REGS2SB(corereg
);
761 if ((R_SBREG(sii
, &sb
->sbtmstatehigh
)) & SBTMH_SERR
) {
762 AND_SBREG(sii
, &sb
->sbtmstatehigh
, ~SBTMH_SERR
);
763 SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
764 sb_coreid(&sii
->pub
)));
769 sb_setcoreidx(&sii
->pub
, origidx
);
770 INTR_RESTORE(sii
, intr_val
);
774 * Check if any inband, outband or timeout errors has happened and clear them.
775 * Must be called with chip clk on !
778 sb_taclear(si_t
*sih
, bool details
)
785 uint32 inband
= 0, serror
= 0, timeout
= 0;
786 void *corereg
= NULL
;
787 volatile uint32 imstate
, tmstate
;
789 bool printed
= FALSE
;
794 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
795 volatile uint32 stcmd
;
797 /* inband error is Target abort for PCI */
798 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
));
799 inband
= stcmd
& PCI_STAT_TA
;
803 SI_ERROR(("\ninband:\n"));
804 si_viewall((void*)sii
, FALSE
);
808 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
), stcmd
);
812 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
));
813 serror
= stcmd
& PCI_SBIM_STATUS_SERR
;
817 SI_ERROR(("\nserror:\n"));
819 si_viewall((void*)sii
, FALSE
);
824 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
), stcmd
);
828 imstate
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
829 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), 0, 0);
830 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
831 sb_corereg(sih
, sii
->pub
.buscoreidx
,
832 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), ~0,
833 (imstate
& ~(SBIM_IBE
| SBIM_TO
)));
834 /* inband = imstate & SBIM_IBE; same as TA above */
835 timeout
= imstate
& SBIM_TO
;
839 SI_ERROR(("\ntimeout:\n"));
841 si_viewall((void*)sii
, FALSE
);
849 /* dump errlog for sonics >= 2.3 */
850 if (sii
->pub
.socirev
== SONICS_2_2
)
853 uint32 imerrlog
, imerrloga
;
854 imerrlog
= sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, 0, 0);
855 if (imerrlog
& SBTMEL_EC
) {
856 imerrloga
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
858 BCM_REFERENCE(imerrloga
);
860 sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, ~0, 0);
861 SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
862 imerrlog
, imerrloga
));
868 } else if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
870 INTR_OFF(sii
, intr_val
);
871 origidx
= si_coreidx(sih
);
873 corereg
= si_setcore(sih
, PCMCIA_CORE_ID
, 0);
874 if (NULL
!= corereg
) {
875 sb
= REGS2SB(corereg
);
877 imstate
= R_SBREG(sii
, &sb
->sbimstate
);
878 /* handle surprise removal */
879 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
880 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
881 inband
= imstate
& SBIM_IBE
;
882 timeout
= imstate
& SBIM_TO
;
884 tmstate
= R_SBREG(sii
, &sb
->sbtmstatehigh
);
885 if ((tmstate
!= 0xffffffff) && (tmstate
& SBTMH_INT_STATUS
)) {
890 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_INT_ACK
);
891 AND_SBREG(sii
, &sb
->sbtmstatelow
, ~SBTML_INT_ACK
);
894 sb_setcoreidx(sih
, origidx
);
895 INTR_RESTORE(sii
, intr_val
);
900 if (inband
| timeout
| serror
) {
902 SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
903 inband
, serror
, timeout
));
910 /* do buffered registers update */
920 origidx
= sii
->curidx
;
921 ASSERT(GOODIDX(origidx
));
923 INTR_OFF(sii
, intr_val
);
925 /* switch over to chipcommon core if there is one, else use pci */
926 if (sii
->pub
.ccrev
!= NOREV
) {
927 chipcregs_t
*ccregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
928 ASSERT(ccregs
!= NULL
);
930 /* do the buffer registers update */
931 W_REG(sii
->osh
, &ccregs
->broadcastaddress
, SB_COMMIT
);
932 W_REG(sii
->osh
, &ccregs
->broadcastdata
, 0x0);
933 } else if (PCI(sii
)) {
934 sbpciregs_t
*pciregs
= (sbpciregs_t
*)si_setcore(sih
, PCI_CORE_ID
, 0);
936 /* do the buffer registers update */
937 W_REG(sii
->osh
, &pciregs
->bcastaddr
, SB_COMMIT
);
938 W_REG(sii
->osh
, &pciregs
->bcastdata
, 0x0);
942 /* restore core index */
943 sb_setcoreidx(sih
, origidx
);
944 INTR_RESTORE(sii
, intr_val
);
948 sb_core_disable(si_t
*sih
, uint32 bits
)
951 volatile uint32 dummy
;
956 ASSERT(GOODREGS(sii
->curmap
));
957 sb
= REGS2SB(sii
->curmap
);
959 /* if core is already in reset, just return */
960 if (R_SBREG(sii
, &sb
->sbtmstatelow
) & SBTML_RESET
)
963 /* if clocks are not enabled, put into reset and return */
964 if ((R_SBREG(sii
, &sb
->sbtmstatelow
) & (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
)) == 0)
967 /* set target reject and spin until busy is clear (preserve core-specific bits) */
968 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_REJ
);
969 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
970 BCM_REFERENCE(dummy
);
972 SPINWAIT((R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
973 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
974 SI_ERROR(("%s: target state still busy\n", __FUNCTION__
));
976 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
) {
977 OR_SBREG(sii
, &sb
->sbimstate
, SBIM_RJ
);
978 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
979 BCM_REFERENCE(dummy
);
981 SPINWAIT((R_SBREG(sii
, &sb
->sbimstate
) & SBIM_BY
), 100000);
984 /* set reset and reject while enabling the clocks */
985 W_SBREG(sii
, &sb
->sbtmstatelow
,
986 (((bits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
987 SBTML_REJ
| SBTML_RESET
));
988 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
989 BCM_REFERENCE(dummy
);
992 /* don't forget to clear the initiator reject bit */
993 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
)
994 AND_SBREG(sii
, &sb
->sbimstate
, ~SBIM_RJ
);
997 /* leave reset and reject asserted */
998 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
<< SBTML_SICF_SHIFT
) | SBTML_REJ
| SBTML_RESET
));
1002 /* reset and re-enable a core
1004 * bits - core specific bits that are set during and after reset sequence
1005 * resetbits - core specific bits that are set only during reset sequence
1008 sb_core_reset(si_t
*sih
, uint32 bits
, uint32 resetbits
)
1012 volatile uint32 dummy
;
1015 ASSERT(GOODREGS(sii
->curmap
));
1016 sb
= REGS2SB(sii
->curmap
);
1019 * Must do the disable sequence first to work for arbitrary current core state.
1021 sb_core_disable(sih
, (bits
| resetbits
));
1024 * Now do the initialization sequence.
1027 /* set reset while enabling the clock and forcing them on throughout the core */
1028 W_SBREG(sii
, &sb
->sbtmstatelow
,
1029 (((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
1031 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1032 BCM_REFERENCE(dummy
);
1035 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_SERR
) {
1036 W_SBREG(sii
, &sb
->sbtmstatehigh
, 0);
1038 if ((dummy
= R_SBREG(sii
, &sb
->sbimstate
)) & (SBIM_IBE
| SBIM_TO
)) {
1039 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
1042 /* clear reset and allow it to propagate throughout the core */
1043 W_SBREG(sii
, &sb
->sbtmstatelow
,
1044 ((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
1045 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1046 BCM_REFERENCE(dummy
);
1049 /* leave clock enabled */
1050 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
1051 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1052 BCM_REFERENCE(dummy
);
/*
1057 * Set the initiator timeout for the "master core".
1058 * The master core is defined to be the core in control
1059 * of the chip and so it issues accesses to non-memory
1060 * locations (Because of dma *any* core can access memory).
1062 * The routine uses the bus to decide who is the master:
1065 * PCI_BUS => pci or pcie
1066 * PCMCIA_BUS => pcmcia
1067 * SDIO_BUS => pcmcia
1069 * This routine exists so callers can disable initiator
1070 * timeouts so accesses to very slow devices like otp
1071 * won't cause an abort. The routine allows arbitrary
1072 * settings of the service and request timeouts, though.
1074 * Returns the timeout state before changing it or -1
*/
1078 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
1081 sb_set_initiator_to(si_t
*sih
, uint32 to
, uint idx
)
1086 uint32 tmp
, ret
= 0xffffffff;
1091 if ((to
& ~TO_MASK
) != 0)
1094 /* Figure out the master core */
1095 if (idx
== BADIDX
) {
1096 switch (BUSTYPE(sii
->pub
.bustype
)) {
1098 idx
= sii
->pub
.buscoreidx
;
1104 idx
= si_findcoreidx(sih
, PCMCIA_CORE_ID
, 0);
1107 idx
= si_findcoreidx(sih
, MIPS33_CORE_ID
, 0);
1116 INTR_OFF(sii
, intr_val
);
1117 origidx
= si_coreidx(sih
);
1119 sb
= REGS2SB(sb_setcoreidx(sih
, idx
));
1121 tmp
= R_SBREG(sii
, &sb
->sbimconfiglow
);
1122 ret
= tmp
& TO_MASK
;
1123 W_SBREG(sii
, &sb
->sbimconfiglow
, (tmp
& ~TO_MASK
) | to
);
1126 sb_setcoreidx(sih
, origidx
);
1127 INTR_RESTORE(sii
, intr_val
);
1132 sb_base(uint32 admatch
)
1137 type
= admatch
& SBAM_TYPE_MASK
;
1143 base
= admatch
& SBAM_BASE0_MASK
;
1144 } else if (type
== 1) {
1145 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1146 base
= admatch
& SBAM_BASE1_MASK
;
1147 } else if (type
== 2) {
1148 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1149 base
= admatch
& SBAM_BASE2_MASK
;
1156 sb_size(uint32 admatch
)
1161 type
= admatch
& SBAM_TYPE_MASK
;
1167 size
= 1 << (((admatch
& SBAM_ADINT0_MASK
) >> SBAM_ADINT0_SHIFT
) + 1);
1168 } else if (type
== 1) {
1169 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1170 size
= 1 << (((admatch
& SBAM_ADINT1_MASK
) >> SBAM_ADINT1_SHIFT
) + 1);
1171 } else if (type
== 2) {
1172 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1173 size
= 1 << (((admatch
& SBAM_ADINT2_MASK
) >> SBAM_ADINT2_SHIFT
) + 1);
1180 /* print interesting sbconfig registers */
1182 sb_dumpregs(si_t
*sih
, struct bcmstrbuf
*b
)
1186 uint origidx
, i
, intr_val
= 0;
1189 origidx
= sii
->curidx
;
1191 INTR_OFF(sii
, intr_val
);
1193 for (i
= 0; i
< sii
->numcores
; i
++) {
1194 sb
= REGS2SB(sb_setcoreidx(sih
, i
));
1196 bcm_bprintf(b
, "core 0x%x: \n", sii
->coreid
[i
]);
1198 if (sii
->pub
.socirev
> SONICS_2_2
)
1199 bcm_bprintf(b
, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
1200 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOG
, 0, 0),
1201 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOGA
, 0, 0));
1203 bcm_bprintf(b
, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
1204 "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
1205 R_SBREG(sii
, &sb
->sbtmstatelow
), R_SBREG(sii
, &sb
->sbtmstatehigh
),
1206 R_SBREG(sii
, &sb
->sbidhigh
), R_SBREG(sii
, &sb
->sbimstate
),
1207 R_SBREG(sii
, &sb
->sbimconfiglow
), R_SBREG(sii
, &sb
->sbimconfighigh
));
1210 sb_setcoreidx(sih
, origidx
);
1211 INTR_RESTORE(sii
, intr_val
);
1217 sb_view(si_t
*sih
, bool verbose
)
1223 sb
= REGS2SB(sii
->curmap
);
1225 SI_ERROR(("\nCore ID: 0x%x\n", sb_coreid(&sii
->pub
)));
1227 if (sii
->pub
.socirev
> SONICS_2_2
)
1228 SI_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
1229 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOG
, 0, 0),
1230 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOGA
, 0, 0)));
1232 /* Print important or helpful registers */
1233 SI_ERROR(("sbtmerrloga 0x%x sbtmerrlog 0x%x\n",
1234 R_SBREG(sii
, &sb
->sbtmerrloga
), R_SBREG(sii
, &sb
->sbtmerrlog
)));
1235 SI_ERROR(("sbimstate 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n",
1236 R_SBREG(sii
, &sb
->sbimstate
),
1237 R_SBREG(sii
, &sb
->sbtmstatelow
), R_SBREG(sii
, &sb
->sbtmstatehigh
)));
1238 SI_ERROR(("sbimconfiglow 0x%x sbtmconfiglow 0x%x\nsbtmconfighigh 0x%x sbidhigh 0x%x\n",
1239 R_SBREG(sii
, &sb
->sbimconfiglow
), R_SBREG(sii
, &sb
->sbtmconfiglow
),
1240 R_SBREG(sii
, &sb
->sbtmconfighigh
), R_SBREG(sii
, &sb
->sbidhigh
)));
1242 /* Print more detailed registers that are otherwise not relevant */
1244 SI_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x\n",
1245 R_SBREG(sii
, &sb
->sbipsflag
), R_SBREG(sii
, &sb
->sbtpsflag
)));
1246 SI_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x\nsbadmatch1 0x%x sbadmatch0 0x%x\n",
1247 R_SBREG(sii
, &sb
->sbadmatch3
), R_SBREG(sii
, &sb
->sbadmatch2
),
1248 R_SBREG(sii
, &sb
->sbadmatch1
), R_SBREG(sii
, &sb
->sbadmatch0
)));
1249 SI_ERROR(("sbintvec 0x%x sbbwa0 0x%x sbimconfighigh 0x%x\n",
1250 R_SBREG(sii
, &sb
->sbintvec
), R_SBREG(sii
, &sb
->sbbwa0
),
1251 R_SBREG(sii
, &sb
->sbimconfighigh
)));
1252 SI_ERROR(("sbbconfig 0x%x sbbstate 0x%x\n",
1253 R_SBREG(sii
, &sb
->sbbconfig
), R_SBREG(sii
, &sb
->sbbstate
)));
1254 SI_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x \n\n",
1255 R_SBREG(sii
, &sb
->sbactcnfg
), R_SBREG(sii
, &sb
->sbflagst
),
1256 R_SBREG(sii
, &sb
->sbidlow
)));