2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/types.h>
20 #include <linux/netdevice.h>
31 #include "siutils_priv.h"
33 /* local prototypes */
/* Look up the index of the core whose backplane address is 'sba'. */
34 static uint
_sb_coreidx(si_info_t
*sii
, u32 sba
);
/* Recursively scan bus 'sbba' for cores; returns the total core count. */
35 static uint
_sb_scan(si_info_t
*sii
, u32 sba
, void *regs
, uint bus
,
36 u32 sbba
, uint ncores
);
/* Return the backplane address of the currently mapped core. */
37 static u32
_sb_coresba(si_info_t
*sii
);
/* Switch the register mapping to core 'coreidx'; returns its regs va. */
38 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
);
/* Read-modify-write sbconfig register 'r': clear 'mask' bits, OR in 'val'. */
40 #define SET_SBREG(sii, r, mask, val) \
41 W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* Derive a core's sbconfig register block from its mapped base 'va'. */
42 #define REGS2SB(va) (sbconfig_t *) ((s8 *)(va) + SBCONFIGOFF)
/* Sonics backplane revision codes as extracted from sbidlow. */
45 #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
46 #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
/* Raw sbconfig accessors; thin wrappers over sb_read_sbreg/sb_write_sbreg. */
48 #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
49 #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
/* Read-modify-write helpers: AND 'v' into / OR 'v' into a register. */
50 #define AND_SBREG(sii, sbr, v) \
51 W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
52 #define OR_SBREG(sii, sbr, v) \
53 W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
55 static u32
sb_read_sbreg(si_info_t
*sii
, volatile u32
*sbr
)
57 return R_REG(sii
->osh
, sbr
);
60 static void sb_write_sbreg(si_info_t
*sii
, volatile u32
*sbr
, u32 v
)
62 W_REG(sii
->osh
, sbr
, v
);
/*
 * sb_coreid - return the id of the currently selected core, extracted
 * from the sbidhigh register via SBIDH_CC_MASK.
 * NOTE(review): this chunk is truncated — the local declarations and the
 * shift count after the trailing '>>' are not visible here; confirm
 * against the complete file.
 */
65 uint
sb_coreid(si_t
*sih
)
/* sbconfig register block of the current core */
71 sb
= REGS2SB(sii
->curmap
);
/* core-id field of sbidhigh (shift amount truncated from this view) */
73 return (R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >>
77 /* return core index of the core with address 'sba' */
/*
 * NOTE(review): truncated chunk — the declaration of 'i' and the return
 * statements (matched index vs. not-found) are not visible here.
 */
78 static uint
_sb_coreidx(si_info_t
*sii
, u32 sba
)
/* linear search of the per-core backplane addresses saved by the scan */
82 for (i
= 0; i
< sii
->numcores
; i
++)
83 if (sba
== sii
->coresba
[i
])
88 /* return core address of the current core */
89 static u32
_sb_coresba(si_info_t
*sii
)
/* answer depends on how the chip is attached */
93 switch (sii
->pub
.bustype
) {
/* presumably the direct-backplane (SI_BUS) case, where the mapped
 * address is the core address — TODO confirm; other bus cases and the
 * case labels are not visible in this chunk.
 */
96 sbaddr
= (u32
)(unsigned long)sii
->curmap
;
/*
 * sb_corerev - return the revision of the currently selected core,
 * i.e. SBCOREREV() applied to its sbidhigh register.
 * NOTE(review): local declarations are missing from this chunk.
 */
106 uint
sb_corerev(si_t
*sih
)
113 sb
= REGS2SB(sii
->curmap
);
114 sbidh
= R_SBREG(sii
, &sb
->sbidhigh
);
116 return SBCOREREV(sbidh
);
/*
 * sb_iscoreup - true iff the current core is up: per sbtmstatelow the
 * reset and reject bits are clear and the clock-enable bit is set.
 * NOTE(review): local declarations are missing from this chunk.
 */
119 bool sb_iscoreup(si_t
*sih
)
125 sb
= REGS2SB(sii
->curmap
);
/* of {RESET, REJ*, CLOCK_EN}, exactly CLOCK_EN must be set */
127 return (R_SBREG(sii
, &sb
->sbtmstatelow
) &
128 (SBTML_RESET
| SBTML_REJ_MASK
|
129 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
))) ==
130 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
);
134 * Switch to 'coreidx', issue a single arbitrary 32bit
135 * register mask&set operation,
136 * switch back to the original core, and return the new value.
138 * When using the silicon backplane, no fiddling with interrupts
139 * or core switches is needed.
141 * Also, when using pci/pcie, we can optimize away the core switching
143 * and (on newer pci cores) chipcommon registers.
/*
 * sb_corereg - read-modify-write a register of core 'coreidx': switch to
 * that core, apply ((reg & ~mask) | val), read the register back,
 * restore the original core, and return the value read.  Offsets at or
 * above SBCONFIGOFF go through the sbconfig helpers, others through
 * plain R_REG/W_REG.
 * NOTE(review): truncated chunk — local declarations, the early-return
 * body of the SI_MAXCORES check, the write for the sbconfig branch and
 * the final return are not visible here.
 */
145 uint
sb_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
156 ASSERT(GOODIDX(coreidx
));
157 ASSERT(regoff
< SI_CORE_SIZE
);
/* 'val' bits must lie within 'mask' */
158 ASSERT((val
& ~mask
) == 0);
160 if (coreidx
>= SI_MAXCORES
)
/* keep interrupts off across the core switch */
164 INTR_OFF(sii
, intr_val
);
166 /* save current core index */
167 origidx
= si_coreidx(&sii
->pub
);
/* address of the target register within the newly selected core */
170 r
= (u32
*) ((unsigned char *) sb_setcoreidx(&sii
->pub
, coreidx
) +
/* mask&set: sbconfig space needs the sbreg accessors */
177 if (regoff
>= SBCONFIGOFF
) {
178 w
= (R_SBREG(sii
, r
) & ~mask
) | val
;
181 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
182 W_REG(sii
->osh
, r
, w
);
/* read the (possibly updated) value back */
187 if (regoff
>= SBCONFIGOFF
)
190 w
= R_REG(sii
->osh
, r
);
193 /* restore core index */
194 if (origidx
!= coreidx
)
195 sb_setcoreidx(&sii
->pub
, origidx
);
197 INTR_RESTORE(sii
, intr_val
);
203 /* Scan the enumeration space to find all cores starting from the given
204 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
205 * is the default core address at chip POR time and 'regs' is the virtual
206 * address that the default core is mapped at. 'ncores' is the number of
207 * cores expected on bus 'sbba'. It returns the total number of cores
208 * starting from bus 'sbba', inclusive.
/* limit recursion depth: primary bus plus one bridged bus */
210 #define SB_MAXBUSES 2
/*
 * NOTE(review): truncated chunk — local declarations, several branch
 * bodies (depth error return, chipcommon numcores assignment, bridged
 * bus bookkeeping) are not visible here; confirm against the full file.
 */
211 static uint
_sb_scan(si_info_t
*sii
, u32 sba
, void *regs
, uint bus
, u32 sbba
,
/* refuse to recurse past SB_MAXBUSES bridged buses */
218 if (bus
>= SB_MAXBUSES
) {
219 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to "
220 "scan\n", sbba
, bus
));
223 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n",
226 /* Scan all cores on the bus starting from core 0.
227 * Core addresses must be contiguous on each bus.
229 for (i
= 0, next
= sii
->numcores
;
230 i
< numcores
&& next
< SB_BUS_MAXCORES
; i
++, next
++) {
/* record this core's backplane address */
231 sii
->coresba
[next
] = sbba
+ (i
* SI_CORE_SIZE
);
233 /* change core to 'next' and read its coreid */
234 sii
->curmap
= _sb_setcoreidx(sii
, next
);
237 sii
->coreid
[next
] = sb_coreid(&sii
->pub
);
239 /* core specific processing... */
240 /* chipc provides # cores */
241 if (sii
->coreid
[next
] == CC_CORE_ID
) {
242 chipcregs_t
*cc
= (chipcregs_t
*) sii
->curmap
;
243 u32 ccrev
= sb_corerev(&sii
->pub
);
245 /* determine numcores - this is the
246 total # cores in the chip */
247 if (((ccrev
== 4) || (ccrev
>= 6)))
249 (R_REG(sii
->osh
, &cc
->chipid
) & CID_CC_MASK
)
253 SI_ERROR(("sb_chip2numcores: unsupported chip "
254 "0x%x\n", sii
->pub
.chip
));
259 SI_VMSG(("_sb_scan: %u cores in the chip %s\n",
260 numcores
, sii
->pub
.issim
? "QT" : ""));
262 /* scan bridged SB(s) and add results to the end of the list */
263 else if (sii
->coreid
[next
] == OCP_CORE_ID
) {
264 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
/* address of the bridged bus comes from sbadmatch1 */
265 u32 nsbba
= R_SBREG(sii
, &sb
->sbadmatch1
);
268 sii
->numcores
= next
+ 1;
/* only follow bridges inside the enumeration space */
270 if ((nsbba
& 0xfff00000) != SI_ENUM_BASE
)
/* skip buses we have already visited */
273 if (_sb_coreidx(sii
, nsbba
) != BADIDX
)
277 (R_SBREG(sii
, &sb
->sbtmstatehigh
) & 0x000f0000) >>
/* recurse into the bridged bus */
279 nsbcc
= _sb_scan(sii
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
280 if (sbba
== SI_ENUM_BASE
)
286 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
/* total = cores on this bus plus cores found on bridged buses */
288 sii
->numcores
= i
+ ncc
;
289 return sii
->numcores
;
292 /* scan the sb enumerated space to identify all cores */
/*
 * NOTE(review): truncated chunk — local declarations and the statement
 * that consumes the sbidlow revision value (line 303) are not fully
 * visible here.
 */
293 void sb_scan(si_t
*sih
, void *regs
, uint devid
)
300 sb
= REGS2SB(sii
->curmap
);
/* backplane revision field of sbidlow */
303 (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
305 /* Save the current core info and validate it later till we know
306 * for sure what is good and what is bad.
308 origsba
= _sb_coresba(sii
);
310 /* scan all SB(s) starting from SI_ENUM_BASE */
311 sii
->numcores
= _sb_scan(sii
, origsba
, regs
, 0, SI_ENUM_BASE
, 1);
315 * This function changes logical "focus" to the indicated core;
316 * must be called with interrupts off.
317 * Moreover, callers should keep interrupts off during switching out of
318 * and back to d11 core
/*
 * NOTE(review): truncated chunk — local declarations, the early-return
 * body for a bad index and the final return are not visible here; the
 * opening of the comment block above is also cut off.
 */
320 void *sb_setcoreidx(si_t
*sih
, uint coreidx
)
/* reject out-of-range core indices */
326 if (coreidx
>= sii
->numcores
)
330 * If the user has provided an interrupt mask enabled function,
331 * then assert interrupts are disabled before switching the core.
333 ASSERT((sii
->intrsenabled_fn
== NULL
)
334 || !(*(sii
)->intrsenabled_fn
) ((sii
)->intr_arg
));
/* remap registers and remember the newly focused core */
336 sii
->curmap
= _sb_setcoreidx(sii
, coreidx
);
337 sii
->curidx
= coreidx
;
342 /* This function changes the logical "focus" to the indicated core.
343 * Return the current core's virtual address.
/*
 * NOTE(review): truncated chunk — the switch's case labels, other bus
 * cases and the final return are not visible here.
 */
345 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
)
/* backplane address recorded for this core during the scan */
347 u32 sbaddr
= sii
->coresba
[coreidx
];
350 switch (sii
->pub
.bustype
) {
/* lazily record the core's register address on first use */
355 if (!sii
->regs
[coreidx
]) {
356 sii
->regs
[coreidx
] = (void *)sbaddr
;
357 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
359 regs
= sii
->regs
[coreidx
];
/*
 * sb_core_disable - put the current core into reset, leaving reset and
 * reject asserted.  'bits' are core-specific control bits to keep set.
 * NOTE(review): truncated chunk — local declarations, the early
 * returns, delay calls and the closing sequence are not visible here.
 */
371 void sb_core_disable(si_t
*sih
, u32 bits
)
379 ASSERT(GOODREGS(sii
->curmap
));
380 sb
= REGS2SB(sii
->curmap
);
382 /* if core is already in reset, just return */
383 if (R_SBREG(sii
, &sb
->sbtmstatelow
) & SBTML_RESET
)
386 /* if clocks are not enabled, put into reset and return */
387 if ((R_SBREG(sii
, &sb
->sbtmstatelow
) &
388 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
)) == 0)
391 /* set target reject and spin until busy is clear
392 (preserve core-specific bits) */
393 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_REJ
);
/* read back to flush the write before spinning */
394 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
396 SPINWAIT((R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
397 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
398 SI_ERROR(("%s: target state still busy\n", __func__
));
/* initiator cores must also reject inbound transactions */
400 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
) {
401 OR_SBREG(sii
, &sb
->sbimstate
, SBIM_RJ
);
402 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
404 SPINWAIT((R_SBREG(sii
, &sb
->sbimstate
) & SBIM_BY
), 100000);
407 /* set reset and reject while enabling the clocks */
408 W_SBREG(sii
, &sb
->sbtmstatelow
,
409 (((bits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
410 SBTML_REJ
| SBTML_RESET
));
411 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
414 /* don't forget to clear the initiator reject bit */
415 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
)
416 AND_SBREG(sii
, &sb
->sbimstate
, ~SBIM_RJ
);
419 /* leave reset and reject asserted */
420 W_SBREG(sii
, &sb
->sbtmstatelow
,
421 ((bits
<< SBTML_SICF_SHIFT
) | SBTML_REJ
| SBTML_RESET
));
425 /* reset and re-enable a core
427 * bits - core specific bits that are set during and after reset sequence
428 * resetbits - core specific bits that are set only during reset sequence
/*
 * NOTE(review): truncated chunk — local declarations, delay calls and
 * the end of the function (past original line 475) are not visible
 * here; the end of this definition lies beyond this view.
 */
430 void sb_core_reset(si_t
*sih
, u32 bits
, u32 resetbits
)
437 ASSERT(GOODREGS(sii
->curmap
));
438 sb
= REGS2SB(sii
->curmap
);
441 * Must do the disable sequence first to work for
442 * arbitrary current core state.
444 sb_core_disable(sih
, (bits
| resetbits
));
447 * Now do the initialization sequence.
450 /* set reset while enabling the clock and
451 forcing them on throughout the core */
452 W_SBREG(sii
, &sb
->sbtmstatelow
,
453 (((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) <<
454 SBTML_SICF_SHIFT
) | SBTML_RESET
));
/* read back to flush the write */
455 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
/* clear any pending serror */
458 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_SERR
)
459 W_SBREG(sii
, &sb
->sbtmstatehigh
, 0);
/* clear inband-error / timeout flags if set */
461 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
462 if (dummy
& (SBIM_IBE
| SBIM_TO
))
463 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
465 /* clear reset and allow it to propagate throughout the core */
466 W_SBREG(sii
, &sb
->sbtmstatelow
,
467 ((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) <<
469 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
472 /* leave clock enabled */
473 W_SBREG(sii
, &sb
->sbtmstatelow
,
474 ((bits
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
475 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);