Staging: brcm80211: s/int8/s8/
drivers/staging/brcm80211/util/sbutils.c
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pci_core.h>
#include <pcicfg.h>
#include <sbpcmcia.h>
#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus,
		     uint32 sbba, uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t *) ((s8 *)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	\
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	\
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
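
/*
 * Thin accessors behind the R_SBREG/W_SBREG macros above: 32-bit reads and
 * writes of Sonics backplane config registers through the OS layer (sii->osh).
 */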
static uint32 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	return R_REG(sii->osh, sbr);
}

static void sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	W_REG(sii->osh, sbr, v);
}
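
/* return the core id of the currently selected core, from sbidhigh */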
uint sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return (R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >>
		SBIDH_CC_SHIFT;
}

/* return core index of the core with address 'sba' */
static uint BCMATTACHFN(_sb_coreidx) (si_info_t *sii, uint32 sba)
{
	uint i;

	for (i = 0; i < sii->numcores; i++)
		if (sba == sii->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32 BCMATTACHFN(_sb_coresba) (si_info_t *sii)
{
	uint32 sbaddr = 0;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32) (uintptr) sii->curmap;
		break;
	default:
		ASSERT(0);
		break;
	}

	return sbaddr;
}
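
/* return the core revision of the currently selected core, from sbidhigh */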
uint sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return SBCOREREV(sbidh);
}
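
/*
 * A core is "up" when reset and reject are deasserted in sbtmstatelow and
 * only the clock-enable flag is set in its core-control field.
 */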
bool sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return (R_SBREG(sii, &sb->sbtmstatelow) &
		(SBTML_RESET | SBTML_REJ_MASK |
		 (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
		(SICF_CLOCK_EN << SBTML_SICF_SHIFT);
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for
 * pci registers and (on newer pci cores) chipcommon registers.
 */
uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32 *) ((uchar *) sb_setcoreidx(&sii->pub, coreidx) +
				regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else
		w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
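
/*
 * Illustrative use of sb_corereg() (a sketch, not code from this driver):
 * a zero mask and value skips the mask-and-set step, so
 *
 *	w = sb_corereg(sih, coreidx, regoff, 0, 0);
 *
 * simply returns the current contents of the register at 'regoff'.
 */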
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
BCMATTACHFN(_sb_scan) (si_info_t *sii, uint32 sba, void *regs, uint bus,
		       uint32 sbba, uint numcores) {
	uint next;
	uint ncc = 0;
	uint i;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to "
			  "scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n",
		sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores;
	     i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *) sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the
			   total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores =
				    (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK)
				    >> CID_CC_SHIFT;
			else {
				/* Older chips */
				SI_ERROR(("sb_chip2numcores: unsupported chip "
					  "0x%x\n", CHIPID(sii->pub.chip)));
				ASSERT(0);
				numcores = 1;
			}

			SI_VMSG(("_sb_scan: %u cores in the chip %s\n",
				 numcores, sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc =
			    (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >>
			    16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void BCMATTACHFN(sb_scan) (si_t *sih, void *regs, uint devid)
{
	si_info_t *sii;
	uint32 origsba;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	sii->pub.socirev =
	    (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of
 * and back to d11 core.
 */
void *sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return NULL;

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL)
	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return sii->curmap;
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	uint32 sbaddr = sii->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = (void *)(uintptr) sbaddr;
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;
#endif				/* BCMSDIO */
	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* traverse all cores to find and clear source of serror */
static void sb_serr_clear(si_info_t *sii)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void *corereg = NULL;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(&sii->pub);

	for (i = 0; i < sii->numcores; i++) {
		corereg = sb_setcoreidx(&sii->pub, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SI_ERROR(("sb_serr_clear: SError core 0x%x\n",
					  sb_coreid(&sii->pub)));
			}
		}
	}

	sb_setcoreidx(&sii->pub, origidx);
	INTR_RESTORE(sii, intr_val);
}

/*
 * Check if any inband, outband or timeout errors have happened and clear
 * them. Must be called with chip clk on!
 */
bool sb_taclear(si_t *sih, bool details)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint origidx;
	uint intr_val = 0;
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	void *corereg = NULL;
	volatile uint32 imstate, tmstate;

	sii = SI_INFO(sih);

	if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
	    (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {

		INTR_OFF(sii, intr_val);
		origidx = si_coreidx(sih);

		corereg = si_setcore(sih, PCMCIA_CORE_ID, 0);
		if (NULL == corereg)
			corereg = si_setcore(sih, SDIOD_CORE_ID, 0);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(sii, &sb->sbimstate);
			if ((imstate != 0xffffffff)
			    && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(sii, &sb->sbimstate,
					  ~(SBIM_IBE | SBIM_TO));
				/* inband = imstate & SBIM_IBE; cmd error */
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(sii, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff)
			    && (tmstate & SBTMH_INT_STATUS)) {
				sb_serr_clear(sii);
				serror = 1;
				OR_SBREG(sii, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(sii, &sb->sbtmstatelow,
					  ~SBTML_INT_ACK);
			}
		}
		sb_setcoreidx(sih, origidx);
		INTR_RESTORE(sii, intr_val);
	}

	if (inband | timeout | serror) {
		rc = TRUE;
		SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout "
			  "0x%x!\n", inband, serror, timeout));
	}

	return rc;
}
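
/*
 * Put the current core into reset: assert target (and, when the core is an
 * initiator, initiator) reject, wait for the backplane to go idle, then
 * leave reset and reject asserted with the core-specific 'bits' applied.
 */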
void sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) &
	     (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear
	   (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __func__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		 SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

 disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and
	   forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) <<
		  SBTML_SICF_SHIFT) | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR)
		W_SBREG(sii, &sb->sbtmstatehigh, 0);

	dummy = R_SBREG(sii, &sb->sbimstate);
	if (dummy & (SBIM_IBE | SBIM_TO))
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) <<
		 SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
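
/* extract the base address encoded in an sbadmatch register value */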
uint32 sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return base;
}