
/drivers/net/wireless/bcmdhd_34/sbutils.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2012, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: sbutils.c 310902 2012-01-26 19:45:33Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>
#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
                     uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define	SET_SBREG(sii, r, mask, val)	\
	W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))

static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * Compact flash presents only an 11-bit address, but we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
	 * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * Compact flash presents only an 11-bit address, but we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
	 * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}
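
/* Return the core ID of the currently selected core. */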
uint
sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}
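
/* Return the backplane interrupt flag status, read from the chipcommon core's sbflagst register. */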
uint
sb_intflag(si_t *sih)
{
	si_info_t *sii;
	void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag, intr_val = 0;

	sii = SI_INFO(sih);
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);
	corereg = si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return intflag;
}
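
/* Return the backplane flag number (sbtpsflag) assigned to the current core. */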
uint
sb_flag(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}
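
/* Set the current core's interrupt vector: enable backplane flag 'siflag', or clear all flags if siflag is -1. */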
void
sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 vec;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
_sb_coreidx(si_info_t *sii, uint32 sba)
{
	uint i;

	for (i = 0; i < sii->numcores; i ++)
		if (sba == sii->coresba[i])
			return i;

	return BADIDX;
}

/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}
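
/* Return the vendor code of the current core. */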
uint
sb_corevendor(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}
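
/* Return the revision of the current core. */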
uint
sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	    (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		    (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* return the new value
	 * for a write operation, the following readback ensures the completion of the write operation.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}

/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		    (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}
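
/* Return TRUE if the current core is up: reset and reject deasserted, clock enabled. */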
bool
sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
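
/*
 * Illustrative usage: a zero mask and value skip the write, so the call is a
 * read-only access that returns the current register contents, e.g.
 *
 *	w = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
 *
 * A non-zero mask/value pair performs a read-modify-write and returns the
 * value read back after the update.
 */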
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
			                             SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
		    (coreidx == SI_CC_IDX) &&
		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
			w = val;
		} else
			w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;
	uint i;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			sii->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
				           CID_CC_SHIFT;
			else {
				/* Older chips */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
					numcores = 6;
				else if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
					          chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
			         sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
sb_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii;
	uint32 origsba;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later, once we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes logical "focus" to the indicated core;
 * it must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	uint32 sbaddr = sii->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}

	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm = &sb->sbadmatch0;
		break;
	case 1:
		addrm = &sb->sbadmatch1;
		break;
	case 2:
		addrm = &sb->sbadmatch2;
		break;
	case 3:
		addrm = &sb->sbadmatch3;
		break;
	default:
		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in the current core */
int
sb_numaddrspaces(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* perform buffered register updates */
void
sb_commit(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;

	sii = SI_INFO(sih);

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffered register update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
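
/* Put the current core into reset. 'bits' are core-specific control flags left asserted along with reset and reject. */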
void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of DMA *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */
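
/*
 * Illustrative usage: pass to == 0 to clear both timeouts before a slow
 * access, then restore the value the call returned, e.g.
 *
 *	uint32 saved_to = sb_set_initiator_to(sih, 0, BADIDX);
 *	... access the slow device ...
 *	sb_set_initiator_to(sih, saved_to, BADIDX);
 *
 * Passing BADIDX for 'idx' lets the routine pick the master core itself.
 */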
#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
		case SDIO_BUS:
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return ret;
}
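
/* Decode the base address encoded in an sbadmatch register value. */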
uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}
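
/*
 * Decode the region size encoded in an sbadmatch register value: the size is
 * 1 << (ADINT + 1), so, for example, an ADINT field of 11 decodes to 4KB.
 */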
uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}