
/drivers/net/wireless/bcm4329/bcmsdstd.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
  1. /*
  2. * 'Standard' SDIO HOST CONTROLLER driver
  3. *
  4. * Copyright (C) 1999-2010, Broadcom Corporation
  5. *
  6. * Unless you and Broadcom execute a separate written software license
  7. * agreement governing use of this software, this software is licensed to you
  8. * under the terms of the GNU General Public License version 2 (the "GPL"),
  9. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  10. * following added to such license:
  11. *
  12. * As a special exception, the copyright holders of this software give you
  13. * permission to link this software with independent modules, and to copy and
  14. * distribute the resulting executable under terms of your choice, provided that
  15. * you also meet, for each linked independent module, the terms and conditions of
  16. * the license of that module. An independent module is a module which is not
  17. * derived from this software. The special exception does not apply to any
  18. * modifications of the software.
  19. *
  20. * Notwithstanding the above, under no circumstances may you combine this
  21. * software in any way with any other Broadcom software provided under a license
  22. * other than the GPL, without Broadcom's express prior written consent.
  23. *
  24. * $Id: bcmsdstd.c,v 1.64.4.1.4.4.2.18 2010/08/17 17:00:48 Exp $
  25. */
  26. #include <typedefs.h>
  27. #include <bcmdevs.h>
  28. #include <bcmendian.h>
  29. #include <bcmutils.h>
  30. #include <osl.h>
  31. #include <siutils.h>
  32. #include <sdio.h> /* SDIO Device and Protocol Specs */
  33. #include <sdioh.h> /* SDIO Host Controller Specification */
  34. #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
  35. #include <sdiovar.h> /* ioctl/iovars */
  36. #include <pcicfg.h>
  37. #define SD_PAGE_BITS 12
  38. #define SD_PAGE (1 << SD_PAGE_BITS)
  39. #include <bcmsdstd.h>
  40. /* Globals */
  41. uint sd_msglevel = SDH_ERROR_VAL;
  42. uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
  43. uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
  44. uint sd_f2_blocksize = 64; /* Default blocksize */
  45. #ifdef BCMSDYIELD
  46. bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
  47. uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
  48. bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
  49. #endif
  50. uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
  51. uint sd_power = 1; /* Default to SD Slot powered ON */
  52. uint sd_clock = 1; /* Default to SD Clock turned ON */
  53. uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
  54. uint8 sd_dma_mode = DMA_MODE_SDMA; /* Default to SDMA for now */
  55. uint sd_toctl = 7;
  56. static bool trap_errs = FALSE;
  57. static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
  58. /* Prototypes */
  59. static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
  60. static bool sdstd_start_power(sdioh_info_t *sd);
  61. static bool sdstd_bus_width(sdioh_info_t *sd, int width);
  62. static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
  63. static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
  64. static int sdstd_card_enablefuncs(sdioh_info_t *sd);
  65. static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
  66. static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
  67. static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
  68. int regsize, uint32 *data);
  69. static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
  70. int regsize, uint32 data);
  71. static int sdstd_driver_init(sdioh_info_t *sd);
  72. static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
  73. static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
  74. uint32 addr, int nbytes, uint32 *data);
  75. static int sdstd_abort(sdioh_info_t *sd, uint func);
  76. static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
  77. static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
  78. static void sd_map_dma(sdioh_info_t * sd);
  79. static void sd_unmap_dma(sdioh_info_t * sd);
  80. static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
  81. static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
  82. static void sd_create_adma_descriptor(sdioh_info_t *sd,
  83. uint32 index, uint32 addr_phys,
  84. uint16 length, uint16 flags);
  85. static void sd_dump_adma_dscr(sdioh_info_t *sd);
  86. static void sdstd_dumpregs(sdioh_info_t *sd);
  87. /*
  88. * Private register access routines.
  89. */
  90. /* 16 bit PCI regs */
  91. extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
  92. uint16
  93. sdstd_rreg16(sdioh_info_t *sd, uint reg)
  94. {
  95. volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
  96. sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
  97. return data;
  98. }
  99. extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
  100. void
  101. sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
  102. {
  103. *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
  104. sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
  105. }
  106. static void
  107. sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
  108. {
  109. volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
  110. sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
  111. data |= val;
  112. *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
  113. }
  114. static void
  115. sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
  116. {
  117. volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
  118. sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
  119. data &= ~mask;
  120. data |= (val & mask);
  121. *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
  122. }
  123. /* 32 bit PCI regs */
  124. static uint32
  125. sdstd_rreg(sdioh_info_t *sd, uint reg)
  126. {
  127. volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
  128. sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
  129. return data;
  130. }
  131. static inline void
  132. sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
  133. {
  134. *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
  135. sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
  136. }
  137. /* 8 bit PCI regs */
  138. static inline void
  139. sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
  140. {
  141. *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
  142. sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
  143. }
  144. static uint8
  145. sdstd_rreg8(sdioh_info_t *sd, uint reg)
  146. {
  147. volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
  148. sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
  149. return data;
  150. }
  151. /*
  152. * Private work routines
  153. */
  154. sdioh_info_t *glob_sd;
  155. /*
  156. * Public entry points & extern's
  157. */
  158. extern sdioh_info_t *
  159. sdioh_attach(osl_t *osh, void *bar0, uint irq)
  160. {
  161. sdioh_info_t *sd;
  162. sd_trace(("%s\n", __FUNCTION__));
  163. if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
  164. sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
  165. return NULL;
  166. }
  167. bzero((char *)sd, sizeof(sdioh_info_t));
  168. glob_sd = sd;
  169. sd->osh = osh;
  170. if (sdstd_osinit(sd) != 0) {
  171. sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
  172. MFREE(sd->osh, sd, sizeof(sdioh_info_t));
  173. return NULL;
  174. }
  175. sd->mem_space = (volatile char *)sdstd_reg_map(osh, (uintptr)bar0, SDIOH_REG_WINSZ);
  176. sd_init_dma(sd);
  177. sd->irq = irq;
  178. if (sd->mem_space == NULL) {
  179. sd_err(("%s:ioremap() failed\n", __FUNCTION__));
  180. sdstd_osfree(sd);
  181. MFREE(sd->osh, sd, sizeof(sdioh_info_t));
  182. return NULL;
  183. }
  184. sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
  185. sd->intr_handler = NULL;
  186. sd->intr_handler_arg = NULL;
  187. sd->intr_handler_valid = FALSE;
  188. /* Set defaults */
  189. sd->sd_blockmode = TRUE;
  190. sd->use_client_ints = TRUE;
  191. sd->sd_dma_mode = sd_dma_mode;
  192. if (!sd->sd_blockmode)
  193. sd->sd_dma_mode = DMA_MODE_NONE;
  194. if (sdstd_driver_init(sd) != SUCCESS) {
  195. /* If host CPU was reset without resetting SD bus or
  196. SD device, the device will still have its RCA but
  197. driver no longer knows what it is (since driver has been restarted).
  198. Go through initialization once more to clear the RCA and reassign it.
  199. */
  200. sd_info(("driver_init failed - Reset RCA and try again\n"));
  201. if (sdstd_driver_init(sd) != SUCCESS) {
  202. sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
  203. if (sd->mem_space) {
  204. sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
  205. sd->mem_space = NULL;
  206. }
  207. sdstd_osfree(sd);
  208. MFREE(sd->osh, sd, sizeof(sdioh_info_t));
  209. return (NULL);
  210. }
  211. }
  212. OSL_DMADDRWIDTH(osh, 32);
  213. /* Always map DMA buffers, so we can switch between DMA modes. */
  214. sd_map_dma(sd);
  215. if (sdstd_register_irq(sd, irq) != SUCCESS) {
  216. sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
  217. sdstd_free_irq(sd->irq, sd);
  218. if (sd->mem_space) {
  219. sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
  220. sd->mem_space = NULL;
  221. }
  222. sdstd_osfree(sd);
  223. MFREE(sd->osh, sd, sizeof(sdioh_info_t));
  224. return (NULL);
  225. }
  226. sd_trace(("%s: Done\n", __FUNCTION__));
  227. return sd;
  228. }
  229. extern SDIOH_API_RC
  230. sdioh_detach(osl_t *osh, sdioh_info_t *sd)
  231. {
  232. sd_trace(("%s\n", __FUNCTION__));
  233. if (sd) {
  234. sd_unmap_dma(sd);
  235. sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
  236. sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
  237. sdstd_free_irq(sd->irq, sd);
  238. if (sd->card_init_done)
  239. sdstd_reset(sd, 1, 1);
  240. if (sd->mem_space) {
  241. sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
  242. sd->mem_space = NULL;
  243. }
  244. sdstd_osfree(sd);
  245. MFREE(sd->osh, sd, sizeof(sdioh_info_t));
  246. }
  247. return SDIOH_API_RC_SUCCESS;
  248. }
  249. /* Configure callback to client when we receive client interrupt */
  250. extern SDIOH_API_RC
  251. sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
  252. {
  253. sd_trace(("%s: Entering\n", __FUNCTION__));
  254. sd->intr_handler = fn;
  255. sd->intr_handler_arg = argh;
  256. sd->intr_handler_valid = TRUE;
  257. return SDIOH_API_RC_SUCCESS;
  258. }
  259. extern SDIOH_API_RC
  260. sdioh_interrupt_deregister(sdioh_info_t *sd)
  261. {
  262. sd_trace(("%s: Entering\n", __FUNCTION__));
  263. sd->intr_handler_valid = FALSE;
  264. sd->intr_handler = NULL;
  265. sd->intr_handler_arg = NULL;
  266. return SDIOH_API_RC_SUCCESS;
  267. }
  268. extern SDIOH_API_RC
  269. sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
  270. {
  271. sd_trace(("%s: Entering\n", __FUNCTION__));
  272. *onoff = sd->client_intr_enabled;
  273. return SDIOH_API_RC_SUCCESS;
  274. }
  275. #if defined(DHD_DEBUG)
  276. extern bool
  277. sdioh_interrupt_pending(sdioh_info_t *sd)
  278. {
  279. uint16 intrstatus;
  280. intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
  281. return !!(intrstatus & CLIENT_INTR);
  282. }
  283. #endif
  284. uint
  285. sdioh_query_iofnum(sdioh_info_t *sd)
  286. {
  287. return sd->num_funcs;
  288. }
  289. /* IOVar table */
  290. enum {
  291. IOV_MSGLEVEL = 1,
  292. IOV_BLOCKMODE,
  293. IOV_BLOCKSIZE,
  294. IOV_DMA,
  295. IOV_USEINTS,
  296. IOV_NUMINTS,
  297. IOV_NUMLOCALINTS,
  298. IOV_HOSTREG,
  299. IOV_DEVREG,
  300. IOV_DIVISOR,
  301. IOV_SDMODE,
  302. IOV_HISPEED,
  303. IOV_HCIREGS,
  304. IOV_POWER,
  305. IOV_YIELDCPU,
  306. IOV_MINYIELD,
  307. IOV_FORCERB,
  308. IOV_CLOCK
  309. };
  310. const bcm_iovar_t sdioh_iovars[] = {
  311. {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
  312. {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
  313. {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
  314. {"sd_dma", IOV_DMA, 0, IOVT_UINT32, 0 },
  315. #ifdef BCMSDYIELD
  316. {"sd_yieldcpu", IOV_YIELDCPU, 0, IOVT_BOOL, 0 },
  317. {"sd_minyield", IOV_MINYIELD, 0, IOVT_UINT32, 0 },
  318. {"sd_forcerb", IOV_FORCERB, 0, IOVT_BOOL, 0 },
  319. #endif
  320. {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
  321. {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
  322. {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
  323. {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
  324. {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
  325. {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
  326. {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
  327. {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
  328. {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
  329. {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
  330. {NULL, 0, 0, 0, 0 }
  331. };
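/* Illustrative only: the sd_blocksize value packs the function number into the
 * upper 16 bits and the block size into the lower 16, matching the
 * ((fn << 16) | size) encoding noted above and the IOV_SVAL(IOV_BLOCKSIZE)
 * handler below. For example, setting function 2 to a 512-byte block size:
 *
 *     int32 val = (2 << 16) | 512;
 *     sdioh_iovar_op(sd, "sd_blocksize", NULL, 0, &val, sizeof(val), TRUE);
 */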
  332. int
  333. sdioh_iovar_op(sdioh_info_t *si, const char *name,
  334. void *params, int plen, void *arg, int len, bool set)
  335. {
  336. const bcm_iovar_t *vi = NULL;
  337. int bcmerror = 0;
  338. int val_size;
  339. int32 int_val = 0;
  340. bool bool_val;
  341. uint32 actionid;
  342. ASSERT(name);
  343. ASSERT(len >= 0);
  344. /* Get must have return space; Set does not take qualifiers */
  345. ASSERT(set || (arg && len));
  346. ASSERT(!set || (!params && !plen));
  347. sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
  348. if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
  349. bcmerror = BCME_UNSUPPORTED;
  350. goto exit;
  351. }
  352. if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
  353. goto exit;
  354. /* Set up params so get and set can share the convenience variables */
  355. if (params == NULL) {
  356. params = arg;
  357. plen = len;
  358. }
  359. if (vi->type == IOVT_VOID)
  360. val_size = 0;
  361. else if (vi->type == IOVT_BUFFER)
  362. val_size = len;
  363. else
  364. val_size = sizeof(int);
  365. if (plen >= (int)sizeof(int_val))
  366. bcopy(params, &int_val, sizeof(int_val));
  367. bool_val = (int_val != 0) ? TRUE : FALSE;
  368. actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
  369. switch (actionid) {
  370. case IOV_GVAL(IOV_MSGLEVEL):
  371. int_val = (int32)sd_msglevel;
  372. bcopy(&int_val, arg, val_size);
  373. break;
  374. case IOV_SVAL(IOV_MSGLEVEL):
  375. sd_msglevel = int_val;
  376. break;
  377. case IOV_GVAL(IOV_BLOCKMODE):
  378. int_val = (int32)si->sd_blockmode;
  379. bcopy(&int_val, arg, val_size);
  380. break;
  381. case IOV_SVAL(IOV_BLOCKMODE):
  382. si->sd_blockmode = (bool)int_val;
  383. /* Haven't figured out how to make non-block mode work with DMA */
  384. if (!si->sd_blockmode)
  385. si->sd_dma_mode = DMA_MODE_NONE;
  386. break;
  387. #ifdef BCMSDYIELD
  388. case IOV_GVAL(IOV_YIELDCPU):
  389. int_val = sd_yieldcpu;
  390. bcopy(&int_val, arg, val_size);
  391. break;
  392. case IOV_SVAL(IOV_YIELDCPU):
  393. sd_yieldcpu = (bool)int_val;
  394. break;
  395. case IOV_GVAL(IOV_MINYIELD):
  396. int_val = sd_minyield;
  397. bcopy(&int_val, arg, val_size);
  398. break;
  399. case IOV_SVAL(IOV_MINYIELD):
  400. sd_minyield = (uint)int_val;
  401. break;
  402. case IOV_GVAL(IOV_FORCERB):
  403. int_val = sd_forcerb;
  404. bcopy(&int_val, arg, val_size);
  405. break;
  406. case IOV_SVAL(IOV_FORCERB):
  407. sd_forcerb = (bool)int_val;
  408. break;
  409. #endif /* BCMSDYIELD */
  410. case IOV_GVAL(IOV_BLOCKSIZE):
  411. if ((uint32)int_val > si->num_funcs) {
  412. bcmerror = BCME_BADARG;
  413. break;
  414. }
  415. int_val = (int32)si->client_block_size[int_val];
  416. bcopy(&int_val, arg, val_size);
  417. break;
  418. case IOV_SVAL(IOV_BLOCKSIZE):
  419. {
  420. uint func = ((uint32)int_val >> 16);
  421. uint blksize = (uint16)int_val;
  422. uint maxsize;
  423. if (func > si->num_funcs) {
  424. bcmerror = BCME_BADARG;
  425. break;
  426. }
  427. switch (func) {
  428. case 0: maxsize = 32; break;
  429. case 1: maxsize = BLOCK_SIZE_4318; break;
  430. case 2: maxsize = BLOCK_SIZE_4328; break;
  431. default: maxsize = 0;
  432. }
  433. if (blksize > maxsize) {
  434. bcmerror = BCME_BADARG;
  435. break;
  436. }
  437. if (!blksize) {
  438. blksize = maxsize;
  439. }
  440. /* Now set it */
  441. sdstd_lock(si);
  442. bcmerror = set_client_block_size(si, func, blksize);
  443. sdstd_unlock(si);
  444. break;
  445. }
  446. case IOV_GVAL(IOV_DMA):
  447. int_val = (int32)si->sd_dma_mode;
  448. bcopy(&int_val, arg, val_size);
  449. break;
  450. case IOV_SVAL(IOV_DMA):
  451. si->sd_dma_mode = (char)int_val;
  452. sdstd_set_dma_mode(si, si->sd_dma_mode);
  453. break;
  454. case IOV_GVAL(IOV_USEINTS):
  455. int_val = (int32)si->use_client_ints;
  456. bcopy(&int_val, arg, val_size);
  457. break;
  458. case IOV_SVAL(IOV_USEINTS):
  459. si->use_client_ints = (bool)int_val;
  460. if (si->use_client_ints)
  461. si->intmask |= CLIENT_INTR;
  462. else
  463. si->intmask &= ~CLIENT_INTR;
  464. break;
  465. case IOV_GVAL(IOV_DIVISOR):
  466. int_val = (uint32)sd_divisor;
  467. bcopy(&int_val, arg, val_size);
  468. break;
  469. case IOV_SVAL(IOV_DIVISOR):
  470. sd_divisor = int_val;
  471. if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
  472. sd_err(("set clock failed!\n"));
  473. bcmerror = BCME_ERROR;
  474. }
  475. break;
  476. case IOV_GVAL(IOV_POWER):
  477. int_val = (uint32)sd_power;
  478. bcopy(&int_val, arg, val_size);
  479. break;
  480. case IOV_SVAL(IOV_POWER):
  481. sd_power = int_val;
  482. if (sd_power == 1) {
  483. if (sdstd_driver_init(si) != SUCCESS) {
  484. sd_err(("set SD Slot power failed!\n"));
  485. bcmerror = BCME_ERROR;
  486. } else {
  487. sd_err(("SD Slot Powered ON.\n"));
  488. }
  489. } else {
  490. uint8 pwr = 0;
  491. pwr = SFIELD(pwr, PWR_BUS_EN, 0);
  492. sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
  493. sd_err(("SD Slot Powered OFF.\n"));
  494. }
  495. break;
  496. case IOV_GVAL(IOV_CLOCK):
  497. int_val = (uint32)sd_clock;
  498. bcopy(&int_val, arg, val_size);
  499. break;
  500. case IOV_SVAL(IOV_CLOCK):
  501. sd_clock = int_val;
  502. if (sd_clock == 1) {
  503. sd_info(("SD Clock turned ON.\n"));
  504. if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
  505. sd_err(("sdstd_start_clock failed\n"));
  506. bcmerror = BCME_ERROR;
  507. }
  508. } else {
  509. /* turn off HC clock */
  510. sdstd_wreg16(si, SD_ClockCntrl,
  511. sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
  512. sd_info(("SD Clock turned OFF.\n"));
  513. }
  514. break;
  515. case IOV_GVAL(IOV_SDMODE):
  516. int_val = (uint32)sd_sdmode;
  517. bcopy(&int_val, arg, val_size);
  518. break;
  519. case IOV_SVAL(IOV_SDMODE):
  520. sd_sdmode = int_val;
  521. if (!sdstd_bus_width(si, sd_sdmode)) {
  522. sd_err(("sdstd_bus_width failed\n"));
  523. bcmerror = BCME_ERROR;
  524. }
  525. break;
  526. case IOV_GVAL(IOV_HISPEED):
  527. int_val = (uint32)sd_hiok;
  528. bcopy(&int_val, arg, val_size);
  529. break;
  530. case IOV_SVAL(IOV_HISPEED):
  531. sd_hiok = int_val;
  532. bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
  533. break;
  534. case IOV_GVAL(IOV_NUMINTS):
  535. int_val = (int32)si->intrcount;
  536. bcopy(&int_val, arg, val_size);
  537. break;
  538. case IOV_GVAL(IOV_NUMLOCALINTS):
  539. int_val = (int32)si->local_intrcount;
  540. bcopy(&int_val, arg, val_size);
  541. break;
  542. case IOV_GVAL(IOV_HOSTREG):
  543. {
  544. sdreg_t *sd_ptr = (sdreg_t *)params;
  545. if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
  546. sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
  547. bcmerror = BCME_BADARG;
  548. break;
  549. }
  550. sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
  551. (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
  552. sd_ptr->offset));
  553. if (sd_ptr->offset & 1)
  554. int_val = sdstd_rreg8(si, sd_ptr->offset);
  555. else if (sd_ptr->offset & 2)
  556. int_val = sdstd_rreg16(si, sd_ptr->offset);
  557. else
  558. int_val = sdstd_rreg(si, sd_ptr->offset);
  559. bcopy(&int_val, arg, sizeof(int_val));
  560. break;
  561. }
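/* Illustrative only: the offset's low bits select the access width above, so an
 * sd_hostreg get at offset 0x29 uses an 8-bit read, 0x2A a 16-bit read, and
 * 0x28 a 32-bit read.
 */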
  562. case IOV_SVAL(IOV_HOSTREG):
  563. {
  564. sdreg_t *sd_ptr = (sdreg_t *)params;
  565. if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
  566. sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
  567. bcmerror = BCME_BADARG;
  568. break;
  569. }
  570. sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
  571. (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
  572. sd_ptr->offset));
  573. if (sd_ptr->offset & 1)
  574. sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
  575. else if (sd_ptr->offset & 2)
  576. sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
  577. else
  578. sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
  579. break;
  580. }
  581. case IOV_GVAL(IOV_DEVREG):
  582. {
  583. sdreg_t *sd_ptr = (sdreg_t *)params;
  584. uint8 data;
  585. if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
  586. bcmerror = BCME_SDIO_ERROR;
  587. break;
  588. }
  589. int_val = (int)data;
  590. bcopy(&int_val, arg, sizeof(int_val));
  591. break;
  592. }
  593. case IOV_SVAL(IOV_DEVREG):
  594. {
  595. sdreg_t *sd_ptr = (sdreg_t *)params;
  596. uint8 data = (uint8)sd_ptr->value;
  597. if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
  598. bcmerror = BCME_SDIO_ERROR;
  599. break;
  600. }
  601. break;
  602. }
  603. default:
  604. bcmerror = BCME_UNSUPPORTED;
  605. break;
  606. }
  607. exit:
  608. return bcmerror;
  609. }
  610. extern SDIOH_API_RC
  611. sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
  612. {
  613. SDIOH_API_RC status;
  614. /* No lock needed since sdioh_request_byte does locking */
  615. status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
  616. return status;
  617. }
  618. extern SDIOH_API_RC
  619. sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
  620. {
  621. /* No lock needed since sdioh_request_byte does locking */
  622. SDIOH_API_RC status;
  623. status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
  624. return status;
  625. }
  626. extern SDIOH_API_RC
  627. sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
  628. {
  629. uint32 count;
  630. int offset;
  631. uint32 foo;
  632. uint8 *cis = cisd;
  633. sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
  634. if (!sd->func_cis_ptr[func]) {
  635. bzero(cis, length);
  636. return SDIOH_API_RC_FAIL;
  637. }
  638. sdstd_lock(sd);
  639. *cis = 0;
  640. for (count = 0; count < length; count++) {
  641. offset = sd->func_cis_ptr[func] + count;
  642. if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
  643. sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
  644. sdstd_unlock(sd);
  645. return SDIOH_API_RC_FAIL;
  646. }
  647. *cis = (uint8)(foo & 0xff);
  648. cis++;
  649. }
  650. sdstd_unlock(sd);
  651. return SDIOH_API_RC_SUCCESS;
  652. }
  653. extern SDIOH_API_RC
  654. sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
  655. {
  656. int status;
  657. uint32 cmd_arg;
  658. uint32 rsp5;
  659. sdstd_lock(sd);
  660. cmd_arg = 0;
  661. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
  662. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
  663. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
  664. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  665. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
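/* Illustrative only: for a CMD52 read of function 1, register 0x0A, the fields
 * above reduce to CMD52_FUNCTION = 1, CMD52_REG_ADDR = 0x0A, CMD52_RW_FLAG = 0,
 * CMD52_RAW = 0 and CMD52_DATA = 0; the SFIELD() macros place each field at the
 * bit positions defined for the CMD52 argument in sdio.h.
 */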
  666. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
  667. sdstd_unlock(sd);
  668. return status;
  669. }
  670. sdstd_cmd_getrsp(sd, &rsp5, 1);
  671. if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
  672. sd_err(("%s: 1: ErrorIntrStatus 0x%x\n",
  673. __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
  674. }
  675. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  676. sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
  677. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
  678. if (GFIELD(rsp5, RSP5_STUFF))
  679. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  680. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  681. if (rw == SDIOH_READ)
  682. *byte = GFIELD(rsp5, RSP5_DATA);
  683. sdstd_unlock(sd);
  684. return SDIOH_API_RC_SUCCESS;
  685. }
  686. extern SDIOH_API_RC
  687. sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
  688. uint32 *word, uint nbytes)
  689. {
  690. int status;
  691. bool swap = FALSE;
  692. sdstd_lock(sd);
  693. if (rw == SDIOH_READ) {
  694. status = sdstd_card_regread(sd, func, addr, nbytes, word);
  695. if (swap)
  696. *word = BCMSWAP32(*word);
  697. } else {
  698. if (swap)
  699. *word = BCMSWAP32(*word);
  700. status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
  701. }
  702. sdstd_unlock(sd);
  703. return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
  704. }
  705. extern SDIOH_API_RC
  706. sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
  707. uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
  708. {
  709. int len;
  710. int buflen = (int)buflen_u;
  711. bool fifo = (fix_inc == SDIOH_DATA_FIX);
  712. uint8 *localbuf = NULL, *tmpbuf = NULL;
  713. uint tmplen = 0;
  714. bool local_blockmode = sd->sd_blockmode;
  715. sdstd_lock(sd);
  716. ASSERT(reg_width == 4);
  717. ASSERT(buflen_u < (1 << 30));
  718. ASSERT(sd->client_block_size[func]);
  719. sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
  720. __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
  721. buflen_u, sd->r_cnt, sd->t_cnt, pkt));
  722. /* Break buffer down into blocksize chunks:
  723. * Bytemode: 1 block at a time.
  724. * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
  725. * Both: leftovers are handled last (will be sent via bytemode).
  726. */
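/* Worked example, following the loop below: with client_block_size[func] = 64
 * and SD_PAGE = 4096, a 4200-byte blockmode transfer is issued as 4096 bytes
 * (64 blocks), then 64 bytes (one block), then the 40-byte remainder.
 */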
  727. while (buflen > 0) {
  728. if (local_blockmode) {
  729. /* Max xfer is Page size */
  730. len = MIN(SD_PAGE, buflen);
  731. /* Round down to a block boundary */
  732. if (buflen > sd->client_block_size[func])
  733. len = (len/sd->client_block_size[func]) *
  734. sd->client_block_size[func];
  735. if ((func == SDIO_FUNC_1) && ((len % 4) == 3) && (rw == SDIOH_WRITE)) {
  736. tmplen = len;
  737. sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
  738. len++;
  739. tmpbuf = buffer;
  740. if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
  741. sd_err(("out of memory, malloced %d bytes\n",
  742. MALLOCED(sd->osh)));
  743. sdstd_unlock(sd);
  744. return SDIOH_API_RC_FAIL;
  745. }
  746. bcopy(buffer, localbuf, len);
  747. buffer = localbuf;
  748. }
  749. } else {
  750. /* Byte mode: One block at a time */
  751. len = MIN(sd->client_block_size[func], buflen);
  752. }
  753. if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
  754. sdstd_unlock(sd);
  755. return SDIOH_API_RC_FAIL;
  756. }
  757. if (local_blockmode) {
  758. if ((func == SDIO_FUNC_1) && ((tmplen % 4) == 3) && (rw == SDIOH_WRITE)) {
  759. if (localbuf)
  760. MFREE(sd->osh, localbuf, len);
  761. len--;
  762. buffer = tmpbuf;
  763. sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
  764. }
  765. }
  766. buffer += len;
  767. buflen -= len;
  768. if (!fifo)
  769. addr += len;
  770. }
  771. sdstd_unlock(sd);
  772. return SDIOH_API_RC_SUCCESS;
  773. }
  774. static
  775. int sdstd_abort(sdioh_info_t *sd, uint func)
  776. {
  777. int err = 0;
  778. int retries;
  779. uint16 cmd_reg;
  780. uint32 cmd_arg;
  781. uint32 rsp5;
  782. uint8 rflags;
  783. uint16 int_reg = 0;
  784. uint16 plain_intstatus;
  785. /* Argument is write to F0 (CCCR) IOAbort with function number */
  786. cmd_arg = 0;
  787. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
  788. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
  789. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
  790. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  791. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
  792. /* Command is CMD52 write */
  793. cmd_reg = 0;
  794. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
  795. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  796. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  797. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  798. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
  799. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
  800. if (sd->sd_mode == SDIOH_MODE_SPI) {
  801. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  802. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  803. }
  804. /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
  805. retries = RETRIES_SMALL;
  806. while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
  807. if (retries == RETRIES_SMALL)
  808. sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
  809. __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
  810. if (!--retries) {
  811. sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
  812. __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
  813. if (trap_errs)
  814. ASSERT(0);
  815. err = BCME_SDIO_ERROR;
  816. goto done;
  817. }
  818. }
  819. /* Clear errors from any previous commands */
  820. if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
  821. sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
  822. sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
  823. }
  824. plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
  825. if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
  826. sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
  827. if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
  828. sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
  829. }
  830. if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
  831. sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
  832. err = BCME_NODEVICE;
  833. goto done;
  834. }
  835. }
  836. /* Issue the command */
  837. sdstd_wreg(sd, SD_Arg0, cmd_arg);
  838. sdstd_wreg16(sd, SD_Command, cmd_reg);
  839. /* In interrupt mode, return now; a CMD_COMPLETE interrupt is expected later */
  840. if (!sd->polled_mode)
  841. return err;
  842. /* Otherwise, wait for the command to complete */
  843. retries = RETRIES_LARGE;
  844. do {
  845. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  846. } while (--retries &&
  847. (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
  848. (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
  849. /* If command completion fails, do a cmd reset and note the error */
  850. if (!retries) {
  851. sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
  852. __FUNCTION__, int_reg,
  853. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  854. sdstd_rreg(sd, SD_PresentState)));
  855. sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  856. retries = RETRIES_LARGE;
  857. do {
  858. sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
  859. } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
  860. SW_RESET_CMD)) && retries--);
  861. if (!retries) {
  862. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  863. }
  864. if (trap_errs)
  865. ASSERT(0);
  866. err = BCME_SDIO_ERROR;
  867. }
  868. /* Clear Command Complete interrupt */
  869. int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
  870. sdstd_wreg16(sd, SD_IntrStatus, int_reg);
  871. /* Check for Errors */
  872. if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
  873. sd_err(("%s: ErrorIntrStatus: 0x%x, "
  874. "(intrstatus = 0x%x, present state 0x%x) clearing\n",
  875. __FUNCTION__, plain_intstatus,
  876. sdstd_rreg16(sd, SD_IntrStatus),
  877. sdstd_rreg(sd, SD_PresentState)));
  878. sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
  879. sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
  880. retries = RETRIES_LARGE;
  881. do {
  882. sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
  883. } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
  884. SW_RESET_DAT)) && retries--);
  885. if (!retries) {
  886. sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
  887. }
  888. if (trap_errs)
  889. ASSERT(0);
  890. /* ABORT is dataless, only cmd errs count */
  891. if (plain_intstatus & ERRINT_CMD_ERRS)
  892. err = BCME_SDIO_ERROR;
  893. }
  894. /* If command failed don't bother looking at response */
  895. if (err)
  896. goto done;
  897. /* Otherwise, check the response */
  898. sdstd_cmd_getrsp(sd, &rsp5, 1);
  899. rflags = GFIELD(rsp5, RSP5_FLAGS);
  900. if (rflags & SD_RSP_R5_ERRBITS) {
  901. sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
  902. /* The CRC error flag applies to the previous command */
  903. if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
  904. err = BCME_SDIO_ERROR;
  905. goto done;
  906. }
  907. }
  908. if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
  909. ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
  910. sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
  911. err = BCME_SDIO_ERROR;
  912. goto done;
  913. }
  914. if (GFIELD(rsp5, RSP5_STUFF)) {
  915. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  916. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  917. err = BCME_SDIO_ERROR;
  918. goto done;
  919. }
  920. done:
  921. if (err == BCME_NODEVICE)
  922. return err;
  923. sdstd_wreg8(sd, SD_SoftwareReset,
  924. SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
  925. retries = RETRIES_LARGE;
  926. do {
  927. rflags = sdstd_rreg8(sd, SD_SoftwareReset);
  928. if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
  929. break;
  930. } while (--retries);
  931. if (!retries) {
  932. sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
  933. __FUNCTION__, rflags));
  934. err = BCME_SDIO_ERROR;
  935. }
  936. return err;
  937. }
  938. extern int
  939. sdioh_abort(sdioh_info_t *sd, uint fnum)
  940. {
  941. int ret;
  942. sdstd_lock(sd);
  943. ret = sdstd_abort(sd, fnum);
  944. sdstd_unlock(sd);
  945. return ret;
  946. }
  947. int
  948. sdioh_start(sdioh_info_t *sd, int stage)
  949. {
  950. return SUCCESS;
  951. }
  952. int
  953. sdioh_stop(sdioh_info_t *sd)
  954. {
  955. return SUCCESS;
  956. }
  957. static int
  958. sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
  959. {
  960. uint16 regval;
  961. uint retries;
  962. uint function = 0;
  963. /* If no errors, we're done */
  964. if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
  965. return SUCCESS;
  966. sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
  967. __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
  968. sdstd_rreg(sdioh_info, SD_PresentState)));
  969. sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
  970. /* On command error, issue CMD reset */
  971. if (regval & ERRINT_CMD_ERRS) {
  972. sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
  973. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  974. for (retries = RETRIES_LARGE; retries; retries--)
  975. if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
  976. break;
  977. if (!retries) {
  978. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  979. }
  980. }
  981. /* On data error, issue DAT reset */
  982. if (regval & ERRINT_DATA_ERRS) {
  983. sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
  984. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
  985. for (retries = RETRIES_LARGE; retries; retries--)
  986. if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
  987. break;
  988. if (!retries) {
  989. sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
  990. }
  991. }
  992. /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
  993. if (cmd == SDIOH_CMD_53)
  994. function = GFIELD(arg, CMD53_FUNCTION);
  995. else if (cmd == SDIOH_CMD_52)
  996. function = GFIELD(arg, CMD52_FUNCTION);
  997. if (function) {
  998. sd_trace(("%s: requesting abort for function %d after cmd %d\n",
  999. __FUNCTION__, function, cmd));
  1000. sdstd_abort(sdioh_info, function);
  1001. }
  1002. if (trap_errs)
  1003. ASSERT(0);
  1004. return ERROR;
  1005. }
  1006. /*
  1007. * Private/Static work routines
  1008. */
  1009. static bool
  1010. sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
  1011. {
  1012. int retries = RETRIES_LARGE;
  1013. uchar regval;
  1014. if (!sd)
  1015. return TRUE;
  1016. sdstd_lock(sd);
  1017. /* Reset client card */
  1018. if (client_reset && (sd->adapter_slot != -1)) {
  1019. if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
  1020. sd_err(("%s: Cannot write to card reg 0x%x\n",
  1021. __FUNCTION__, SDIOD_CCCR_IOABORT));
  1022. else
  1023. sd->card_rca = 0;
  1024. }
  1025. /* Reset host controller */
  1026. if (host_reset) {
  1027. regval = SFIELD(0, SW_RESET_ALL, 1);
  1028. sdstd_wreg8(sd, SD_SoftwareReset, regval);
  1029. do {
  1030. sd_trace(("%s: waiting for reset\n", __FUNCTION__));
  1031. } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
  1032. if (!retries) {
  1033. sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
  1034. sdstd_unlock(sd);
  1035. return (FALSE);
  1036. }
  1037. /* A reset should reset bus back to 1 bit mode */
  1038. sd->sd_mode = SDIOH_MODE_SD1;
  1039. sdstd_set_dma_mode(sd, sd->sd_dma_mode);
  1040. }
  1041. sdstd_unlock(sd);
  1042. return TRUE;
  1043. }
  1044. /* Disable device interrupt */
  1045. void
  1046. sdstd_devintr_off(sdioh_info_t *sd)
  1047. {
  1048. sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
  1049. if (sd->use_client_ints) {
  1050. sd->intmask &= ~CLIENT_INTR;
  1051. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1052. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1053. }
  1054. }
  1055. /* Enable device interrupt */
  1056. void
  1057. sdstd_devintr_on(sdioh_info_t *sd)
  1058. {
  1059. ASSERT(sd->lockcount == 0);
  1060. sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
  1061. if (sd->use_client_ints) {
  1062. uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
  1063. sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
  1064. sdstd_wreg16(sd, SD_IntrStatusEnable, status);
  1065. sd->intmask |= CLIENT_INTR;
  1066. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1067. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1068. }
  1069. }
  1070. #ifdef BCMSDYIELD
  1071. /* Enable/disable other interrupts */
  1072. void
  1073. sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
  1074. {
  1075. if (err) {
  1076. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  1077. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
  1078. }
  1079. sd->intmask |= norm;
  1080. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1081. if (sd_forcerb)
  1082. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1083. }
  1084. void
  1085. sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
  1086. {
  1087. if (err) {
  1088. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  1089. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
  1090. }
  1091. sd->intmask &= ~norm;
  1092. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1093. if (sd_forcerb)
  1094. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1095. }
  1096. #endif /* BCMSDYIELD */
  1097. static int
  1098. sdstd_host_init(sdioh_info_t *sd)
  1099. {
  1100. int num_slots, full_slot;
  1101. uint8 reg8;
  1102. uint32 card_ins;
  1103. int slot, first_bar = 0;
  1104. bool detect_slots = FALSE;
  1105. uint bar;
  1106. /* Check for Arasan ID */
  1107. if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
  1108. sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
  1109. sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
  1110. detect_slots = TRUE;
  1111. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
  1112. sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
  1113. sd->controller_type = SDIOH_TYPE_BCM27XX;
  1114. detect_slots = FALSE;
  1115. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
  1116. sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
  1117. sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
  1118. detect_slots = TRUE;
  1119. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
  1120. sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
  1121. __FUNCTION__));
  1122. sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
  1123. detect_slots = TRUE;
  1124. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
  1125. sd_info(("%s: JMicron Standard SDIO Host Controller\n",
  1126. __FUNCTION__));
  1127. sd->controller_type = SDIOH_TYPE_JMICRON;
  1128. detect_slots = TRUE;
  1129. } else {
  1130. return ERROR;
  1131. }
  1132. /*
  1133. * Determine num of slots
  1134. * Search each slot
  1135. */
  1136. first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
  1137. num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
  1138. num_slots &= 7;
  1139. num_slots++; /* map bits to num slots according to spec */
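/* Illustrative only: SD_SlotInfo packs the first BAR number in bits [2:0] and
 * (number of slots - 1) in bits [6:4], so a register value of 0x10 decodes with
 * the arithmetic above to first_bar = 0 and num_slots = 2.
 */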
  1140. if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
  1141. ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
  1142. sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
  1143. /* Set BAR0 Window to SDIOSTH core */
  1144. OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
  1145. /* Set defaults particular to this controller. */
  1146. detect_slots = TRUE;
  1147. num_slots = 1;
  1148. first_bar = 0;
  1149. /* Controller supports ADMA2, so turn it on here. */
  1150. sd->sd_dma_mode = DMA_MODE_ADMA2;
  1151. }
  1152. /* Map in each slot on the board and query it to see if a
  1153. * card is inserted. Use the first populated slot found.
  1154. */
  1155. if (sd->mem_space) {
  1156. sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
  1157. sd->mem_space = NULL;
  1158. }
  1159. full_slot = -1;
  1160. for (slot = 0; slot < num_slots; slot++) {
  1161. bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
  1162. sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
  1163. (uintptr)bar, SDIOH_REG_WINSZ);
  1164. sd->adapter_slot = -1;
  1165. if (detect_slots) {
  1166. card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
  1167. } else {
  1168. card_ins = TRUE;
  1169. }
  1170. if (card_ins) {
  1171. sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
  1172. if (full_slot < 0)
  1173. full_slot = slot;
  1174. } else {
  1175. sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
  1176. }
  1177. if (sd->mem_space) {
  1178. sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
  1179. sd->mem_space = NULL;
  1180. }
  1181. }
  1182. if (full_slot < 0) {
  1183. sd_err(("No slots on SDIO controller are populated\n"));
  1184. return -1;
  1185. }
  1186. bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
  1187. sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
  1188. sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
  1189. full_slot,
  1190. (full_slot + first_bar),
  1191. OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
  1192. sd->mem_space));
  1193. sd->adapter_slot = full_slot;
  1194. sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
  1195. switch (sd->version) {
  1196. case 0:
  1197. sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
  1198. sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
  1199. break;
  1200. case 1:
  1201. case 2:
  1202. sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
  1203. sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
  1204. break;
  1205. default:
  1206. sd_err(("%s: Host Controller version 0x%02x not supported.\n",
  1207. __FUNCTION__, sd->version));
  1208. break;
  1209. }
  1210. sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
  1211. sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
  1212. sdstd_set_dma_mode(sd, sd->sd_dma_mode);
  1213. sdstd_reset(sd, 1, 0);
  1214. /* Read SD4/SD1 mode */
  1215. if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
  1216. if (reg8 & SD4_MODE) {
  1217. sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
  1218. __FUNCTION__, reg8));
  1219. }
  1220. }
  1221. /* Default power on mode is SD1 */
  1222. sd->sd_mode = SDIOH_MODE_SD1;
  1223. sd->polled_mode = TRUE;
  1224. sd->host_init_done = TRUE;
  1225. sd->card_init_done = FALSE;
  1226. sd->adapter_slot = full_slot;
  1227. return (SUCCESS);
  1228. }
  1229. #define CMD5_RETRIES 200
  1230. static int
  1231. get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
  1232. {
  1233. int retries, status;
  1234. /* Get the Card's Operation Condition. Occasionally the board
  1235. * takes a while to become ready
  1236. */
  1237. retries = CMD5_RETRIES;
  1238. do {
  1239. *cmd_rsp = 0;
  1240. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
  1241. != SUCCESS) {
  1242. sd_err(("%s: CMD5 failed\n", __FUNCTION__));
  1243. return status;
  1244. }
  1245. sdstd_cmd_getrsp(sd, cmd_rsp, 1);
  1246. if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
  1247. sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
  1248. } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
  1249. if (!retries)
  1250. return ERROR;
  1251. return (SUCCESS);
  1252. }
  1253. static int
  1254. sdstd_client_init(sdioh_info_t *sd)
  1255. {
  1256. uint32 cmd_arg, cmd_rsp;
  1257. int status;
  1258. uint8 fn_ints;
  1259. sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
  1260. /* Clear any pending ints */
  1261. sdstd_wreg16(sd, SD_IntrStatus, 0x1ff);
  1262. sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
  1263. /* Enable both Normal and Error Status. This does not enable
  1264. * interrupts, it only enables the status bits to
  1265. * become 'live'
  1266. */
  1267. sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
  1268. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
  1269. sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
  1270. /* Start at ~400KHz clock rate for initialization */
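/* Illustrative only: with a 48 MHz base clock, the divisor of 128 used here
 * gives 48000/128 = 375 kHz, within the <= 400 kHz rate expected for card
 * identification.
 */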
  1271. if (!sdstd_start_clock(sd, 128)) {
  1272. sd_err(("sdstd_start_clock failed\n"));
  1273. return ERROR;
  1274. }
  1275. if (!sdstd_start_power(sd)) {
  1276. sd_err(("sdstd_start_power failed\n"));
  1277. return ERROR;
  1278. }
  1279. if (sd->num_funcs == 0) {
  1280. sd_err(("%s: No IO funcs!\n", __FUNCTION__));
  1281. return ERROR;
  1282. }
  1283. /* In SPI mode, issue CMD0 first */
  1284. if (sd->sd_mode == SDIOH_MODE_SPI) {
  1285. cmd_arg = 0;
  1286. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
  1287. != SUCCESS) {
  1288. sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
  1289. return status;
  1290. }
  1291. }
  1292. if (sd->sd_mode != SDIOH_MODE_SPI) {
  1293. uint16 rsp6_status;
  1294. /* Card is operational. Ask it to send an RCA */
  1295. cmd_arg = 0;
  1296. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
  1297. != SUCCESS) {
  1298. sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
  1299. return status;
  1300. }
  1301. /* Verify the card status returned with the cmd response */
  1302. sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
  1303. rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
  1304. if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
  1305. GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
  1306. GFIELD(rsp6_status, RSP6STAT_ERROR)) {
  1307. sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
  1308. __FUNCTION__, rsp6_status));
  1309. return ERROR;
  1310. }
  1311. /* Save the Card's RCA */
  1312. sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
  1313. sd_info(("RCA is 0x%x\n", sd->card_rca));
  1314. if (rsp6_status)
  1315. sd_err(("raw status is 0x%x\n", rsp6_status));
  1316. /* Select the card */
  1317. cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
  1318. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
  1319. != SUCCESS) {
  1320. sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
  1321. return status;
  1322. }
  1323. sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
  1324. if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
  1325. sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
  1326. __FUNCTION__, cmd_rsp));
  1327. return ERROR;
  1328. }
  1329. }
  1330. sdstd_card_enablefuncs(sd);
  1331. if (!sdstd_bus_width(sd, sd_sdmode)) {
  1332. sd_err(("sdstd_bus_width failed\n"));
  1333. return ERROR;
  1334. }
  1335. set_client_block_size(sd, 1, BLOCK_SIZE_4318);
  1336. fn_ints = INTR_CTL_FUNC1_EN;
  1337. if (sd->num_funcs >= 2) {
  1338. set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
  1339. fn_ints |= INTR_CTL_FUNC2_EN;
  1340. }
  1341. /* Enable/Disable Client interrupts */
  1342. /* Turn on here but disable at host controller? */
  1343. if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
  1344. (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
  1345. sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
  1346. return ERROR;
  1347. }
  1348. /* Switch to High-speed clocking mode if both host and device support it */
  1349. sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
  1350. /* After configuring for High-Speed mode, set the desired clock rate. */
  1351. if (!sdstd_start_clock(sd, (uint16)sd_divisor)) {
  1352. sd_err(("sdstd_start_clock failed\n"));
  1353. return ERROR;
  1354. }
  1355. sd->card_init_done = TRUE;
  1356. return SUCCESS;
  1357. }
  1358. static int
  1359. sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
  1360. {
  1361. uint32 regdata;
  1362. int status;
  1363. uint8 reg8;
  1364. reg8 = sdstd_rreg8(sd, SD_HostCntrl);
  1365. if (HSMode == TRUE) {
  1366. if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
  1367. sd_err(("Host Controller does not support hi-speed mode.\n"));
  1368. return BCME_ERROR;
  1369. }
  1370. sd_info(("Attempting to enable High-Speed mode.\n"));
  1371. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  1372. 1, &regdata)) != SUCCESS) {
  1373. return BCME_SDIO_ERROR;
  1374. }
  1375. if (regdata & SDIO_SPEED_SHS) {
  1376. sd_info(("Device supports High-Speed mode.\n"));
  1377. regdata |= SDIO_SPEED_EHS;
  1378. sd_info(("Writing %08x to Card at %08x\n",
  1379. regdata, SDIOD_CCCR_SPEED_CONTROL));
  1380. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  1381. 1, regdata)) != BCME_OK) {
  1382. return BCME_SDIO_ERROR;
  1383. }
  1384. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  1385. 1, &regdata)) != BCME_OK) {
  1386. return BCME_SDIO_ERROR;
  1387. }
  1388. sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
  1389. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
  1390. sd_err(("High-speed clocking mode enabled.\n"));
  1391. }
  1392. else {
  1393. sd_err(("Device does not support High-Speed Mode.\n"));
  1394. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
  1395. }
  1396. } else {
  1397. /* Force off device bit */
  1398. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  1399. 1, &regdata)) != BCME_OK) {
  1400. return status;
  1401. }
  1402. if (regdata & SDIO_SPEED_EHS) {
  1403. regdata &= ~SDIO_SPEED_EHS;
  1404. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  1405. 1, regdata)) != BCME_OK) {
  1406. return status;
  1407. }
  1408. }
  1409. sd_err(("High-speed clocking mode disabled.\n"));
  1410. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
  1411. }
  1412. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  1413. return BCME_OK;
  1414. }
  1415. /* Select DMA Mode:
  1416. * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
  1417. * Otherwise, pick the selected mode if supported.
  1418. * If not supported, use PIO mode.
  1419. */
  1420. static int
  1421. sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
  1422. {
  1423. uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
  1424. int8 prev_dma_mode = sd->sd_dma_mode;
  1425. switch (prev_dma_mode) {
  1426. case DMA_MODE_AUTO:
  1427. sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
  1428. __FUNCTION__));
  1429. if (GFIELD(sd->caps, CAP_ADMA2)) {
  1430. sd->sd_dma_mode = DMA_MODE_ADMA2;
  1431. dma_sel_bits = SDIOH_ADMA2_MODE;
  1432. } else if (GFIELD(sd->caps, CAP_ADMA1)) {
  1433. sd->sd_dma_mode = DMA_MODE_ADMA1;
  1434. dma_sel_bits = SDIOH_ADMA1_MODE;
  1435. } else if (GFIELD(sd->caps, CAP_DMA)) {
  1436. sd->sd_dma_mode = DMA_MODE_SDMA;
  1437. } else {
  1438. sd->sd_dma_mode = DMA_MODE_NONE;
  1439. }
  1440. break;
  1441. case DMA_MODE_NONE:
  1442. sd->sd_dma_mode = DMA_MODE_NONE;
  1443. break;
  1444. case DMA_MODE_SDMA:
  1445. if (GFIELD(sd->caps, CAP_DMA)) {
  1446. sd->sd_dma_mode = DMA_MODE_SDMA;
  1447. } else {
  1448. sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
  1449. sd->sd_dma_mode = DMA_MODE_NONE;
  1450. }
  1451. break;
  1452. case DMA_MODE_ADMA1:
  1453. if (GFIELD(sd->caps, CAP_ADMA1)) {
  1454. sd->sd_dma_mode = DMA_MODE_ADMA1;
  1455. dma_sel_bits = SDIOH_ADMA1_MODE;
  1456. } else {
  1457. sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
  1458. sd->sd_dma_mode = DMA_MODE_NONE;
  1459. }
  1460. break;
  1461. case DMA_MODE_ADMA2:
  1462. if (GFIELD(sd->caps, CAP_ADMA2)) {
  1463. sd->sd_dma_mode = DMA_MODE_ADMA2;
  1464. dma_sel_bits = SDIOH_ADMA2_MODE;
  1465. } else {
  1466. sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
  1467. sd->sd_dma_mode = DMA_MODE_NONE;
  1468. }
  1469. break;
  1470. case DMA_MODE_ADMA2_64:
  1471. sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
  1472. sd->sd_dma_mode = DMA_MODE_NONE;
  1473. break;
  1474. default:
  1475. sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
  1476. prev_dma_mode));
  1477. sd->sd_dma_mode = DMA_MODE_NONE;
  1478. break;
  1479. }
  1480. /* clear SysAddr, only used for SDMA */
  1481. sdstd_wreg(sd, SD_SysAddr, 0);
  1482. sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
  1483. reg8 = sdstd_rreg8(sd, SD_HostCntrl);
  1484. reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
  1485. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  1486. sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
  1487. return BCME_OK;
  1488. }
  1489. bool
  1490. sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
  1491. {
  1492. uint rc, count;
  1493. uint16 divisor;
  1494. /* turn off HC clock */
  1495. sdstd_wreg16(sd, SD_ClockCntrl,
  1496. sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
  1497. /* Set divisor */
  1498. divisor = (new_sd_divisor >> 1) << 8;
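/* Worked example, assuming the standard 8-bit divided-clock field in bits
 * [15:8] of SD_ClockCntrl: the default sd_divisor of 2 becomes
 * (2 >> 1) << 8 = 0x0100, i.e. base clock / 2 (24 MHz from a 48 MHz base).
 */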
  1499. sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
  1500. sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
  1501. sd_info(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
  1502. new_sd_divisor, divisor));
  1503. sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_TO_CLKFREQ)));
  1504. if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
  1505. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  1506. ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
  1507. ((50 % new_sd_divisor) ? "KHz" : "MHz")));
  1508. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
  1509. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  1510. ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
  1511. ((48 % new_sd_divisor) ? "KHz" : "MHz")));
  1512. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
  1513. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  1514. ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
  1515. ((33 % new_sd_divisor) ? "KHz" : "MHz")));
  1516. } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
  1517. } else {
  1518. sd_err(("Need to determine divisor for %d MHz clocks\n",
  1519. GFIELD(sd->caps, CAP_TO_CLKFREQ)));
  1520. sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
  1521. return (FALSE);
  1522. }
  1523. sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
  1524. /* Wait for clock to stabilize */
  1525. rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
  1526. count = 0;
  1527. while (!rc) {
  1528. OSL_DELAY(1);
  1529. sd_info(("Waiting for clock to become stable 0x%x\n", rc));
  1530. rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
  1531. count++;
  1532. if (count > 10000) {
  1533. sd_err(("%s:Clocks failed to stabilize after %u attempts",
  1534. __FUNCTION__, count));
  1535. return (FALSE);
  1536. }
  1537. }
  1538. /* Turn on clock */
  1539. sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
  1540. /* Set timeout control (adjust default value based on divisor).
  1541. * Disabling timeout interrupts during setting is advised by host spec.
  1542. */
  1543. {
  1544. uint16 regdata;
  1545. uint toval;
  1546. toval = sd_toctl;
  1547. divisor = new_sd_divisor;
  1548. while (toval && !(divisor & 1)) {
  1549. toval -= 1;
  1550. divisor >>= 1;
  1551. }
  1552. regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
  1553. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
  1554. sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
  1555. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
  1556. }
  1557. OSL_DELAY(2);
  1558. sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
  1559. return TRUE;
  1560. }
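/*
 * Divisor encoding used above: SD_ClockCntrl bits 15:8 carry the SDCLK divisor
 * as new_sd_divisor/2, so a 48 MHz base clock with sd_divisor = 2 writes
 * (2 >> 1) << 8 = 0x0100 and yields a 24 MHz card clock. The timeout block
 * then drops one from sd_toctl for every factor of two in the divisor, keeping
 * the absolute data timeout roughly constant as the SD clock slows (the exact
 * timeout period is controller dependent).
 */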
  1561. bool
  1562. sdstd_start_power(sdioh_info_t *sd)
  1563. {
  1564. char *s;
  1565. uint32 cmd_arg;
  1566. uint32 cmd_rsp;
  1567. uint8 pwr = 0;
  1568. int volts;
  1569. volts = 0;
  1570. s = NULL;
  1571. if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
  1572. volts = 5;
  1573. s = "1.8";
  1574. }
  1575. if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
  1576. volts = 6;
  1577. s = "3.0";
  1578. }
  1579. if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
  1580. volts = 7;
  1581. s = "3.3";
  1582. }
  1583. pwr = SFIELD(pwr, PWR_VOLTS, volts);
  1584. pwr = SFIELD(pwr, PWR_BUS_EN, 1);
  1585. sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
  1586. sd_info(("Setting Bus Power to %s Volts\n", s));
  1587. /* Wait for power to stabilize, Dongle takes longer than NIC. */
  1588. OSL_DELAY(250000);
  1589. /* Get the Card's Operation Condition. Occasionally the board
  1590. * takes a while to become ready
  1591. */
  1592. cmd_arg = 0;
  1593. cmd_rsp = 0;
  1594. if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
  1595. sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
  1596. sdstd_reset(sd, 0, 1);
  1597. return FALSE;
  1598. }
  1599. sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
  1600. sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
  1601. sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
  1602. sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
  1603. /* Verify that the card supports I/O mode */
  1604. if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
  1605. sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
1606. return FALSE;
  1607. }
  1608. sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
  1609. /* Examine voltage: Arasan only supports 3.3 volts,
  1610. * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
  1611. */
  1612. if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
  1613. sd_err(("This client does not support 3.3 volts!\n"));
1614. return FALSE;
  1615. }
  1616. sd_info(("Leaving bus power at 3.3 Volts\n"));
  1617. cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
  1618. cmd_rsp = 0;
  1619. get_ocr(sd, &cmd_arg, &cmd_rsp);
  1620. sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
  1621. return TRUE;
  1622. }
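/*
 * Power-up summary: the highest voltage advertised in the capabilities
 * register is selected (PWR_VOLTS 5/6/7 = 1.8/3.0/3.3 V), bus power is
 * enabled, and CMD5 (get_ocr) is used to read the card's operating conditions.
 * The OCR test above checks bits 20-21 of the I/O OCR, i.e. the 3.2-3.3 V and
 * 3.3-3.4 V windows; for example an OCR of 0x300000 passes while 0x080000
 * would not (illustrative values, not taken from real hardware).
 */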
  1623. bool
  1624. sdstd_bus_width(sdioh_info_t *sd, int new_mode)
  1625. {
  1626. uint32 regdata;
  1627. int status;
  1628. uint8 reg8;
  1629. sd_trace(("%s\n", __FUNCTION__));
  1630. if (sd->sd_mode == new_mode) {
  1631. sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
  1632. /* Could exit, but continue just in case... */
  1633. }
  1634. /* Set client side via reg 0x7 in CCCR */
  1635. if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
  1636. return (bool)status;
  1637. regdata &= ~BUS_SD_DATA_WIDTH_MASK;
  1638. if (new_mode == SDIOH_MODE_SD4) {
  1639. sd_info(("Changing to SD4 Mode\n"));
  1640. regdata |= SD4_MODE;
  1641. } else if (new_mode == SDIOH_MODE_SD1) {
  1642. sd_info(("Changing to SD1 Mode\n"));
  1643. } else {
  1644. sd_err(("SPI Mode not supported by Standard Host Controller\n"));
  1645. }
  1646. if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
  1647. return (bool)status;
  1648. /* Set host side via Host reg */
  1649. reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
  1650. if (new_mode == SDIOH_MODE_SD4)
  1651. reg8 |= SD4_MODE;
  1652. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  1653. sd->sd_mode = new_mode;
  1654. return TRUE;
  1655. }
  1656. static int
  1657. sdstd_driver_init(sdioh_info_t *sd)
  1658. {
  1659. sd_trace(("%s\n", __FUNCTION__));
  1660. if ((sdstd_host_init(sd)) != SUCCESS) {
  1661. return ERROR;
  1662. }
  1663. if (sdstd_client_init(sd) != SUCCESS) {
  1664. return ERROR;
  1665. }
  1666. return SUCCESS;
  1667. }
  1668. static int
  1669. sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
  1670. {
  1671. /* read 24 bits and return valid 17 bit addr */
  1672. int i;
  1673. uint32 scratch, regdata;
  1674. uint8 *ptr = (uint8 *)&scratch;
  1675. for (i = 0; i < 3; i++) {
  1676. if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
  1677. sd_err(("%s: Can't read!\n", __FUNCTION__));
  1678. *ptr++ = (uint8) regdata;
  1679. regaddr++;
  1680. }
  1681. /* Only the lower 17-bits are valid */
  1682. scratch = ltoh32(scratch);
  1683. scratch &= 0x0001FFFF;
  1684. return (scratch);
  1685. }
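/*
 * The three CMD52 reads above assemble a little-endian 24-bit pointer and mask
 * it to the 17 bits that are valid for a CIS address. Worked example with
 * hypothetical register contents: bytes 0x43, 0x10, 0x00 at regaddr..regaddr+2
 * give scratch = 0x00001043 after ltoh32(), i.e. a CIS at card address 0x01043.
 */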
  1686. static int
  1687. sdstd_card_enablefuncs(sdioh_info_t *sd)
  1688. {
  1689. int status;
  1690. uint32 regdata;
  1691. uint32 fbraddr;
  1692. uint8 func;
  1693. sd_trace(("%s\n", __FUNCTION__));
  1694. /* Get the Card's common CIS address */
  1695. sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
  1696. sd->func_cis_ptr[0] = sd->com_cis_ptr;
  1697. sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
  1698. /* Get the Card's function CIS (for each function) */
  1699. for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
  1700. func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
  1701. sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
  1702. sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
  1703. __FUNCTION__, func, sd->func_cis_ptr[func]));
  1704. }
  1705. /* Enable function 1 on the card */
  1706. regdata = SDIO_FUNC_ENABLE_1;
  1707. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
  1708. return status;
  1709. return SUCCESS;
  1710. }
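/*
 * FBR layout assumed above: each I/O function owns a SDIOD_FBR_SIZE block
 * starting at SDIOD_FBR_STARTADDR, with its CIS pointer at the
 * SDIOD_FBR_CISPTR_0 offset inside that block. With the usual 0x100-byte FBRs
 * this places function 1's CIS pointer at 0x109-0x10B and function 2's at
 * 0x209-0x20B (offsets shown for illustration; the real values come from the
 * sdio.h definitions).
 */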
  1711. /* Read client card reg */
  1712. static int
  1713. sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
  1714. {
  1715. int status;
  1716. uint32 cmd_arg;
  1717. uint32 rsp5;
  1718. cmd_arg = 0;
  1719. if ((func == 0) || (regsize == 1)) {
  1720. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
  1721. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
  1722. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
  1723. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  1724. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
  1725. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
  1726. != SUCCESS)
  1727. return status;
  1728. sdstd_cmd_getrsp(sd, &rsp5, 1);
  1729. if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
  1730. sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
  1731. __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
  1732. }
  1733. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  1734. sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
  1735. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
  1736. if (GFIELD(rsp5, RSP5_STUFF))
  1737. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  1738. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  1739. *data = GFIELD(rsp5, RSP5_DATA);
  1740. } else {
  1741. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
  1742. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  1743. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  1744. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  1745. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
  1746. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
  1747. sd->data_xfer_count = regsize;
  1748. /* sdstd_cmd_issue() returns with the command complete bit
  1749. * in the ISR already cleared
  1750. */
  1751. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
  1752. != SUCCESS)
  1753. return status;
  1754. sdstd_cmd_getrsp(sd, &rsp5, 1);
  1755. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  1756. sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
  1757. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
  1758. if (GFIELD(rsp5, RSP5_STUFF))
  1759. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  1760. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  1761. if (sd->polled_mode) {
  1762. volatile uint16 int_reg;
  1763. int retries = RETRIES_LARGE;
  1764. /* Wait for Read Buffer to become ready */
  1765. do {
  1766. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1767. } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
  1768. if (!retries) {
  1769. sd_err(("%s: Timeout on Buf_Read_Ready: "
  1770. "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
  1771. __FUNCTION__, int_reg,
  1772. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  1773. sdstd_rreg(sd, SD_PresentState)));
  1774. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  1775. return (ERROR);
  1776. }
  1777. /* Have Buffer Ready, so clear it and read the data */
  1778. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
  1779. if (regsize == 2)
  1780. *data = sdstd_rreg16(sd, SD_BufferDataPort0);
  1781. else
  1782. *data = sdstd_rreg(sd, SD_BufferDataPort0);
  1783. /* Check Status.
  1784. * After the data is read, the Transfer Complete bit should be on
  1785. */
  1786. retries = RETRIES_LARGE;
  1787. do {
  1788. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1789. } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
  1790. /* Check for any errors from the data phase */
  1791. if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
  1792. return ERROR;
  1793. if (!retries) {
  1794. sd_err(("%s: Timeout on xfer complete: "
  1795. "intr 0x%04x err 0x%04x state 0x%08x\n",
  1796. __FUNCTION__, int_reg,
  1797. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  1798. sdstd_rreg(sd, SD_PresentState)));
  1799. return (ERROR);
  1800. }
  1801. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
  1802. }
  1803. }
  1804. if (sd->polled_mode) {
  1805. if (regsize == 2)
  1806. *data &= 0xffff;
  1807. }
  1808. return SUCCESS;
  1809. }
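/*
 * Read path summary: single-byte accesses (and anything on function 0) use
 * CMD52, which returns the data directly in the R5 response; 2- and 4-byte
 * accesses use a byte-mode CMD53 and, in polled mode, wait for BUF_READ_READY,
 * pull the data from SD_BufferDataPort0, then wait for and clear
 * XFER_COMPLETE. An R5 flags value of 0x10 is treated as the normal no-error
 * case; anything else is only logged.
 */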
  1810. bool
  1811. check_client_intr(sdioh_info_t *sd)
  1812. {
  1813. uint16 raw_int, cur_int, old_int;
  1814. raw_int = sdstd_rreg16(sd, SD_IntrStatus);
  1815. cur_int = raw_int & sd->intmask;
  1816. if (!cur_int) {
  1817. /* Not an error -- might share interrupts... */
  1818. return FALSE;
  1819. }
  1820. if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
  1821. old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
  1822. sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
  1823. if (sd->client_intr_enabled && sd->use_client_ints) {
  1824. sd->intrcount++;
  1825. ASSERT(sd->intr_handler);
  1826. ASSERT(sd->intr_handler_arg);
  1827. (sd->intr_handler)(sd->intr_handler_arg);
  1828. } else {
  1829. sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
  1830. __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
  1831. }
  1832. sdstd_wreg16(sd, SD_IntrStatusEnable, old_int);
  1833. } else {
  1834. /* Local interrupt: disable, set flag, and save intrstatus */
  1835. sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
  1836. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
  1837. sd->local_intrcount++;
  1838. sd->got_hcint = TRUE;
  1839. sd->last_intrstatus = cur_int;
  1840. }
  1841. return TRUE;
  1842. }
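/*
 * Interrupt demultiplexing: when INTSTAT_CARD_INT is set the registered client
 * handler runs with the card interrupt temporarily masked in
 * SD_IntrStatusEnable so it cannot re-fire mid-service; any other enabled bit
 * is a host-local event, so signalling is disabled and the status is parked in
 * sd->last_intrstatus for the thread blocked in the command/data path.
 * Returning FALSE simply means "not ours", which matters on shared interrupt
 * lines.
 */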
  1843. void
  1844. sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
  1845. {
  1846. uint16 int_reg, err_reg;
  1847. int retries = RETRIES_LARGE;
  1848. do {
  1849. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1850. err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
  1851. } while (--retries && !(int_reg & norm) && !(err_reg & err));
  1852. norm |= sd->intmask;
  1853. if (err_reg & err)
  1854. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  1855. sd->last_intrstatus = int_reg & norm;
  1856. }
  1857. /* write a client register */
  1858. static int
  1859. sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
  1860. {
  1861. int status;
  1862. uint32 cmd_arg, rsp5, flags;
  1863. cmd_arg = 0;
  1864. if ((func == 0) || (regsize == 1)) {
  1865. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
  1866. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
  1867. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  1868. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  1869. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
  1870. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
  1871. != SUCCESS)
  1872. return status;
  1873. sdstd_cmd_getrsp(sd, &rsp5, 1);
  1874. flags = GFIELD(rsp5, RSP5_FLAGS);
  1875. if (flags && (flags != 0x10))
  1876. sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
  1877. __FUNCTION__, flags));
  1878. }
  1879. else {
  1880. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
  1881. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  1882. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  1883. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  1884. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
  1885. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  1886. sd->data_xfer_count = regsize;
  1887. /* sdstd_cmd_issue() returns with the command complete bit
  1888. * in the ISR already cleared
  1889. */
  1890. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
  1891. != SUCCESS)
  1892. return status;
  1893. sdstd_cmd_getrsp(sd, &rsp5, 1);
  1894. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  1895. sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
  1896. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
  1897. if (GFIELD(rsp5, RSP5_STUFF))
  1898. sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
  1899. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  1900. if (sd->polled_mode) {
  1901. uint16 int_reg;
  1902. int retries = RETRIES_LARGE;
  1903. /* Wait for Write Buffer to become ready */
  1904. do {
  1905. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1906. } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
  1907. if (!retries) {
  1908. sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
  1909. "errint: 0x%x PresentState 0x%x\n",
  1910. __FUNCTION__, int_reg,
  1911. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  1912. sdstd_rreg(sd, SD_PresentState)));
  1913. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  1914. return (ERROR);
  1915. }
  1916. /* Clear Write Buf Ready bit */
  1917. int_reg = 0;
  1918. int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
  1919. sdstd_wreg16(sd, SD_IntrStatus, int_reg);
  1920. /* At this point we have Buffer Ready, so write the data */
  1921. if (regsize == 2)
  1922. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
  1923. else
  1924. sdstd_wreg(sd, SD_BufferDataPort0, data);
  1925. /* Wait for Transfer Complete */
  1926. retries = RETRIES_LARGE;
  1927. do {
  1928. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1929. } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
  1930. /* Check for any errors from the data phase */
  1931. if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
  1932. return ERROR;
  1933. if (retries == 0) {
  1934. sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
  1935. "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
  1936. __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
  1937. int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
  1938. sd->r_cnt, sd->t_cnt));
  1939. }
  1940. /* Clear the status bits */
  1941. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
  1942. }
  1943. }
  1944. return SUCCESS;
  1945. }
  1946. void
  1947. sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
  1948. {
  1949. int rsp_count;
  1950. int respaddr = SD_Response0;
  1951. if (count > 4)
  1952. count = 4;
  1953. for (rsp_count = 0; rsp_count < count; rsp_count++) {
  1954. *rsp_buffer++ = sdstd_rreg(sd, respaddr);
  1955. respaddr += 4;
  1956. }
  1957. }
  1958. static int
  1959. sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
  1960. {
  1961. uint16 cmd_reg;
  1962. int retries;
  1963. uint32 cmd_arg;
  1964. uint16 xfer_reg = 0;
  1965. if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
  1966. ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
  1967. sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
  1968. return ERROR;
  1969. }
  1970. retries = RETRIES_SMALL;
  1971. while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
  1972. if (retries == RETRIES_SMALL)
  1973. sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
  1974. __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
  1975. }
  1976. if (!retries) {
  1977. sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
  1978. if (trap_errs)
  1979. ASSERT(0);
  1980. return ERROR;
  1981. }
  1982. cmd_reg = 0;
  1983. switch (cmd) {
  1984. case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
  1985. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  1986. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
  1987. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  1988. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  1989. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  1990. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  1991. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  1992. break;
  1993. case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
  1994. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  1995. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  1996. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  1997. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  1998. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  1999. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2000. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2001. break;
  2002. case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
  2003. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  2004. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  2005. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  2006. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  2007. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  2008. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2009. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2010. break;
  2011. case SDIOH_CMD_7: /* Select card - Response R1 */
  2012. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  2013. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  2014. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  2015. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  2016. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  2017. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2018. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2019. break;
  2020. case SDIOH_CMD_15: /* Set card to inactive state - Response None */
  2021. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  2022. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
  2023. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  2024. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  2025. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  2026. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2027. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2028. break;
  2029. case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
  2030. sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
  2031. __FUNCTION__,
  2032. GFIELD(arg, CMD52_FUNCTION),
  2033. GFIELD(arg, CMD52_REG_ADDR),
  2034. GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
  2035. GFIELD(arg, CMD52_DATA)));
  2036. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  2037. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  2038. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  2039. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  2040. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2041. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2042. break;
  2043. case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
  2044. sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
  2045. __FUNCTION__,
  2046. GFIELD(arg, CMD53_FUNCTION),
  2047. GFIELD(arg, CMD53_REG_ADDR),
  2048. GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
  2049. GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
  2050. GFIELD(arg, CMD53_BYTE_BLK_CNT),
  2051. GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
  2052. cmd_arg = arg;
  2053. xfer_reg = 0;
  2054. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  2055. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  2056. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  2057. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
  2058. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  2059. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  2060. use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
  2061. if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
  2062. uint16 blocksize;
  2063. uint16 blockcount;
  2064. int func;
  2065. ASSERT(sdioh_info->sd_blockmode);
  2066. func = GFIELD(cmd_arg, CMD53_FUNCTION);
  2067. blocksize = MIN((int)sdioh_info->data_xfer_count,
  2068. sdioh_info->client_block_size[func]);
  2069. blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
  2070. /* data_xfer_cnt is already setup so that for multiblock mode,
  2071. * it is the entire buffer length. For non-block or single block,
  2072. * it is < 64 bytes
  2073. */
  2074. if (use_dma) {
  2075. switch (sdioh_info->sd_dma_mode) {
  2076. case DMA_MODE_SDMA:
  2077. sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
  2078. __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
  2079. (uint32)sdioh_info->dma_phys));
  2080. sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
  2081. break;
  2082. case DMA_MODE_ADMA1:
  2083. case DMA_MODE_ADMA2:
  2084. sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
  2085. sd_create_adma_descriptor(sdioh_info, 0,
  2086. sdioh_info->dma_phys, blockcount*blocksize,
  2087. ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
  2088. ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
  2089. /* Dump descriptor if DMA debugging is enabled. */
  2090. if (sd_msglevel & SDH_DMA_VAL) {
  2091. sd_dump_adma_dscr(sdioh_info);
  2092. }
  2093. sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
  2094. sdioh_info->adma2_dscr_phys);
  2095. break;
  2096. default:
  2097. sd_err(("%s: unsupported DMA mode %d.\n",
  2098. __FUNCTION__, sdioh_info->sd_dma_mode));
  2099. break;
  2100. }
  2101. }
  2102. sd_trace(("%s: Setting block count %d, block size %d bytes\n",
  2103. __FUNCTION__, blockcount, blocksize));
  2104. sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
  2105. sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
  2106. xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
  2107. if (sdioh_info->client_block_size[func] != blocksize)
  2108. set_client_block_size(sdioh_info, 1, blocksize);
  2109. if (blockcount > 1) {
  2110. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
  2111. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
  2112. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  2113. } else {
  2114. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
  2115. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
  2116. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  2117. }
  2118. if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
  2119. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
  2120. else
  2121. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
  2122. retries = RETRIES_SMALL;
  2123. while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
  2124. PRES_DAT_INHIBIT) && --retries)
  2125. sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
  2126. __FUNCTION__, cmd));
  2127. if (!retries) {
  2128. sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
  2129. if (trap_errs)
  2130. ASSERT(0);
  2131. return ERROR;
  2132. }
  2133. sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
  2134. } else { /* Non block mode */
  2135. uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
  2136. /* The byte/block count field only has 9 bits,
  2137. * so, to do a 512-byte bytemode transfer, this
  2138. * field will contain 0, but we need to tell the
  2139. * controller we're transferring 512 bytes.
  2140. */
  2141. if (bytes == 0) bytes = 512;
  2142. if (use_dma)
  2143. sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
  2144. /* PCI: Transfer Mode register 0x0c */
  2145. xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
  2146. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  2147. if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
  2148. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
  2149. else
  2150. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
  2151. /* See table 2-8 Host Controller spec ver 1.00 */
2152. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Don't care */
  2153. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
  2154. sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
  2155. sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
  2156. retries = RETRIES_SMALL;
  2157. while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
  2158. PRES_DAT_INHIBIT) && --retries)
  2159. sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
  2160. __FUNCTION__, cmd));
  2161. if (!retries) {
  2162. sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
  2163. if (trap_errs)
  2164. ASSERT(0);
  2165. return ERROR;
  2166. }
  2167. sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
  2168. }
  2169. break;
  2170. default:
  2171. sd_err(("%s: Unknown command\n", __FUNCTION__));
  2172. return ERROR;
  2173. }
  2174. if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
  2175. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  2176. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  2177. }
  2178. /* Setup and issue the SDIO command */
  2179. sdstd_wreg(sdioh_info, SD_Arg0, arg);
  2180. sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
  2181. /* If we are in polled mode, wait for the command to complete.
  2182. * In interrupt mode, return immediately. The calling function will
  2183. * know that the command has completed when the CMDATDONE interrupt
  2184. * is asserted
  2185. */
  2186. if (sdioh_info->polled_mode) {
  2187. uint16 int_reg = 0;
  2188. int retries = RETRIES_LARGE;
  2189. do {
  2190. int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
  2191. } while (--retries &&
  2192. (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
  2193. (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
  2194. if (!retries) {
  2195. sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
  2196. "error stat 0x%x state 0x%x\n",
  2197. __FUNCTION__, int_reg,
  2198. sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
  2199. sdstd_rreg(sdioh_info, SD_PresentState)));
  2200. /* Attempt to reset CMD line when we get a CMD timeout */
  2201. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  2202. retries = RETRIES_LARGE;
  2203. do {
  2204. sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
  2205. } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
  2206. SW_RESET_CMD)) && retries--);
  2207. if (!retries) {
  2208. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  2209. }
  2210. if (trap_errs)
  2211. ASSERT(0);
  2212. return (ERROR);
  2213. }
  2214. /* Clear Command Complete interrupt */
  2215. int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
  2216. sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
  2217. /* Check for Errors */
  2218. if (sdstd_check_errs(sdioh_info, cmd, arg)) {
  2219. if (trap_errs)
  2220. ASSERT(0);
  2221. return ERROR;
  2222. }
  2223. }
  2224. return SUCCESS;
  2225. }
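/*
 * CMD53 setup recap: block-mode transfers program SD_BlockSize/SD_BlockCount
 * and enable XFER_MULTI_BLOCK plus XFER_BLK_COUNT_EN when more than one block
 * is queued, while byte-mode transfers use a block count of 1 and treat a byte
 * count of 0 as 512 (the 9-bit field cannot encode 512 directly). DMA is
 * engaged either by writing the buffer's physical address to SD_SysAddr (SDMA)
 * or by pointing SD_ADMA_SysAddr at a freshly built descriptor (ADMA1/ADMA2).
 * In polled mode the routine then spins on INTSTAT_CMD_COMPLETE and attempts a
 * CMD-line reset through SD_SoftwareReset if the command times out.
 */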
  2226. static int
  2227. sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
  2228. {
  2229. int status;
  2230. uint32 cmd_arg;
  2231. uint32 rsp5;
  2232. uint16 int_reg, int_bit;
  2233. uint flags;
  2234. int num_blocks, blocksize;
  2235. bool local_blockmode, local_dma;
  2236. bool read = rw == SDIOH_READ ? 1 : 0;
  2237. bool yield = FALSE;
  2238. ASSERT(nbytes);
  2239. cmd_arg = 0;
  2240. sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
  2241. __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
  2242. if (read) sd->r_cnt++; else sd->t_cnt++;
  2243. local_blockmode = sd->sd_blockmode;
  2244. local_dma = USE_DMA(sd);
  2245. /* Don't bother with block mode on small xfers */
  2246. if (nbytes < sd->client_block_size[func]) {
  2247. sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
  2248. nbytes, sd->client_block_size[func]));
  2249. local_blockmode = FALSE;
  2250. local_dma = FALSE;
  2251. }
  2252. if (local_blockmode) {
  2253. blocksize = MIN(sd->client_block_size[func], nbytes);
  2254. num_blocks = nbytes/blocksize;
  2255. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
  2256. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
  2257. } else {
  2258. num_blocks = 1;
  2259. blocksize = nbytes;
  2260. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
  2261. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  2262. }
  2263. if (local_dma && !read) {
  2264. bcopy(data, sd->dma_buf, nbytes);
  2265. sd_sync_dma(sd, read, nbytes);
  2266. }
  2267. if (fifo)
  2268. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
  2269. else
  2270. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  2271. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  2272. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
  2273. if (read)
  2274. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
  2275. else
  2276. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  2277. sd->data_xfer_count = nbytes;
  2278. /* sdstd_cmd_issue() returns with the command complete bit
  2279. * in the ISR already cleared
  2280. */
  2281. if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
  2282. sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
  2283. return status;
  2284. }
  2285. sdstd_cmd_getrsp(sd, &rsp5, 1);
  2286. if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
  2287. sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
  2288. "numblocks %d, blocksize %d\n",
2289. __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
  2290. if (flags & 1)
  2291. sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
  2292. "bytes %d dma %d\n",
  2293. __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
  2294. GFIELD(cmd_arg, CMD53_BLK_MODE)));
  2295. if (flags & 0x8)
  2296. sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
  2297. sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
  2298. __FUNCTION__, flags));
  2299. if (trap_errs)
  2300. ASSERT(0);
  2301. return ERROR;
  2302. }
  2303. if (GFIELD(rsp5, RSP5_STUFF))
  2304. sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
  2305. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  2306. #ifdef BCMSDYIELD
  2307. yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
  2308. #endif
  2309. if (!local_dma) {
  2310. int bytes, i;
  2311. uint32 tmp;
  2312. for (i = 0; i < num_blocks; i++) {
  2313. int words;
  2314. /* Decide which status bit we're waiting for */
  2315. if (read)
  2316. int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
  2317. else
  2318. int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
  2319. /* If not on, wait for it (or for xfer error) */
  2320. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  2321. if (!(int_reg & int_bit))
  2322. int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
  2323. /* Confirm we got the bit w/o error */
  2324. if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
  2325. sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
  2326. "errint: 0x%x PresentState 0x%x\n",
  2327. __FUNCTION__, read ? "Read" : "Write", int_reg,
  2328. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  2329. sdstd_rreg(sd, SD_PresentState)));
  2330. sdstd_dumpregs(sd);
  2331. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  2332. return (ERROR);
  2333. }
  2334. /* Clear Buf Ready bit */
  2335. sdstd_wreg16(sd, SD_IntrStatus, int_bit);
  2336. /* At this point we have Buffer Ready, write the data 4 bytes at a time */
  2337. for (words = blocksize/4; words; words--) {
  2338. if (read)
  2339. *data = sdstd_rreg(sd, SD_BufferDataPort0);
  2340. else
  2341. sdstd_wreg(sd, SD_BufferDataPort0, *data);
  2342. data++;
  2343. }
  2344. bytes = blocksize % 4;
  2345. /* If no leftover bytes, go to next block */
  2346. if (!bytes)
  2347. continue;
  2348. switch (bytes) {
  2349. case 1:
  2350. /* R/W 8 bits */
  2351. if (read)
  2352. *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
  2353. else
  2354. sdstd_wreg8(sd, SD_BufferDataPort0,
  2355. (uint8)(*(data++) & 0xff));
  2356. break;
  2357. case 2:
  2358. /* R/W 16 bits */
  2359. if (read)
  2360. *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
  2361. else
  2362. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
  2363. break;
  2364. case 3:
  2365. /* R/W 24 bits:
  2366. * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
  2367. */
  2368. if (read) {
  2369. tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
  2370. tmp |= ((uint32)(sdstd_rreg8(sd,
  2371. SD_BufferDataPort1)) << 16);
  2372. *(data++) = tmp;
  2373. } else {
  2374. tmp = *(data++);
  2375. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
  2376. sdstd_wreg8(sd, SD_BufferDataPort1,
  2377. (uint8)((tmp >> 16) & 0xff));
  2378. }
  2379. break;
  2380. default:
  2381. sd_err(("%s: Unexpected bytes leftover %d\n",
  2382. __FUNCTION__, bytes));
  2383. ASSERT(0);
  2384. break;
  2385. }
  2386. }
  2387. } /* End PIO processing */
  2388. /* Wait for Transfer Complete or Transfer Error */
  2389. int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
  2390. /* If not on, wait for it (or for xfer error) */
  2391. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  2392. if (!(int_reg & int_bit))
  2393. int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
  2394. /* Check for any errors from the data phase */
  2395. if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
  2396. return ERROR;
  2397. /* May have gotten a software timeout if not blocking? */
  2398. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  2399. if (!(int_reg & int_bit)) {
  2400. sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
  2401. "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
  2402. __FUNCTION__, read ? "R" : "W", local_dma,
  2403. sdstd_rreg(sd, SD_PresentState), int_reg,
  2404. sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
  2405. sd->r_cnt, sd->t_cnt));
  2406. sdstd_dumpregs(sd);
  2407. return ERROR;
  2408. }
  2409. /* Clear the status bits */
  2410. int_reg = int_bit;
  2411. if (local_dma) {
  2412. /* DMA Complete */
  2413. /* Reads in particular don't have DMA_COMPLETE set */
  2414. int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
  2415. }
  2416. sdstd_wreg16(sd, SD_IntrStatus, int_reg);
  2417. /* Fetch data */
  2418. if (local_dma && read) {
  2419. sd_sync_dma(sd, read, nbytes);
  2420. bcopy(sd->dma_buf, data, nbytes);
  2421. }
  2422. return SUCCESS;
  2423. }
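/*
 * PIO tail handling above: whole 32-bit words move through SD_BufferDataPort0,
 * and a block length that is not a multiple of four is finished with an 8- or
 * 16-bit access (three leftover bytes use a 16-bit access on port 0 plus an
 * 8-bit access on port 1). A 6-byte transfer, for example, moves one word and
 * then one 16-bit access per block.
 */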
  2424. static int
  2425. set_client_block_size(sdioh_info_t *sd, int func, int block_size)
  2426. {
  2427. int base;
  2428. int err = 0;
  2429. sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
  2430. sd->client_block_size[func] = block_size;
  2431. /* Set the block size in the SDIO Card register */
  2432. base = func * SDIOD_FBR_SIZE;
  2433. err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
  2434. if (!err) {
  2435. err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
  2436. (block_size >> 8) & 0xff);
  2437. }
  2438. /* Do not set the block size in the SDIO Host register, that
  2439. * is func dependent and will get done on an individual
  2440. * transaction basis
  2441. */
  2442. return (err ? BCME_SDIO_ERROR : 0);
  2443. }
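/*
 * The block size is written as two little-endian bytes into the function's FBR
 * (base = func * SDIOD_FBR_SIZE): a 64-byte block writes 0x40 then 0x00, a
 * 512-byte block writes 0x00 then 0x02. Only the card side is updated here;
 * the host's SD_BlockSize register is programmed per transfer in
 * sdstd_cmd_issue().
 */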
  2444. /* Reset and re-initialize the device */
  2445. int
  2446. sdioh_sdio_reset(sdioh_info_t *si)
  2447. {
  2448. uint8 hreg;
  2449. /* Reset the attached device (use slower clock for safety) */
  2450. sdstd_start_clock(si, 128);
  2451. sdstd_reset(si, 0, 1);
  2452. /* Reset portions of the host state accordingly */
  2453. hreg = sdstd_rreg8(si, SD_HostCntrl);
  2454. hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
  2455. hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
  2456. si->sd_mode = SDIOH_MODE_SD1;
  2457. /* Reinitialize the card */
  2458. si->card_init_done = FALSE;
  2459. return sdstd_client_init(si);
  2460. }
  2461. static void
  2462. sd_map_dma(sdioh_info_t * sd)
  2463. {
  2464. void *va;
  2465. if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
  2466. &sd->dma_start_phys, 0x12, 12)) == NULL) {
  2467. sd->sd_dma_mode = DMA_MODE_NONE;
  2468. sd->dma_start_buf = 0;
  2469. sd->dma_buf = (void *)0;
  2470. sd->dma_phys = 0;
  2471. sd->alloced_dma_size = SD_PAGE;
  2472. sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
  2473. } else {
  2474. sd->dma_start_buf = va;
  2475. sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
  2476. sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
  2477. sd->alloced_dma_size = SD_PAGE;
  2478. sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n",
  2479. __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys));
  2480. sd_fill_dma_data_buf(sd, 0xA5);
  2481. }
  2482. if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
  2483. &sd->adma2_dscr_start_phys, 0x12, 12)) == NULL) {
  2484. sd->sd_dma_mode = DMA_MODE_NONE;
  2485. sd->adma2_dscr_start_buf = 0;
  2486. sd->adma2_dscr_buf = (void *)0;
  2487. sd->adma2_dscr_phys = 0;
  2488. sd->alloced_adma2_dscr_size = 0;
  2489. sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
  2490. "Disabling DMA support.\n", __FUNCTION__));
  2491. } else {
  2492. sd->adma2_dscr_start_buf = va;
  2493. sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
  2494. sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
  2495. sd->alloced_adma2_dscr_size = SD_PAGE;
  2496. }
  2497. sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n",
  2498. __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
  2499. sd->adma2_dscr_phys));
  2500. sd_clear_adma_dscr_buf(sd);
  2501. }
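/*
 * DMA mapping notes: one SD_PAGE (4 KB, from SD_PAGE_BITS) region is allocated
 * for data and another for ADMA descriptors, and both the virtual and physical
 * addresses are rounded up to the next SD_PAGE boundary so a transfer never
 * crosses a page. This relies on the OSL allocator returning memory that is
 * page aligned (or has enough slack) for the ROUNDUP to stay inside the
 * allocation; if either allocation fails the driver falls back to PIO by
 * setting sd_dma_mode to DMA_MODE_NONE.
 */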
  2502. static void
  2503. sd_unmap_dma(sdioh_info_t * sd)
  2504. {
  2505. if (sd->dma_start_buf) {
  2506. DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
  2507. sd->dma_start_phys, 0x12);
  2508. }
  2509. if (sd->adma2_dscr_start_buf) {
  2510. DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
  2511. sd->adma2_dscr_start_phys, 0x12);
  2512. }
  2513. }
  2514. static void sd_clear_adma_dscr_buf(sdioh_info_t *sd)
  2515. {
  2516. bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
  2517. sd_dump_adma_dscr(sd);
  2518. }
  2519. static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
  2520. {
  2521. memset((char *)sd->dma_buf, data, SD_PAGE);
  2522. }
  2523. static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
  2524. uint32 addr_phys, uint16 length, uint16 flags)
  2525. {
  2526. adma2_dscr_32b_t *adma2_dscr_table;
  2527. adma1_dscr_t *adma1_dscr_table;
  2528. adma2_dscr_table = sd->adma2_dscr_buf;
  2529. adma1_dscr_table = sd->adma2_dscr_buf;
  2530. switch (sd->sd_dma_mode) {
  2531. case DMA_MODE_ADMA2:
  2532. sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
  2533. __FUNCTION__, index));
  2534. adma2_dscr_table[index].phys_addr = addr_phys;
  2535. adma2_dscr_table[index].len_attr = length << 16;
  2536. adma2_dscr_table[index].len_attr |= flags;
  2537. break;
  2538. case DMA_MODE_ADMA1:
  2539. /* ADMA1 requires two descriptors, one for len
  2540. * and the other for data transfer
  2541. */
  2542. index <<= 1;
  2543. sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
  2544. __FUNCTION__, index));
  2545. adma1_dscr_table[index].phys_addr_attr = length << 12;
  2546. adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
  2547. ADMA2_ATTRIBUTE_VALID);
  2548. adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
  2549. adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
  2550. break;
  2551. default:
  2552. sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
  2553. __FUNCTION__, sd->sd_dma_mode));
  2554. break;
  2555. }
  2556. }
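/*
 * Descriptor formats built above: an ADMA2 entry is a 32-bit buffer address
 * plus a word whose upper half is the length and lower half the attribute
 * flags (VALID/END/INT and the ACT action code). ADMA1 packs address and
 * attributes into a single word, so one logical transfer needs a SET(length)
 * descriptor followed by a TRAN(address) descriptor - hence the "index <<= 1"
 * and the pair of table entries written for that mode.
 */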
  2557. static void sd_dump_adma_dscr(sdioh_info_t *sd)
  2558. {
  2559. adma2_dscr_32b_t *adma2_dscr_table;
  2560. adma1_dscr_t *adma1_dscr_table;
  2561. uint32 i = 0;
  2562. uint16 flags;
  2563. char flags_str[32];
  2564. ASSERT(sd->adma2_dscr_buf != NULL);
  2565. adma2_dscr_table = sd->adma2_dscr_buf;
  2566. adma1_dscr_table = sd->adma2_dscr_buf;
  2567. switch (sd->sd_dma_mode) {
  2568. case DMA_MODE_ADMA2:
  2569. sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
  2570. SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
  2571. sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
  2572. " |\n"));
  2573. while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
  2574. flags = adma2_dscr_table->len_attr & 0xFFFF;
  2575. sprintf(flags_str, "%s%s%s%s",
  2576. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2577. ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
  2578. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2579. ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
  2580. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2581. ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
  2582. (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
  2583. (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
  2584. (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
  2585. sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
  2586. i, adma2_dscr_table, adma2_dscr_table->phys_addr,
  2587. adma2_dscr_table->len_attr >> 16, flags, flags_str));
  2588. i++;
  2589. /* Follow LINK descriptors or skip to next. */
  2590. if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2591. ADMA2_ATTRIBUTE_ACT_LINK) {
  2592. adma2_dscr_table = phys_to_virt(
  2593. adma2_dscr_table->phys_addr);
  2594. } else {
  2595. adma2_dscr_table++;
  2596. }
  2597. }
  2598. break;
  2599. case DMA_MODE_ADMA1:
  2600. sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
  2601. SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
  2602. sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));
  2603. for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
  2604. flags = adma1_dscr_table->phys_addr_attr & 0x3F;
  2605. sprintf(flags_str, "%s%s%s%s",
  2606. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2607. ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
  2608. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2609. ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
  2610. ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2611. ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
  2612. (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
  2613. (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
  2614. (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
  2615. sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
  2616. i, adma1_dscr_table,
  2617. adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
  2618. flags, flags_str));
  2619. /* Follow LINK descriptors or skip to next. */
  2620. if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
  2621. ADMA2_ATTRIBUTE_ACT_LINK) {
  2622. adma1_dscr_table = phys_to_virt(
  2623. adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
  2624. } else {
  2625. adma1_dscr_table++;
  2626. }
  2627. }
  2628. break;
  2629. default:
  2630. sd_err(("Unknown DMA Descriptor Table Format.\n"));
  2631. break;
  2632. }
  2633. }
  2634. static void sdstd_dumpregs(sdioh_info_t *sd)
  2635. {
  2636. sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
  2637. sdstd_rreg16(sd, SD_IntrStatus),
  2638. sdstd_rreg16(sd, SD_ErrorIntrStatus)));
  2639. sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
  2640. sdstd_rreg16(sd, SD_IntrStatusEnable),
  2641. sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
  2642. sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
  2643. sdstd_rreg16(sd, SD_IntrSignalEnable),
  2644. sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
  2645. }