
/drivers/staging/crystalhd/crystalhd_hw.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/***************************************************************************
 * Copyright (c) 2005-2009, Broadcom Corporation.
 *
 * Name: crystalhd_hw.c
 *
 * Description:
 *	BCM70010 Linux driver HW layer.
 *
 **********************************************************************
 * This file is part of the crystalhd device driver.
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
 **********************************************************************/

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "crystalhd_hw.h"

/* Functions internal to this file */

static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
{
        bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
        bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
}
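
/*
 * SDRAM_PARAM packs the DDR timing fields computed below, each expressed
 * in cycles of the 5 ns (200 MHz) DRAM clock assumed by these formulas:
 * tras at bit 0, trp at bit 7, trrd at bit 10, twr at bit 12, twtr at
 * bit 16 and trfc at bit 19 (the trcd term at bit 4 is commented out).
 * Working the arithmetic through, the value written is
 * (7 << 0) | (2 << 7) | (1 << 10) | (4 << 12) | (3 << 16) | (12 << 19)
 * = 0x634507.
 */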
static void crystalhd_start_dram(struct crystalhd_adp *adp)
{
        bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
        /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
                      ((15 / 5 - 1) << 7) |     /* trp */
                      ((10 / 5 - 1) << 10) |    /* trrd */
                      ((15 / 5 + 1) << 12) |    /* twr */
                      ((2 + 1) << 16) |         /* twtr */
                      ((70 / 5 - 2) << 19) |    /* trfc */
                      (0 << 23));

        bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
        bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
        bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
        bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
        bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
        bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
        bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
        /* setting the refresh rate here */
        bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
}
static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
{
        union link_misc_perst_deco_ctrl rst_deco_cntrl;
        union link_misc_perst_clk_ctrl rst_clk_cntrl;
        uint32_t temp;

        /*
         * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
         * delay to allow PLL to lock Clear alternate clock, stop clock bits
         */
        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
        rst_clk_cntrl.pll_pwr_dn = 0;
        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
        msleep_interruptible(50);

        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
        rst_clk_cntrl.stop_core_clk = 0;
        rst_clk_cntrl.sel_alt_clk = 0;
        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
        msleep_interruptible(50);

        /*
         * Bus Arbiter Timeout: GISB_ARBITER_TIMER
         * Set internal bus arbiter timeout to 40us based on core clock speed
         * (63MHz * 40us = 0x9D8)
         */
        crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);

        /*
         * Decoder clocks: MISC_PERST_DECODER_CTRL
         * Enable clocks while 7412 reset is asserted, delay
         * De-assert 7412 reset
         */
        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
        rst_deco_cntrl.stop_bcm_7412_clk = 0;
        rst_deco_cntrl.bcm7412_rst = 1;
        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
        msleep_interruptible(10);

        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
        rst_deco_cntrl.bcm7412_rst = 0;
        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
        msleep_interruptible(50);

        /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
        crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);

        /* Clear bit 29 of 0x404 */
        temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
        temp &= ~BC_BIT(29);
        crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

        /* 2.5V regulator must be set to 2.6 volts (+6%) */
        /* FIXME: jarod: what's the point of this reg read? */
        temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
        crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);

        return true;
}
static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
{
        union link_misc_perst_deco_ctrl rst_deco_cntrl;
        union link_misc_perst_clk_ctrl rst_clk_cntrl;
        uint32_t temp;

        /*
         * Decoder clocks: MISC_PERST_DECODER_CTRL
         * Assert 7412 reset, delay
         * Assert 7412 stop clock
         */
        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
        rst_deco_cntrl.stop_bcm_7412_clk = 1;
        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
        msleep_interruptible(50);

        /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
         * Set internal bus arbiter timeout to 40us based on core clock speed
         * (6.75MHZ * 40us = 0x10E)
         */
        crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);

        /* Link clocks: MISC_PERST_CLOCK_CTRL
         * Stop core clk, delay
         * Set alternate clk, delay, set PLL power down
         */
        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
        rst_clk_cntrl.stop_core_clk = 1;
        rst_clk_cntrl.sel_alt_clk = 1;
        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
        msleep_interruptible(50);

        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
        rst_clk_cntrl.pll_pwr_dn = 1;
        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);

        /*
         * Read and restore the Transaction Configuration Register
         * after core reset
         */
        temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);

        /*
         * Link core soft reset: MISC3_RESET_CTRL
         * - Write BIT[0]=1 and read it back for core reset to take place
         */
        crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
        msleep_interruptible(50);

        /* restore the transaction configuration register */
        crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

        return true;
}
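
/*
 * The interrupt mask is manipulated through a set/clear register pair:
 * writing a 1 to a bit in INTR_INTR_MSK_SET_REG masks (disables) that
 * source, while writing the same bit to INTR_INTR_MSK_CLR_REG unmasks
 * (enables) it. Set/clear pairs like this avoid a read-modify-write race
 * on the live mask register. Note that the two helpers below build an
 * identical bit pattern; only the destination register differs.
 */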
static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
{
        union intr_mask_reg intr_mask;

        intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
        intr_mask.mask_pcie_err = 1;
        intr_mask.mask_pcie_rbusmast_err = 1;
        intr_mask.mask_pcie_rgr_bridge = 1;
        intr_mask.mask_rx_done = 1;
        intr_mask.mask_rx_err = 1;
        intr_mask.mask_tx_done = 1;
        intr_mask.mask_tx_err = 1;
        crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
}

static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
{
        union intr_mask_reg intr_mask;

        intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
        intr_mask.mask_pcie_err = 1;
        intr_mask.mask_pcie_rbusmast_err = 1;
        intr_mask.mask_pcie_rgr_bridge = 1;
        intr_mask.mask_rx_done = 1;
        intr_mask.mask_rx_err = 1;
        intr_mask.mask_tx_done = 1;
        intr_mask.mask_tx_err = 1;
        crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
}
static void crystalhd_clear_errors(struct crystalhd_adp *adp)
{
        uint32_t reg;

        /*
         * These DMA error-status registers appear to be write-1-to-clear:
         * writing back the bits we just read clears exactly those bits,
         * which is why each value read is written straight back.
         */
        reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
        if (reg)
                crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);

        reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
        if (reg)
                crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);

        reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
        if (reg)
                crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
}
static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
{
        uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

        if (intr_sts) {
                crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
                /* Write End Of Interrupt for PCIE */
                crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
        }
}

static void crystalhd_soft_rst(struct crystalhd_adp *adp)
{
        uint32_t val;

        /* Assert c011 soft reset */
        bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
        msleep_interruptible(50);

        /* Release c011 soft reset */
        bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);

        /* Disable Stuffing.. */
        val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
        val |= BC_BIT(8);
        crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
}

static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
{
        uint32_t i = 0, reg;

        crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));

        crystalhd_reg_wr(adp, AES_CMD, 0);
        crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
        crystalhd_reg_wr(adp, AES_CMD, 0x1);

        /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
        for (i = 0; i < 100; ++i) {
                reg = crystalhd_reg_rd(adp, AES_STATUS);
                if (reg & 0x1)
                        return true;
                msleep_interruptible(10);
        }

        return false;
}
static bool crystalhd_start_device(struct crystalhd_adp *adp)
{
        uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;

        BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");

        reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
        reg_pwrmgmt &= ~ASPM_L1_ENABLE;
        crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);

        if (!crystalhd_bring_out_of_rst(adp)) {
                BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
                return false;
        }

        crystalhd_disable_interrupts(adp);

        crystalhd_clear_errors(adp);
        crystalhd_clear_interrupts(adp);

        crystalhd_enable_interrupts(adp);

        /* Enable the option for getting the total no. of DWORDS
         * that have been transferred by the RXDMA engine
         */
        dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
        dbg_options |= 0x10;
        crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);

        /* Enable PCI Global Control options */
        glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
        glb_cntrl |= 0x100;
        glb_cntrl |= 0x8000;
        crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);

        crystalhd_enable_interrupts(adp);

        crystalhd_soft_rst(adp);
        crystalhd_start_dram(adp);
        crystalhd_enable_uarts(adp);

        return true;
}

static bool crystalhd_stop_device(struct crystalhd_adp *adp)
{
        uint32_t reg;

        BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
        /* Clear and disable interrupts */
        crystalhd_disable_interrupts(adp);
        crystalhd_clear_errors(adp);
        crystalhd_clear_interrupts(adp);

        if (!crystalhd_put_in_reset(adp))
                BCMLOG_ERR("Failed to Put Link To Reset State\n");

        reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
        reg |= ASPM_L1_ENABLE;
        crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);

        /* Set PCI Clk Req */
        reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
        reg |= PCI_CLK_REQ_ENABLE;
        crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);

        return true;
}
static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
{
        unsigned long flags = 0;
        struct crystalhd_rx_dma_pkt *temp = NULL;

        if (!hw)
                return NULL;

        spin_lock_irqsave(&hw->lock, flags);
        temp = hw->rx_pkt_pool_head;
        if (temp) {
                hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
                temp->dio_req = NULL;
                temp->pkt_tag = 0;
                temp->flags = 0;
        }
        spin_unlock_irqrestore(&hw->lock, flags);

        return temp;
}

static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
                                     struct crystalhd_rx_dma_pkt *pkt)
{
        unsigned long flags = 0;

        if (!hw || !pkt)
                return;

        spin_lock_irqsave(&hw->lock, flags);
        pkt->next = hw->rx_pkt_pool_head;
        hw->rx_pkt_pool_head = pkt;
        spin_unlock_irqrestore(&hw->lock, flags);
}
/*
 * Call back from TX - IOQ deletion.
 *
 * This routine will release the TX DMA rings allocated
 * during the setup_dma_rings interface.
 *
 * Memory is allocated per DMA ring basis. This is just
 * a placeholder to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
}

/*
 * Rx Packet release callback..
 *
 * Release all user-mapped capture buffers and our DMA packets
 * back to our free pool. The actual cleanup of the DMA
 * ring descriptors happens during DMA ring release.
 */
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
{
        struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
        struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;

        if (!pkt || !hw) {
                BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
                return;
        }

        if (pkt->dio_req)
                crystalhd_unmap_dio(hw->adp, pkt->dio_req);
        else
                BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);

        crystalhd_hw_free_rx_pkt(hw, pkt);
}

#define crystalhd_hw_delete_ioq(adp, q)                 \
        if (q) {                                        \
                crystalhd_delete_dioq(adp, q);          \
                q = NULL;                               \
        }
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
        if (!hw)
                return;

        BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
        crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
        crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
        crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
        crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
        crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
}

#define crystalhd_hw_create_ioq(sts, hw, q, cb)                 \
do {                                                            \
        sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);       \
        if (sts != BC_STS_SUCCESS)                              \
                goto hw_create_ioq_err;                         \
} while (0)

/*
 * Create IOQs..
 *
 * TX - Active & Free
 * RX - Active, Ready and Free.
 */
static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
        enum BC_STATUS sts = BC_STS_SUCCESS;

        if (!hw) {
                BCMLOG_ERR("Invalid Arg!!\n");
                return BC_STS_INV_ARG;
        }

        crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
                                crystalhd_tx_desc_rel_call_back);
        crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
                                crystalhd_tx_desc_rel_call_back);
        crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
                                crystalhd_rx_pkt_rel_call_back);
        crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
                                crystalhd_rx_pkt_rel_call_back);
        crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
                                crystalhd_rx_pkt_rel_call_back);

        return sts;

hw_create_ioq_err:
        crystalhd_hw_delete_ioqs(hw);

        return sts;
}
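
/*
 * How the coded-picture-buffer (CPB) fullness check below works: the
 * hardware exposes a circular input FIFO through base/end/read/write
 * pointer registers. Fullness is the distance from read to write
 * pointer, modulo the buffer size; free space is size minus fullness.
 * For example, with base=0x0000, end=0x1000, writep=0x200 and
 * readp=0x800, fullness is (0x1000 - 0x0000) - (0x800 - 0x200) = 0xA00,
 * leaving 0x600 bytes free. The FIFO is reported "full" whenever the
 * free space could not accept needed_sz bytes while still keeping
 * BC_INFIFO_THRESHOLD bytes in reserve.
 */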
static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
                                   bool b_188_byte_pkts, uint8_t flags)
{
        uint32_t base, end, writep, readp;
        uint32_t cpbSize, cpbFullness, fifoSize;

        if (flags & 0x02) { /* ASF Bit is set */
                base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
                end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
                writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
                readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
        } else if (b_188_byte_pkts) { /* Encrypted 188 byte packets */
                base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
                end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
                writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
                readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
        } else {
                base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
                end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
                writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
                readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
        }

        cpbSize = end - base;
        if (writep >= readp)
                cpbFullness = writep - readp;
        else
                cpbFullness = (end - base) - (readp - writep);

        fifoSize = cpbSize - cpbFullness;

        if (fifoSize < BC_INFIFO_THRESHOLD)
                return true;

        if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
                return true;

        return false;
}
static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
                                                   uint32_t list_id, enum BC_STATUS cs)
{
        struct tx_dma_pkt *tx_req;

        if (!hw || !list_id) {
                BCMLOG_ERR("Invalid Arg..\n");
                return BC_STS_INV_ARG;
        }

        hw->pwr_lock--;

        tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
        if (!tx_req) {
                if (cs != BC_STS_IO_USER_ABORT)
                        BCMLOG_ERR("Find and Fetch Did not find req\n");
                return BC_STS_NO_DATA;
        }

        if (tx_req->call_back) {
                tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
                tx_req->dio_req = NULL;
                tx_req->cb_event = NULL;
                tx_req->call_back = NULL;
        } else {
                BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
                       tx_req->list_tag);
        }

        /* Now put the tx_list back in the FreeQ */
        tx_req->list_tag = 0;

        return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}
static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
        uint32_t err_mask, tmp;
        unsigned long flags = 0;

        err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
                   MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
                   MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

        if (!(err_sts & err_mask))
                return false;

        BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);

        tmp = err_mask;

        if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
                tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

        if (tmp) {
                spin_lock_irqsave(&hw->lock, flags);
                /* reset list index. */
                hw->tx_list_post_index = 0;
                spin_unlock_irqrestore(&hw->lock, flags);
        }

        tmp = err_sts & err_mask;
        crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

        return true;
}

static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
        uint32_t err_mask, tmp;
        unsigned long flags = 0;

        err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
                   MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
                   MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

        if (!(err_sts & err_mask))
                return false;

        BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);

        tmp = err_mask;

        if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
                tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

        if (tmp) {
                spin_lock_irqsave(&hw->lock, flags);
                /* reset list index. */
                hw->tx_list_post_index = 0;
                spin_unlock_irqrestore(&hw->lock, flags);
        }

        tmp = err_sts & err_mask;
        crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

        return true;
}

static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
{
        uint32_t err_sts;

        if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
                                             BC_STS_SUCCESS);

        if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
                                             BC_STS_SUCCESS);

        if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
                         INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
                /* No error mask set.. */
                return;
        }

        /* Handle Tx errors. */
        err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);

        if (crystalhd_tx_list0_handler(hw, err_sts))
                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
                                             BC_STS_ERROR);

        if (crystalhd_tx_list1_handler(hw, err_sts))
                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
                                             BC_STS_ERROR);

        hw->stats.tx_errors++;
}
static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
                                   uint32_t ul_desc_index, uint32_t cnt)
{
        uint32_t ix, ll = 0;

        if (!p_dma_desc || !cnt)
                return;

        /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
         * setting ll (log level, I presume) to non-zero? */
        if (!ll)
                return;

        /* Index with ix, not ul_desc_index, so each requested descriptor
         * is dumped rather than the first one repeatedly. */
        for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
                BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
                       ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
                       ix,
                       p_dma_desc[ix].buff_addr_high,
                       p_dma_desc[ix].buff_addr_low,
                       p_dma_desc[ix].next_desc_addr_high,
                       p_dma_desc[ix].next_desc_addr_low,
                       p_dma_desc[ix].xfer_size,
                       p_dma_desc[ix].intr_enable,
                       p_dma_desc[ix].last_rec_indicator);
        }
}
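
/*
 * Build a hardware DMA descriptor chain from a user scatter-gather list.
 * The descriptors live in one physically contiguous block starting at
 * desc_paddr_base, so each entry's next-pointer is simply the next slot
 * in that block. Every scatter-gather element must be a multiple of
 * 4 bytes; any sub-dword tail is handled by an extra "fill byte"
 * descriptor driven by ioreq->fb_size. The final descriptor gets
 * last_rec_indicator and intr_enable set so the engine raises a
 * completion interrupt when the whole chain is done.
 */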
static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
                                             struct dma_descriptor *desc,
                                             dma_addr_t desc_paddr_base,
                                             uint32_t sg_cnt, uint32_t sg_st_ix,
                                             uint32_t sg_st_off, uint32_t xfr_sz)
{
        uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
        dma_addr_t desc_phy_addr = desc_paddr_base;
        union addr_64 addr_temp;

        if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
            (!sg_cnt && !ioreq->uinfo.dir_tx)) {
                BCMLOG_ERR("Invalid Args\n");
                return BC_STS_INV_ARG;
        }

        for (ix = 0; ix < sg_cnt; ix++) {

                /* Setup SGLE index. */
                sg_ix = ix + sg_st_ix;

                /* Get SGLE length */
                len = crystalhd_get_sgle_len(ioreq, sg_ix);
                if (len % 4) {
                        BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
                        return BC_STS_NOT_IMPL;
                }
                /* Setup DMA desc with Phy addr & Length at current index. */
                addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
                if (sg_ix == sg_st_ix) {
                        addr_temp.full_addr += sg_st_off;
                        len -= sg_st_off;
                }
                memset(&desc[ix], 0, sizeof(desc[ix]));
                desc[ix].buff_addr_low = addr_temp.low_part;
                desc[ix].buff_addr_high = addr_temp.high_part;
                desc[ix].dma_dir = ioreq->uinfo.dir_tx;

                /* Chain DMA descriptor. */
                addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
                desc[ix].next_desc_addr_low = addr_temp.low_part;
                desc[ix].next_desc_addr_high = addr_temp.high_part;

                if ((count + len) > xfr_sz)
                        len = xfr_sz - count;
                /* Debug.. */
                if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
                        BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
                                   len, ix, count, xfr_sz, sg_cnt);
                        return BC_STS_ERROR;
                }
                /* Length expects Multiple of 4 */
                desc[ix].xfer_size = (len / 4);

                crystalhd_hw_dump_desc(desc, ix, 1);

                count += len;
                desc_phy_addr += sizeof(struct dma_descriptor);
        }

        last_desc_ix = ix - 1;

        if (ioreq->fb_size) {
                memset(&desc[ix], 0, sizeof(desc[ix]));
                addr_temp.full_addr = ioreq->fb_pa;
                desc[ix].buff_addr_low = addr_temp.low_part;
                desc[ix].buff_addr_high = addr_temp.high_part;
                desc[ix].dma_dir = ioreq->uinfo.dir_tx;
                desc[ix].xfer_size = 1;
                desc[ix].fill_bytes = 4 - ioreq->fb_size;
                count += ioreq->fb_size;
                last_desc_ix++;
        }

        /* setup last descriptor.. */
        desc[last_desc_ix].last_rec_indicator = 1;
        desc[last_desc_ix].next_desc_addr_low = 0;
        desc[last_desc_ix].next_desc_addr_high = 0;
        desc[last_desc_ix].intr_enable = 1;

        crystalhd_hw_dump_desc(desc, last_desc_ix, 1);

        if (count != xfr_sz) {
                BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
                return BC_STS_ERROR;
        }

        return BC_STS_SUCCESS;
}
static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
                                                     struct dma_desc_mem *pdesc_mem,
                                                     uint32_t *uv_desc_index)
{
        struct dma_descriptor *desc = NULL;
        dma_addr_t desc_paddr_base = 0;
        uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
        uint32_t xfr_sz = 0;
        enum BC_STATUS sts = BC_STS_SUCCESS;

        /* Check params.. */
        if (!ioreq || !pdesc_mem || !uv_desc_index) {
                BCMLOG_ERR("Invalid Args\n");
                return BC_STS_INV_ARG;
        }

        if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
            !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
                BCMLOG_ERR("Invalid Args\n");
                return BC_STS_INV_ARG;
        }

        if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
                BCMLOG_ERR("UV offset for TX??\n");
                return BC_STS_INV_ARG;
        }

        desc = pdesc_mem->pdma_desc_start;
        desc_paddr_base = pdesc_mem->phy_addr;

        if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
                sg_cnt = ioreq->sg_cnt;
                xfr_sz = ioreq->uinfo.xfr_len;
        } else {
                sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
                xfr_sz = ioreq->uinfo.uv_offset;
        }

        sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
                                     sg_st_ix, sg_st_off, xfr_sz);
        if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
                return sts;

        /* Prepare for UV mapping.. */
        desc = &pdesc_mem->pdma_desc_start[sg_cnt];
        desc_paddr_base = pdesc_mem->phy_addr +
                          (sg_cnt * sizeof(struct dma_descriptor));

        /* Done with desc addr.. now update sg stuff. */
        sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
        xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
        sg_st_ix = ioreq->uinfo.uv_sg_ix;
        sg_st_off = ioreq->uinfo.uv_sg_off;

        sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
                                     sg_st_ix, sg_st_off, xfr_sz);
        if (sts != BC_STS_SUCCESS)
                return sts;

        *uv_desc_index = sg_st_ix;

        return sts;
}
static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
{
        uint32_t dma_cntrl;

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
        if (!(dma_cntrl & DMA_START_BIT)) {
                dma_cntrl |= DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
                                 dma_cntrl);
        }
}

/* _CHECK_THIS_
 *
 * Verify if the Stop generates a completion interrupt or not.
 * if it does not generate an interrupt, then add polling here.
 */
static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
        uint32_t dma_cntrl, cnt = 30;
        uint32_t l1 = 1, l2 = 1;
        unsigned long flags = 0;

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);

        BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");

        if (!(dma_cntrl & DMA_START_BIT)) {
                BCMLOG(BCMLOG_DBG, "Already Stopped\n");
                return BC_STS_SUCCESS;
        }

        crystalhd_disable_interrupts(hw->adp);

        /* Issue stop to HW */
        /* This bit when set gave problems. Please check */
        dma_cntrl &= ~DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);

        BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");

        /* Poll for 3 seconds (30 * 100ms) on both the lists.. */
        while ((l1 || l2) && cnt) {

                if (l1) {
                        l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
                        l1 &= DMA_START_BIT;
                }

                if (l2) {
                        l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
                        l2 &= DMA_START_BIT;
                }

                msleep_interruptible(100);

                cnt--;
        }

        if (!cnt) {
                BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
                crystalhd_enable_interrupts(hw->adp);
                return BC_STS_ERROR;
        }

        spin_lock_irqsave(&hw->lock, flags);
        hw->tx_list_post_index = 0;
        spin_unlock_irqrestore(&hw->lock, flags);
        BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
        crystalhd_enable_interrupts(hw->adp);

        return BC_STS_SUCCESS;
}
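
/*
 * Picture-information-block (PIB) delivery/release queues. Each queue is
 * a small circular list in device DRAM: word 0 holds the read offset,
 * word 1 the write offset, and the entries themselves occupy offsets
 * MIN_PIB_Q_DEPTH through MAX_PIB_Q_DEPTH - 1, each entry being the DRAM
 * address of one PIB. Offsets wrap back to MIN_PIB_Q_DEPTH when they
 * reach MAX_PIB_Q_DEPTH, and read == write means the queue is empty.
 */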
static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
{
        /*
         * Position of the PIB Entries can be found at
         * 0th and the 1st location of the Circular list.
         */
        uint32_t Q_addr;
        uint32_t pib_cnt, r_offset, w_offset;

        Q_addr = hw->pib_del_Q_addr;

        /* Get the Read Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

        /* Get the Write Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

        if (r_offset == w_offset)
                return 0;       /* Queue is empty */

        if (w_offset > r_offset)
                pib_cnt = w_offset - r_offset;
        else
                pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
                          (r_offset + MIN_PIB_Q_DEPTH);

        if (pib_cnt > MAX_PIB_Q_DEPTH) {
                BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
                return 0;
        }

        return pib_cnt;
}
static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
{
        uint32_t Q_addr;
        uint32_t addr_entry, r_offset, w_offset;

        Q_addr = hw->pib_del_Q_addr;

        /* Get the Read Pointer; 0th location is the Read Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

        /* Get the Write Pointer; 1st location is the Write Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

        /* Queue is empty */
        if (r_offset == w_offset)
                return 0;

        if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
                return 0;

        /* Get the Actual Address of the PIB */
        crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
                         1, &addr_entry);

        /* Increment the Read Pointer */
        r_offset++;

        if (MAX_PIB_Q_DEPTH == r_offset)
                r_offset = MIN_PIB_Q_DEPTH;

        /* Write back the read pointer to its location */
        crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);

        return addr_entry;
}

static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
{
        uint32_t Q_addr;
        uint32_t r_offset, w_offset, n_offset;

        Q_addr = hw->pib_rel_Q_addr;

        /* Get the Read Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

        /* Get the Write Pointer */
        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

        if ((r_offset < MIN_PIB_Q_DEPTH) ||
            (r_offset >= MAX_PIB_Q_DEPTH))
                return false;

        n_offset = w_offset + 1;
        if (MAX_PIB_Q_DEPTH == n_offset)
                n_offset = MIN_PIB_Q_DEPTH;

        if (r_offset == n_offset)
                return false; /* should never happen */

        /* Write the DRAM ADDR to the Queue at the next offset */
        crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
                         1, &addr_to_rel);

        /* Put the new value of the write pointer in the Queue */
        crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);

        return true;
}
static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
{
        if (!src_pib || !dst_pib) {
                BCMLOG_ERR("Invalid Arguments\n");
                return;
        }

        dst_pib->timeStamp = 0;
        dst_pib->picture_number = src_pib->ppb.picture_number;
        dst_pib->width = src_pib->ppb.width;
        dst_pib->height = src_pib->ppb.height;
        dst_pib->chroma_format = src_pib->ppb.chroma_format;
        dst_pib->pulldown = src_pib->ppb.pulldown;
        dst_pib->flags = src_pib->ppb.flags;
        dst_pib->sess_num = src_pib->ptsStcOffset;
        dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
        dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
        dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
        dst_pib->frame_rate = src_pib->resolution;
}
static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
        unsigned int cnt;
        struct c011_pib src_pib;
        uint32_t pib_addr, pib_cnt;
        struct BC_PIC_INFO_BLOCK *AppPib;
        struct crystalhd_rx_dma_pkt *rx_pkt = NULL;

        pib_cnt = crystalhd_get_pib_avail_cnt(hw);

        if (!pib_cnt)
                return;

        for (cnt = 0; cnt < pib_cnt; cnt++) {
                pib_addr = crystalhd_get_addr_from_pib_Q(hw);
                crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
                                 (uint32_t *)&src_pib);

                if (src_pib.bFormatChange) {
                        rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
                        if (!rx_pkt)
                                return;
                        rx_pkt->flags = 0;
                        rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
                        AppPib = &rx_pkt->pib;
                        cpy_pib_to_app(&src_pib, AppPib);

                        /* was printing pib.height twice; width is the
                         * field the second %x was presumably meant for */
                        BCMLOG(BCMLOG_DBG,
                               "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
                               rx_pkt->pib.picture_number,
                               rx_pkt->pib.aspect_ratio,
                               rx_pkt->pib.chroma_format,
                               rx_pkt->pib.colour_primaries,
                               rx_pkt->pib.frame_rate,
                               rx_pkt->pib.width,
                               rx_pkt->pib.height,
                               rx_pkt->pib.n_drop,
                               rx_pkt->pib.pulldown,
                               rx_pkt->pib.ycom);

                        crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
                }

                crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
        }
}
static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
{
        uint32_t dma_cntrl;

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
        if (!(dma_cntrl & DMA_START_BIT)) {
                dma_cntrl |= DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
        if (!(dma_cntrl & DMA_START_BIT)) {
                dma_cntrl |= DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }
}

static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
{
        uint32_t dma_cntrl = 0, count = 30;
        uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
        if ((dma_cntrl & DMA_START_BIT)) {
                dma_cntrl &= ~DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
        if ((dma_cntrl & DMA_START_BIT)) {
                dma_cntrl &= ~DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }

        /* Poll for 3 seconds (30 * 100ms) on both the lists.. */
        while ((l0y || l0uv || l1y || l1uv) && count) {

                if (l0y) {
                        l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
                        l0y &= DMA_START_BIT;
                        if (!l0y)
                                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
                }

                if (l1y) {
                        l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
                        l1y &= DMA_START_BIT;
                        if (!l1y)
                                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
                }

                if (l0uv) {
                        l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
                        l0uv &= DMA_START_BIT;
                        if (!l0uv)
                                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
                }

                if (l1uv) {
                        l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
                        l1uv &= DMA_START_BIT;
                        if (!l1uv)
                                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
                }
                msleep_interruptible(100);
                count--;
        }

        hw->rx_list_post_index = 0;

        BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
               count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}
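
/*
 * RX capture uses two hardware descriptor lists (DMA_ENGINE_CNT) in a
 * ping-pong arrangement: rx_list_post_index alternates between list 0
 * and list 1, each posted packet is tagged rx_pkt_tag_seed plus its list
 * index, and rx_list_sts[] tracks which Y/UV completion interrupts are
 * still outstanding for each list. A list can only be re-programmed once
 * its status has returned to sts_free.
 */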
static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
{
        uint32_t y_low_addr_reg, y_high_addr_reg;
        uint32_t uv_low_addr_reg, uv_high_addr_reg;
        union addr_64 desc_addr;
        unsigned long flags;

        if (!hw || !rx_pkt) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
                BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
                return BC_STS_INV_ARG;
        }

        spin_lock_irqsave(&hw->rx_lock, flags);
        /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
        if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
                spin_unlock_irqrestore(&hw->rx_lock, flags);
                return BC_STS_BUSY;
        }

        if (!hw->rx_list_post_index) {
                y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
                y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
                uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
                uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
        } else {
                y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
                y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
                uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
                uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
        }

        rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
        hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
        if (rx_pkt->uv_phy_addr)
                hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
        hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
        spin_unlock_irqrestore(&hw->rx_lock, flags);

        crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);

        crystalhd_start_rx_dma_engine(hw);

        /* Program the Y descriptor */
        desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
        crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
        crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);

        if (rx_pkt->uv_phy_addr) {
                /* Program the UV descriptor */
                desc_addr.full_addr = rx_pkt->uv_phy_addr;
                crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
                crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
        }

        return BC_STS_SUCCESS;
}
static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
                                                 struct crystalhd_rx_dma_pkt *rx_pkt)
{
        enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);

        if (sts == BC_STS_BUSY)
                crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
                                   false, rx_pkt->pkt_tag);

        return sts;
}

static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
                               uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
{
        uint32_t y_dn_sz_reg, uv_dn_sz_reg;

        if (!list_index) {
                y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
                uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
        } else {
                y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
                uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
        }

        *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
        *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
}
/*
 * This function should be called only after making sure that the two DMA
 * lists are free. This function does not check if DMAs are active before
 * turning off the DMA.
 */
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
{
        uint32_t dma_cntrl, aspm;

        hw->stop_pending = 0;

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
        if (dma_cntrl & DMA_START_BIT) {
                dma_cntrl &= ~DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }

        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
        if (dma_cntrl & DMA_START_BIT) {
                dma_cntrl &= ~DMA_START_BIT;
                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
        }
        hw->rx_list_post_index = 0;

        aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
        aspm |= ASPM_L1_ENABLE;
        /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
        crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}
static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
                                            enum BC_STATUS comp_sts)
{
        struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
        uint32_t y_dw_dnsz, uv_dw_dnsz;
        enum BC_STATUS sts = BC_STS_SUCCESS;

        if (!hw || list_index >= DMA_ENGINE_CNT) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
                                               hw->rx_pkt_tag_seed + list_index);
        if (!rx_pkt) {
                BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
                           hw->rx_list_post_index, hw->rx_list_sts[0],
                           hw->rx_list_sts[1], list_index,
                           hw->rx_pkt_tag_seed + list_index, comp_sts);
                return BC_STS_INV_ARG;
        }

        if (comp_sts == BC_STS_SUCCESS) {
                crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
                rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
                rx_pkt->flags = COMP_FLAG_DATA_VALID;
                if (rx_pkt->uv_phy_addr)
                        rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
                crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
                                   hw->rx_pkt_tag_seed + list_index);
                return sts;
        }

        /* Check if we can post this DIO again. */
        return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}
static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
                                       uint32_t y_err_sts, uint32_t uv_err_sts)
{
        uint32_t tmp;
        enum list_sts tmp_lsts;

        if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
                return false;

        tmp_lsts = hw->rx_list_sts[0];

        /* Y0 - DMA */
        tmp = y_err_sts & GET_Y0_ERR_MSK;
        if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;

        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
        }

        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
                hw->rx_list_sts[0] &= ~rx_y_mask;
                hw->rx_list_sts[0] |= rx_y_error;
                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
        }

        if (tmp) {
                hw->rx_list_sts[0] &= ~rx_y_mask;
                hw->rx_list_sts[0] |= rx_y_error;
                hw->rx_list_post_index = 0;
        }

        /* UV0 - DMA */
        tmp = uv_err_sts & GET_UV0_ERR_MSK;
        if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;

        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
        }

        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
                hw->rx_list_sts[0] &= ~rx_uv_mask;
                hw->rx_list_sts[0] |= rx_uv_error;
                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
        }

        if (tmp) {
                hw->rx_list_sts[0] &= ~rx_uv_mask;
                hw->rx_list_sts[0] |= rx_uv_error;
                hw->rx_list_post_index = 0;
        }

        if (y_err_sts & GET_Y0_ERR_MSK) {
                tmp = y_err_sts & GET_Y0_ERR_MSK;
                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
        }

        if (uv_err_sts & GET_UV0_ERR_MSK) {
                tmp = uv_err_sts & GET_UV0_ERR_MSK;
                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
        }

        return (tmp_lsts != hw->rx_list_sts[0]);
}

static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
                                       uint32_t y_err_sts, uint32_t uv_err_sts)
{
        uint32_t tmp;
        enum list_sts tmp_lsts;

        if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
                return false;

        tmp_lsts = hw->rx_list_sts[1];

        /* Y1 - DMA */
        tmp = y_err_sts & GET_Y1_ERR_MSK;
        if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;

        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
        }

        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
                /* Add retry-support.. */
                hw->rx_list_sts[1] &= ~rx_y_mask;
                hw->rx_list_sts[1] |= rx_y_error;
                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
        }

        if (tmp) {
                hw->rx_list_sts[1] &= ~rx_y_mask;
                hw->rx_list_sts[1] |= rx_y_error;
                hw->rx_list_post_index = 0;
        }

        /* UV1 - DMA */
        tmp = uv_err_sts & GET_UV1_ERR_MSK;
        if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;

        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
        }

        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
                /* Add retry-support */
                hw->rx_list_sts[1] &= ~rx_uv_mask;
                hw->rx_list_sts[1] |= rx_uv_error;
                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
        }

        if (tmp) {
                hw->rx_list_sts[1] &= ~rx_uv_mask;
                hw->rx_list_sts[1] |= rx_uv_error;
                hw->rx_list_post_index = 0;
        }

        if (y_err_sts & GET_Y1_ERR_MSK) {
                tmp = y_err_sts & GET_Y1_ERR_MSK;
                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
        }

        if (uv_err_sts & GET_UV1_ERR_MSK) {
                tmp = uv_err_sts & GET_UV1_ERR_MSK;
                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
        }

        return (tmp_lsts != hw->rx_list_sts[1]);
}
static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
        unsigned long flags;
        uint32_t i, list_avail = 0;
        enum BC_STATUS comp_sts = BC_STS_NO_DATA;
        uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
        bool ret = false;

        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return;
        }

        if (!(intr_sts & GET_RX_INTR_MASK))
                return;

        y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
        uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);

        for (i = 0; i < DMA_ENGINE_CNT; i++) {
                /* Update States.. */
                spin_lock_irqsave(&hw->rx_lock, flags);
                if (i == 0)
                        ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
                else
                        ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
                if (ret) {
                        switch (hw->rx_list_sts[i]) {
                        case sts_free:
                                comp_sts = BC_STS_SUCCESS;
                                list_avail = 1;
                                break;
                        case rx_y_error:
                        case rx_uv_error:
                        case rx_sts_error:
                                /* We got an error on Y, UV or both. */
                                hw->stats.rx_errors++;
                                crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
                                /* FIXME: jarod: this is where my mini pci-e card is tripping up */
                                BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
                                       "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
                                       i, hw->stats.rx_errors, y_err_sts,
                                       uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
                                hw->rx_list_sts[i] = sts_free;
                                comp_sts = BC_STS_ERROR;
                                break;
                        default:
                                /* Wait for completion.. */
                                comp_sts = BC_STS_NO_DATA;
                                break;
                        }
                }
                spin_unlock_irqrestore(&hw->rx_lock, flags);

                /* handle completion... */
                if (comp_sts != BC_STS_NO_DATA) {
                        crystalhd_rx_pkt_done(hw, i, comp_sts);
                        comp_sts = BC_STS_NO_DATA;
                }
        }

        if (list_avail) {
                if (hw->stop_pending) {
                        if ((hw->rx_list_sts[0] == sts_free) &&
                            (hw->rx_list_sts[1] == sts_free))
                                crystalhd_hw_finalize_pause(hw);
                } else {
                        crystalhd_hw_start_capture(hw);
                }
        }
}
static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
                                                 struct BC_FW_CMD *fw_cmd)
{
        enum BC_STATUS sts = BC_STS_SUCCESS;
        struct dec_rsp_channel_start_video *st_rsp = NULL;

        switch (fw_cmd->cmd[0]) {
        case eCMD_C011_DEC_CHAN_START_VIDEO:
                st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
                hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
                hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
                BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
                       hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
                break;
        case eCMD_C011_INIT:
                if (!(crystalhd_load_firmware_config(hw->adp))) {
                        BCMLOG_ERR("Invalid Params.\n");
                        sts = BC_STS_FW_AUTH_FAILED;
                }
                break;
        default:
                break;
        }
        return sts;
}
static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
        uint32_t reg;
        union link_misc_perst_decoder_ctrl rst_cntrl_reg;

        /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
        rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);

        rst_cntrl_reg.bcm_7412_rst = 1;
        crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
        msleep_interruptible(50);

        rst_cntrl_reg.bcm_7412_rst = 0;
        crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);

        /* Close all banks, put DDR in idle */
        bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);

        /* Set bit 25 (drop CKE pin of DDR) */
        reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
        reg |= 0x02000000;
        bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);

        /* Reset the audio block */
        bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);

        /* Power down Raptor PLL */
        reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
        reg |= 0x00008000;
        bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);

        /* Power down all Audio PLL */
        bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);

        /* Power down video clock (75MHz) */
        reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
        reg |= 0x00008000;
        bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);

        /* Power down video clock (75MHz) */
        reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
        reg |= 0x00008000;
        bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);

        /* Power down core clock (200MHz) */
        reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
        reg |= 0x00008000;
        bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

        /* Power down core clock (200MHz) */
        reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
        reg |= 0x00008000;
        bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);

        return BC_STS_SUCCESS;
}
/************************************************
**
*************************************************/
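
/*
 * Firmware download sequence, as implemented below: verify the OTP is
 * programmed, reset the download interface (DCI) and poll DCI_STATUS
 * bit 4 until the interface reports ready, then stream the image one
 * dword at a time through DCI_FIRMWARE_DATA starting at BC_FWIMG_ST_ADDR.
 * The last 36 bytes of the image are a signature; eight dwords of it are
 * written byte-swapped into the DCI_SIGNATURE_DATA registers (highest
 * register first). Setting DCI_CMD bit 1 then starts authentication:
 * DCI_STATUS bit 9 reports a signature match and bit 0 download
 * completion, after which DCI_CMD bit 4 releases the firmware to run.
 */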
enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
{
        uint32_t reg_data, cnt, *temp_buff;
        uint32_t fw_sig_len = 36;
        uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;

        BCMLOG_ENTER;

        if (!adp || !buffer || !sz) {
                BCMLOG_ERR("Invalid Params.\n");
                return BC_STS_INV_ARG;
        }

        reg_data = crystalhd_reg_rd(adp, OTP_CMD);
        if (!(reg_data & 0x02)) {
                BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
                return BC_STS_ERROR;
        }

        reg_data = 0;
        crystalhd_reg_wr(adp, DCI_CMD, 0);
        reg_data |= BC_BIT(0);
        crystalhd_reg_wr(adp, DCI_CMD, reg_data);

        reg_data = 0;
        cnt = 1000;
        msleep_interruptible(10);

        while (reg_data != BC_BIT(4)) {
                reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
                reg_data &= BC_BIT(4);
                if (--cnt == 0) {
                        BCMLOG_ERR("Firmware Download RDY Timeout.\n");
                        return BC_STS_TIMEOUT;
                }
        }

        msleep_interruptible(10);
        /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
        crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
        temp_buff = (uint32_t *)buffer;
        for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
                crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
                crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
                dram_offset += 4;
                temp_buff++;
        }
        msleep_interruptible(10);

        temp_buff++;

        sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
        for (cnt = 0; cnt < 8; cnt++) {
                uint32_t swapped_data = *temp_buff;
                swapped_data = bswap_32_1(swapped_data);
                crystalhd_reg_wr(adp, sig_reg, swapped_data);
                sig_reg -= 4;
                temp_buff++;
        }
        msleep_interruptible(10);

        reg_data = 0;
        reg_data |= BC_BIT(1);
        crystalhd_reg_wr(adp, DCI_CMD, reg_data);
        msleep_interruptible(10);

        reg_data = 0;
        reg_data = crystalhd_reg_rd(adp, DCI_STATUS);

        if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
                cnt = 1000;
                while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
                        reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
                        reg_data &= BC_BIT(0);
                        if (!(--cnt))
                                break;
                        msleep_interruptible(10);
                }
                reg_data = 0;
                reg_data = crystalhd_reg_rd(adp, DCI_CMD);
                reg_data |= BC_BIT(4);
                crystalhd_reg_wr(adp, DCI_CMD, reg_data);
        } else {
                BCMLOG_ERR("F/w Signature mismatch\n");
                return BC_STS_FW_AUTH_FAILED;
        }

        BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
        return BC_STS_SUCCESS;
}
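
/*
 * Firmware command flow: the caller's command buffer is copied into
 * device DRAM at TS_Host2CpuSnd, and that address is then posted to the
 * Hst2CpuMbx1 mailbox register to kick the firmware. Completion arrives
 * via the decoder interrupt (see crystalhd_hw_interrupt), whose handler
 * sets fwcmd_evt_sts and wakes the event this function sleeps on. The
 * response address comes back in the Cpu2HstMbx1 mailbox; word 2 of the
 * response carries the firmware return code.
 */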
  1382. enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
  1383. struct BC_FW_CMD *fw_cmd)
  1384. {
  1385. uint32_t cnt = 0, cmd_res_addr;
  1386. uint32_t *cmd_buff, *res_buff;
  1387. wait_queue_head_t fw_cmd_event;
  1388. int rc = 0;
  1389. enum BC_STATUS sts;
  1390. crystalhd_create_event(&fw_cmd_event);
  1391. BCMLOG_ENTER;
  1392. if (!hw || !fw_cmd) {
  1393. BCMLOG_ERR("Invalid Arguments\n");
  1394. return BC_STS_INV_ARG;
  1395. }
  1396. cmd_buff = fw_cmd->cmd;
  1397. res_buff = fw_cmd->rsp;
  1398. if (!cmd_buff || !res_buff) {
  1399. BCMLOG_ERR("Invalid Parameters for F/W Command\n");
  1400. return BC_STS_INV_ARG;
  1401. }
  1402. hw->pwr_lock++;
  1403. hw->fwcmd_evt_sts = 0;
  1404. hw->pfw_cmd_event = &fw_cmd_event;
        /* Write the command to device memory */
        crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
        /* Memory read to flush the memory arbitrator */
        crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
        /* Write the command address to the mailbox */
        bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
        msleep_interruptible(50);

        crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);

        if (!rc) {
                sts = BC_STS_SUCCESS;
        } else if (rc == -EBUSY) {
                BCMLOG_ERR("Firmware command timeout\n");
                sts = BC_STS_TIMEOUT;
        } else if (rc == -EINTR) {
                BCMLOG(BCMLOG_DBG, "FwCmd wait interrupted by signal.\n");
                sts = BC_STS_IO_USER_ABORT;
        } else {
                BCMLOG_ERR("FwCmd IO Error.\n");
                sts = BC_STS_IO_ERROR;
        }

        if (sts != BC_STS_SUCCESS) {
                BCMLOG_ERR("FwCmd Failed.\n");
                hw->pwr_lock--;
                return sts;
        }

        /* Get the response address */
        cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
        /* Read the response */
        crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);

        hw->pwr_lock--;

        if (res_buff[2] != C011_RET_SUCCESS) {
                BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
                return BC_STS_FW_CMD_ERR;
        }

        sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
        if (sts != BC_STS_SUCCESS)
                BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");

        return sts;
}
bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
{
        uint32_t intr_sts = 0;
        uint32_t deco_intr = 0;
        bool rc = false;

        if (!adp || !hw->dev_started)
                return rc;

        hw->stats.num_interrupts++;
        hw->pwr_lock++;

        deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
        intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

        if (intr_sts) {
                /* let the system know we processed the interrupt.. */
                rc = true;
                hw->stats.dev_interrupts++;
        }
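        /*
         * 0xdeaddead is presumably a poison pattern read back when the
         * decoder core is powered down or unreachable, so it is treated
         * as "no interrupt" here.
         */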
        if (deco_intr && (deco_intr != 0xdeaddead)) {

                if (deco_intr & 0x80000000) {
                        /* Firmware command done: set the event and status flag */
                        if (hw->pfw_cmd_event) {
                                hw->fwcmd_evt_sts = 1;
                                crystalhd_set_event(hw->pfw_cmd_event);
                        }
                }

                if (deco_intr & BC_BIT(1))
                        crystalhd_hw_proc_pib(hw);

                bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
                /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
                bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
                rc = true;
        }

        /* Rx interrupts */
        crystalhd_rx_isr(hw, intr_sts);

        /* Tx interrupts */
        crystalhd_tx_isr(hw, intr_sts);

        /* Clear interrupts */
        if (rc) {
                if (intr_sts)
                        crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

                crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
        }

        hw->pwr_lock--;

        return rc;
}
enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
{
        if (!hw || !adp) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        if (hw->dev_started)
                return BC_STS_SUCCESS;

        memset(hw, 0, sizeof(struct crystalhd_hw));

        hw->adp = adp;
        spin_lock_init(&hw->lock);
        spin_lock_init(&hw->rx_lock);

        /* FIXME: jarod: what are these magic numbers?!? */
        hw->tx_ioq_tag_seed = 0x70023070;
        hw->rx_pkt_tag_seed = 0x70029070;
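        /*
         * The seed values appear to be arbitrary; tags are handed out
         * as seed + index so completions can be matched back to a
         * specific TX list or RX packet.
         */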
        hw->stop_pending = 0;
        crystalhd_start_device(hw->adp);
        hw->dev_started = true;

        /* set initial core clock */
        hw->core_clock_mhz = CLOCK_PRESET;
        hw->prev_n = 0;
        hw->pwr_lock = 0;
        crystalhd_hw_set_core_clock(hw);

        return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
{
        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        if (!hw->dev_started)
                return BC_STS_SUCCESS;

        /* Device stop and DDR sleep happen in here */
        crystalhd_hw_suspend(hw);
        hw->dev_started = false;

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
{
        unsigned int i;
        void *mem;
        size_t mem_len;
        dma_addr_t phy_addr;
        enum BC_STATUS sts = BC_STS_SUCCESS;
        struct crystalhd_rx_dma_pkt *rpkt;

        if (!hw || !hw->adp) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        sts = crystalhd_hw_create_ioqs(hw);
        if (sts != BC_STS_SUCCESS) {
                BCMLOG_ERR("Failed to create IOQs..\n");
                return sts;
        }

        mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
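        /*
         * Each TX list and each RX packet gets descriptor memory large
         * enough for BC_LINK_MAX_SGLS scatter/gather descriptors. The
         * TX packet structures live in the static tx_pkt_pool[] array;
         * the RX packet structures are allocated here and seeded into
         * the free pool via crystalhd_hw_free_rx_pkt().
         */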
        for (i = 0; i < BC_TX_LIST_CNT; i++) {
                mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
                if (!mem) {
                        BCMLOG_ERR("Insufficient Memory For TX\n");
                        crystalhd_hw_free_dma_rings(hw);
                        return BC_STS_INSUFF_RES;
                }
                memset(mem, 0, mem_len);

                hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
                hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
                hw->tx_pkt_pool[i].desc_mem.sz = mem_len;
                hw->tx_pkt_pool[i].list_tag = 0;

                /* Add the TX dma request to the Free Queue.. */
                sts = crystalhd_dioq_add(hw->tx_freeq,
                                         &hw->tx_pkt_pool[i], false, 0);
                if (sts != BC_STS_SUCCESS) {
                        crystalhd_hw_free_dma_rings(hw);
                        return sts;
                }
        }

        for (i = 0; i < BC_RX_LIST_CNT; i++) {
                rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
                if (!rpkt) {
                        BCMLOG_ERR("Insufficient Memory For RX\n");
                        crystalhd_hw_free_dma_rings(hw);
                        return BC_STS_INSUFF_RES;
                }

                mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
                if (!mem) {
                        BCMLOG_ERR("Insufficient Memory For RX\n");
                        crystalhd_hw_free_dma_rings(hw);
                        kfree(rpkt);
                        return BC_STS_INSUFF_RES;
                }
                memset(mem, 0, mem_len);

                rpkt->desc_mem.pdma_desc_start = mem;
                rpkt->desc_mem.phy_addr = phy_addr;
                rpkt->desc_mem.sz = mem_len;
                rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
                crystalhd_hw_free_rx_pkt(hw, rpkt);
        }

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
{
        unsigned int i;
        struct crystalhd_rx_dma_pkt *rpkt = NULL;

        if (!hw || !hw->adp) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        /* Delete all IOQs.. */
        crystalhd_hw_delete_ioqs(hw);

        for (i = 0; i < BC_TX_LIST_CNT; i++) {
                if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
                        bc_kern_dma_free(hw->adp,
                                         hw->tx_pkt_pool[i].desc_mem.sz,
                                         hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
                                         hw->tx_pkt_pool[i].desc_mem.phy_addr);

                        hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
                }
        }

        BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
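        /*
         * Drain the RX packet pool: crystalhd_hw_alloc_rx_pkt() hands
         * back pooled packets until the pool is empty.
         */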
        do {
                rpkt = crystalhd_hw_alloc_rx_pkt(hw);
                if (!rpkt)
                        break;
                bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
                                 rpkt->desc_mem.pdma_desc_start,
                                 rpkt->desc_mem.phy_addr);
                kfree(rpkt);
        } while (rpkt);

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
                                    hw_comp_callback call_back,
                                    wait_queue_head_t *cb_event, uint32_t *list_id,
                                    uint8_t data_flags)
{
        struct tx_dma_pkt *tx_dma_packet = NULL;
        uint32_t first_desc_u_addr, first_desc_l_addr;
        union addr_64 desc_addr;
        enum BC_STATUS sts, add_sts;
        uint32_t dummy_index = 0;
        unsigned long flags;
        bool rc;

        if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        /*
         * Since we hit the code-in busy condition very frequently,
         * check the code-in status first, before checking for a free
         * queue element. This avoids the queue fetch/add in the
         * normal case.
         */
        rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
                                    false, data_flags);
        if (rc) {
                hw->stats.cin_busy++;
                return BC_STS_BUSY;
        }

        /* Get a list from the TX Free Queue */
        tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
        if (!tx_dma_packet) {
                BCMLOG_ERR("No empty elements..\n");
                return BC_STS_ERR_USAGE;
        }

        sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
                                             &tx_dma_packet->desc_mem,
                                             &dummy_index);
        if (sts != BC_STS_SUCCESS) {
                add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
                                             false, 0);
                if (add_sts != BC_STS_SUCCESS)
                        BCMLOG_ERR("double fault..\n");

                return sts;
        }

        hw->pwr_lock++;

        desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;

        tx_dma_packet->call_back = call_back;
        tx_dma_packet->cb_event = cb_event;
        tx_dma_packet->dio_req = ioreq;
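        /*
         * TX posts ping-pong between the two hardware DMA lists:
         * tx_list_post_index selects the LIST0/LIST1 registers, and
         * the list tag (seed + index) lets the completion path
         * identify which post finished.
         */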
        spin_lock_irqsave(&hw->lock, flags);
        if (hw->tx_list_post_index == 0) {
                first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
                first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
        } else {
                first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
                first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
        }

        *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
                                             hw->tx_list_post_index;

        hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
        spin_unlock_irqrestore(&hw->lock, flags);

        /* Insert in Active Q.. */
        crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
                           tx_dma_packet->list_tag);

        /*
         * The interrupt will come as soon as the valid bit is
         * written, so be ready for that: all initialization must
         * happen before it.
         */
        crystalhd_start_tx_dma_engine(hw);
        crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);

        crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
        /* Be sure we set the valid bit ^^^^ */

        return BC_STS_SUCCESS;
}
/*
 * This is a force cancel and we are racing with the ISR.
 *
 * We will try to remove the request from the Active Q before the ISR
 * gets it. If the ISR gets it first, the completion happens in the
 * normal path and we return _STS_NO_DATA from here.
 *
 * FIX_ME: The actual race condition has not been tested.
 */
enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
{
        if (!hw || !list_id) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        crystalhd_stop_tx_dma_engine(hw);
        crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
                                           struct crystalhd_dio_req *ioreq, bool en_post)
{
        struct crystalhd_rx_dma_pkt *rpkt;
        uint32_t tag, uv_desc_ix = 0;
        enum BC_STATUS sts;

        if (!hw || !ioreq) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        rpkt = crystalhd_hw_alloc_rx_pkt(hw);
        if (!rpkt) {
                BCMLOG_ERR("Insufficient resources\n");
                return BC_STS_INSUFF_RES;
        }

        rpkt->dio_req = ioreq;
        tag = rpkt->pkt_tag;

        sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
        if (sts != BC_STS_SUCCESS) {
                /* Return the packet to the free pool before bailing out. */
                crystalhd_hw_free_rx_pkt(hw, rpkt);
                return sts;
        }

        rpkt->uv_phy_addr = 0;

        /* Store the UV descriptor address in the rx packet for the post */
        if (uv_desc_ix)
                rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
                                    (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
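        /*
         * A non-zero uv_desc_ix means the SGL was split into two
         * descriptor chains, presumably luma and chroma for planar
         * (Y + UV) capture formats; uv_phy_addr points at the start
         * of the UV chain.
         */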
        if (en_post)
                sts = crystalhd_hw_post_cap_buff(hw, rpkt);
        else
                sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);

        return sts;
}
enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
                                           struct BC_PIC_INFO_BLOCK *pib,
                                           struct crystalhd_dio_req **ioreq)
{
        struct crystalhd_rx_dma_pkt *rpkt;
        uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
        uint32_t sig_pending = 0;

        if (!hw || !ioreq || !pib) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
        if (!rpkt) {
                if (sig_pending) {
                        BCMLOG(BCMLOG_INFO, "wait on frame interrupted by signal %d\n", sig_pending);
                        return BC_STS_IO_USER_ABORT;
                } else {
                        return BC_STS_TIMEOUT;
                }
        }

        rpkt->dio_req->uinfo.comp_flags = rpkt->flags;

        if (rpkt->flags & COMP_FLAG_PIB_VALID)
                memcpy(pib, &rpkt->pib, sizeof(*pib));

        *ioreq = rpkt->dio_req;
        crystalhd_hw_free_rx_pkt(hw, rpkt);

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
{
        struct crystalhd_rx_dma_pkt *rx_pkt;
        enum BC_STATUS sts;
        uint32_t i;

        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        /* This is the start of capture.. Post to both RX lists.. */
        for (i = 0; i < DMA_ENGINE_CNT; i++) {
                rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
                if (!rx_pkt)
                        return BC_STS_NO_DATA;
                sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
                if (sts != BC_STS_SUCCESS)
                        break;
        }

        return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
{
        void *temp = NULL;

        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        crystalhd_stop_rx_dma_engine(hw);

        do {
                temp = crystalhd_dioq_fetch(hw->rx_freeq);
                if (temp)
                        crystalhd_rx_pkt_rel_call_back(hw, temp);
        } while (temp);

        return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
{
        hw->stats.pause_cnt++;
        hw->stop_pending = 1;

        /* Finalize the pause now only if both RX lists are already idle */
        if ((hw->rx_list_sts[0] == sts_free) &&
            (hw->rx_list_sts[1] == sts_free))
                crystalhd_hw_finalize_pause(hw);

        return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
{
        enum BC_STATUS sts;
        uint32_t aspm;

        hw->stop_pending = 0;
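        /*
         * Disable ASPM L1 while streaming, presumably so PCIe
         * power-state exit latency cannot stall the capture DMA.
         */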
        aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
        aspm &= ~ASPM_L1_ENABLE;
        /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
        crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);

        sts = crystalhd_hw_start_capture(hw);
        return sts;
}
enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
{
        enum BC_STATUS sts;

        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        sts = crystalhd_put_ddr2sleep(hw);
        if (sts != BC_STS_SUCCESS) {
                BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
                return BC_STS_ERROR;
        }

        if (!crystalhd_stop_device(hw->adp)) {
                BCMLOG_ERR("Failed to Stop Device!!\n");
                return BC_STS_ERROR;
        }

        return BC_STS_SUCCESS;
}

void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
{
        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return;
        }

        /* If called with NULL stats, it's a request to zero out the stats */
        if (!stats) {
                memset(&hw->stats, 0, sizeof(hw->stats));
                return;
        }

        hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
        hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);

        memcpy(stats, &hw->stats, sizeof(*stats));
}
enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
{
        uint32_t reg, n, i;
        uint32_t vco_mg, refresh_reg;

        if (!hw) {
                BCMLOG_ERR("Invalid Arguments\n");
                return BC_STS_INV_ARG;
        }

        /* FIXME: jarod: wha? */
        /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
        n = hw->core_clock_mhz / 5;

        if (n == hw->prev_n)
                return BC_STS_CLK_NOCHG;

        if (hw->pwr_lock > 0) {
                /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
                return BC_STS_CLK_NOCHG;
        }
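        /*
         * The VCO frequency appears to be n * 27 MHz (a 27 MHz
         * reference clock assumed); pick the vco_mg setting for that
         * frequency band.
         */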
        i = n * 27;
        if (i < 560)
                vco_mg = 0;
        else if (i < 900)
                vco_mg = 1;
        else if (i < 1030)
                vco_mg = 2;
        else
                vco_mg = 3;

        reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

        reg &= 0xFFFFCFC0;
        reg |= n;
        reg |= vco_mg << 12;

        BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
               hw->core_clock_mhz, n, vco_mg);

        /* Change the DRAM refresh rate to accommodate the new frequency */
        /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up */
        refresh_reg = (7 * hw->core_clock_mhz / 16);
        bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));

        bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

        /* Poll bit 17 (presumably PLL lock) for up to ~100ms */
        for (i = 0; i < 10; i++) {
                reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

                if (reg & 0x00020000) {
                        hw->prev_n = n;
                        /* FIXME: jarod: outputting a random "C" is... confusing... */
                        BCMLOG(BCMLOG_INFO, "C");
                        return BC_STS_SUCCESS;
                }
                msleep_interruptible(10);
        }

        BCMLOG(BCMLOG_INFO, "clk change failed\n");
        return BC_STS_CLK_NOCHG;
}