/drivers/scsi/aic94xx/aic94xx_seq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx SAS/SATA driver sequencer interface.
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_dump.h"

/* It takes no more than 0.05 us for an instruction
 * to complete. So waiting for 1 us should be more than
 * plenty.
 */
#define PAUSE_DELAY 1
#define PAUSE_TRIES 1000
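/* Note (explanatory, derived from the constants above): with
 * PAUSE_DELAY = 1 us and PAUSE_TRIES = 1000, each of the pause/unpause
 * poll loops below busy-waits for roughly 1 ms at most before giving up.
 */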
static const struct firmware *sequencer_fw;

static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
	cseq_idle_loop, lseq_idle_loop;
static const u8 *cseq_code, *lseq_code;
static u32 cseq_code_size, lseq_code_size;

static u16 first_scb_site_no = 0xFFFF;
static u16 last_scb_site_no;

/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */

/**
 * asd_pause_cseq - pause the central sequencer
 * @asd_ha: pointer to host adapter structure
 *
 * Return 0 on success, negative on failure.
 */
static int asd_pause_cseq(struct asd_ha_struct *asd_ha)
{
	int count = PAUSE_TRIES;
	u32 arp2ctl;

	arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
	if (arp2ctl & PAUSED)
		return 0;

	asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
	do {
		arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
		if (arp2ctl & PAUSED)
			return 0;
		udelay(PAUSE_DELAY);
	} while (--count > 0);

	ASD_DPRINTK("couldn't pause CSEQ\n");
	return -1;
}

/**
 * asd_unpause_cseq - unpause the central sequencer.
 * @asd_ha: pointer to host adapter structure.
 *
 * Return 0 on success, negative on error.
 */
static int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
{
	u32 arp2ctl;
	int count = PAUSE_TRIES;

	arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
	if (!(arp2ctl & PAUSED))
		return 0;

	asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
	do {
		arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
		if (!(arp2ctl & PAUSED))
			return 0;
		udelay(PAUSE_DELAY);
	} while (--count > 0);

	ASD_DPRINTK("couldn't unpause the CSEQ\n");
	return -1;
}

/**
 * asd_seq_pause_lseq - pause a link sequencer
 * @asd_ha: pointer to a host adapter structure
 * @lseq: link sequencer of interest
 *
 * Return 0 on success, negative on error.
 */
static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
	u32 arp2ctl;
	int count = PAUSE_TRIES;

	arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
	if (arp2ctl & PAUSED)
		return 0;

	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
	do {
		arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
		if (arp2ctl & PAUSED)
			return 0;
		udelay(PAUSE_DELAY);
	} while (--count > 0);

	ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
	return -1;
}

/**
 * asd_pause_lseq - pause the link sequencer(s)
 * @asd_ha: pointer to host adapter structure
 * @lseq_mask: mask of link sequencers of interest
 *
 * Return 0 on success, negative on failure.
 */
static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
{
	int lseq;
	int err = 0;

	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		err = asd_seq_pause_lseq(asd_ha, lseq);
		if (err)
			return err;
	}

	return err;
}

/**
 * asd_seq_unpause_lseq - unpause a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 *
 * Return 0 on success, negative on error.
 */
static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
	u32 arp2ctl;
	int count = PAUSE_TRIES;

	arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
	if (!(arp2ctl & PAUSED))
		return 0;

	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
	do {
		arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
		if (!(arp2ctl & PAUSED))
			return 0;
		udelay(PAUSE_DELAY);
	} while (--count > 0);

	ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
	return 0;
}
/* ---------- Downloading CSEQ/LSEQ microcode ---------- */

static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			   u32 size)
{
	u32 addr = CSEQ_RAM_REG_BASE_ADR;
	const u32 *prog = (u32 *) _prog;
	u32 i;

	for (i = 0; i < size; i += 4, prog++, addr += 4) {
		u32 val = asd_read_reg_dword(asd_ha, addr);
		if (le32_to_cpu(*prog) != val) {
			asd_printk("%s: cseq verify failed at %u "
				   "read:0x%x, wanted:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   i, val, le32_to_cpu(*prog));
			return -1;
		}
	}
	ASD_DPRINTK("verified %d bytes, passed\n", size);
	return 0;
}

/**
 * asd_verify_lseq - verify the microcode of a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @_prog: pointer to the microcode
 * @size: size of the microcode in bytes
 * @lseq: link sequencer of interest
 *
 * The link sequencer code is accessed in 4 KB pages, which are selected
 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
 * The 10 KB LSEQm instruction code is mapped, page at a time, at
 * LmSEQRAM address.
 */
static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			   u32 size, int lseq)
{
#define LSEQ_CODEPAGE_SIZE 4096
	int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
	u32 page;
	const u32 *prog = (u32 *) _prog;

	for (page = 0; page < pages; page++) {
		u32 i;

		asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
				    page << LmRAMPAGE_LSHIFT);
		for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
		     i += 4, prog++, size -= 4) {
			u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
			if (le32_to_cpu(*prog) != val) {
				asd_printk("%s: LSEQ%d verify failed "
					   "page:%d, offs:%d\n",
					   pci_name(asd_ha->pcidev),
					   lseq, page, i);
				return -1;
			}
		}
	}
	ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
		    (int)((u8 *)prog-_prog));
	return 0;
}

/**
 * asd_verify_seq -- verify CSEQ/LSEQ microcode
 * @asd_ha: pointer to host adapter structure
 * @prog: pointer to microcode
 * @size: size of the microcode
 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
 *
 * Return 0 if microcode is correct, negative on mismatch.
 */
static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
			  u32 size, u8 lseq_mask)
{
	if (lseq_mask == 0)
		return asd_verify_cseq(asd_ha, prog, size);
	else {
		int lseq, err;

		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
			err = asd_verify_lseq(asd_ha, prog, size, lseq);
			if (err)
				return err;
		}
	}
	return 0;
}
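/* Throughout the download/verify paths a zero lseq_mask selects the
 * central sequencer (CSEQ), while a non-zero value is a bitmask of the
 * link sequencers (LSEQs) to operate on; see the OVLYCSEQ/lseq_mask
 * handling in asd_download_seq() below.
 */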
#define ASD_DMA_MODE_DOWNLOAD
#ifdef ASD_DMA_MODE_DOWNLOAD
/* This is the size of the CSEQ Mapped instruction page */
#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
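/* (1U << 14) - 1 = 16383 bytes, so the DMA download below transfers the
 * microcode image in overlay chunks of at most 16383 bytes each.
 */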
static int asd_download_seq(struct asd_ha_struct *asd_ha,
			    const u8 * const prog, u32 size, u8 lseq_mask)
{
	u32 comstaten;
	u32 reg;
	int page;
	const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
	struct asd_dma_tok *token;
	int err = 0;

	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	/* save, disable and clear interrupts */
	comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
	asd_write_reg_dword(asd_ha, COMSTATEN, 0);
	asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);

	asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
	asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);

	token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
	if (!token) {
		asd_printk("out of memory for dma SEQ download\n");
		err = -ENOMEM;
		goto out;
	}
	ASD_DPRINTK("dma-ing %d bytes\n", size);

	for (page = 0; page < pages; page++) {
		int i;
		u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
			       (u32)MAX_DMA_OVLY_COUNT);

		memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
		asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
		asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
		reg = !page ? RESETOVLYDMA : 0;
		reg |= (STARTOVLYDMA | OVLYHALTERR);
		reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
		/* Start DMA. */
		asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

		for (i = PAUSE_TRIES*100; i > 0; i--) {
			u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
			if (!(dmadone & OVLYDMAACT))
				break;
			udelay(PAUSE_DELAY);
		}
	}

	reg = asd_read_reg_dword(asd_ha, COMSTAT);
	if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
	    || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)) {
		asd_printk("%s: error DMA-ing sequencer code\n",
			   pci_name(asd_ha->pcidev));
		err = -ENODEV;
	}

	asd_free_coherent(asd_ha, token);
out:
	asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);

	return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
}
#else /* ASD_DMA_MODE_DOWNLOAD */
static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			    u32 size, u8 lseq_mask)
{
	int i;
	u32 reg = 0;
	const u32 *prog = (u32 *) _prog;

	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
	reg |= PIOCMODE;

	asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
		    lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");

	for (i = 0; i < size; i += 4, prog++)
		asd_write_reg_dword(asd_ha, SPIODATA, *prog);

	reg = (reg & ~PIOCMODE) | OVLYHALTERR;
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
}
#endif /* ASD_DMA_MODE_DOWNLOAD */
/**
 * asd_seq_download_seqs - download the sequencer microcode
 * @asd_ha: pointer to host adapter structure
 *
 * Download the central and link sequencer microcode.
 */
static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
{
	int err;

	if (!asd_ha->hw_prof.enabled_phys) {
		asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
		return -ENODEV;
	}

	/* Download the CSEQ */
	ASD_DPRINTK("downloading CSEQ...\n");
	err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
	if (err) {
		asd_printk("CSEQ download failed:%d\n", err);
		return err;
	}

	/* Download the Link Sequencers code. All of the Link Sequencers
	 * microcode can be downloaded at the same time.
	 */
	ASD_DPRINTK("downloading LSEQs...\n");
	err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
			       asd_ha->hw_prof.enabled_phys);
	if (err) {
		/* Try it one at a time */
		u8 lseq;
		u8 lseq_mask = asd_ha->hw_prof.enabled_phys;

		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
			err = asd_download_seq(asd_ha, lseq_code,
					       lseq_code_size, 1<<lseq);
			if (err)
				break;
		}
	}
	if (err)
		asd_printk("LSEQs download failed:%d\n", err);

	return err;
}
/* ---------- Initializing the chip, chip memory, etc. ---------- */

/**
 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
{
	/* CSEQ Mode Independent, page 4 setup. */
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
	asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
	asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
	asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
	{
		u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
		u8 val = hweight8(con);
		asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
	}
	asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);

	/* CSEQ Mode independent, page 5 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);

	/* CSEQ Mode independent, page 6 setup. */
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
	asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
	/* Calculate the free scb mask. */
	{
		u16 cmdctx = asd_get_cmdctx_size(asd_ha);
		cmdctx = (~((cmdctx/128)-1)) >> 8;
		asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
	}
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
			   first_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
			   last_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);

	/* CSEQ Mode independent, page 7 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
	asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
	asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
}
/**
 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
{
	int i;
	int moffs;

	moffs = CSEQ_PAGE_SIZE * 2;

	/* CSEQ Mode dependent, modes 0-7, page 0 setup. */
	for (i = 0; i < 8; i++) {
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
		asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
	}

	/* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */

	/* CSEQ Mode dependent, mode 8, page 0 setup. */
	asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
	asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
	asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
	asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
			   (u16)last_scb_site_no+1);
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
			   (u16)asd_ha->hw_prof.max_ddbs);

	/* CSEQ Mode dependent, mode 8, page 1 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);

	/* CSEQ Mode dependent, mode 8, page 2 setup. */
	/* Tell the sequencer the bus address of the first SCB. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
			   asd_ha->seq.next_scb.dma_handle);
	ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
		    (unsigned long long)asd_ha->seq.next_scb.dma_handle);

	/* Tell the sequencer the first Done List entry address. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
			   asd_ha->seq.actual_dl->dma_handle);

	/* Initialize the Q_DONE_POINTER with the least significant
	 * 4 bytes of the first Done List address. */
	asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
			    ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));

	asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);

	/* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
}

/**
 * asd_init_cseq_scratch -- setup and init CSEQ
 * @asd_ha: pointer to host adapter structure
 *
 * Setup and initialize Central sequencers. Initialize the mode
 * independent and dependent scratch page to the default settings.
 */
static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
{
	asd_init_cseq_mip(asd_ha);
	asd_init_cseq_mdp(asd_ha);
}
/**
 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 */
static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
{
	int i;

	/* LSEQ Mode independent page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
	asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
			   ASD_NOTIFY_ENABLE_SPINUP);
	asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq), 0x08000000);
	asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);

	/* LSEQ Mode independent page 1 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);

	/* LSEQ Mode Independent page 2 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
	for (i = 0; i < 12; i += 4)
		asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);

	/* LSEQ Mode Independent page 3 setup. */

	/* Device present timer timeout */
	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
			    ASD_DEV_PRESENT_TIMEOUT);

	/* SATA interlock timer disabled */
	asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
			    ASD_SATA_INTERLOCK_TIMEOUT);

	/* STP shutdown timer timeout constant, IGNORED by the sequencer,
	 * always 0. */
	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
			    ASD_STP_SHUTDOWN_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
			    ASD_SRST_ASSERT_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
			    ASD_RCV_FIS_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
			    ASD_ONE_MILLISEC_TIMEOUT);

	/* COM_INIT timer */
	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
			    ASD_TEN_MILLISEC_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
			    ASD_SMP_RCV_TIMEOUT);
}
/**
 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 */
static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
{
	int i;
	u32 moffs;
	u16 ret_addr[] = {
		0xFFFF,		/* mode 0 */
		0xFFFF,		/* mode 1 */
		mode2_task,	/* mode 2 */
		0,
		0xFFFF,		/* mode 4/5 */
		0xFFFF,		/* mode 4/5 */
	};

	/*
	 * Mode 0,1,2 and 4/5 have common field on page 0 for the first
	 * 14 bytes.
	 */
	for (i = 0; i < 3; i++) {
		moffs = i * LSEQ_MODE_SCRATCH_SIZE;
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
				   ret_addr[i]);
		asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
		asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs, 0xFFFF);
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs, 0xFFFF);
		asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs, 0);
		asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs, 0);
	}

	/*
	 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
	 */
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
			   ret_addr[5]);
	asd_write_reg_word(asd_ha,
			   LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0xFFFF);
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0xFFFF);
	asd_write_reg_byte(asd_ha,
			   LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);

	/* LSEQ Mode dependent 0, page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
			   (u16)asd_ha->hw_prof.max_ddbs);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
			   (u16)last_scb_site_no+1);
	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
			   (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
			   (u16) LmM0INTEN_MASK & 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);

	/* LSEQ mode dependent, mode 1, page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);

	/* LSEQ Mode dependent mode 2, page 0 setup */
	asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);

	/* LSEQ Mode dependent, mode 4/5, page 0 setup. */
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
	/*
	 * Set the desired interval between transmissions of the NOTIFY
	 * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
	 */
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
			   ASD_NOTIFY_TIMEOUT - 1);
	/* No delay for the first NOTIFY to be sent to the attached target. */
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
			   ASD_NOTIFY_DOWN_COUNT);
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
			   ASD_NOTIFY_DOWN_COUNT);

	/* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
	for (i = 0; i < 2; i++) {
		int j;
		/* Start from Page 1 of Mode 0 and 1. */
		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
		/* All the fields of page 1 can be initialized to 0. */
		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j, 0);
	}

	/* LSEQ Mode dependent, mode 2, page 1 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);

	/* LSEQ Mode dependent, mode 4/5, page 1. */
	for (i = 0; i < LSEQ_PAGE_SIZE; i += 4)
		asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
	asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1, 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2, 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
	asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);

	/* LSEQ Mode dependent, mode 0, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);

	/* LSEQ Mode Dependent 1, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
	asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);

	/* LSEQ Mode Dependent 2, page 2 setup. */
	/* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
	 * i.e. always 0. */
	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);

	/* LSEQ Mode Dependent 4/5, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
}
/**
 * asd_init_lseq_scratch -- setup and init link sequencers
 * @asd_ha: pointer to host adapter struct
 */
static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
{
	u8 lseq;
	u8 lseq_mask;

	lseq_mask = asd_ha->hw_prof.enabled_phys;
	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		asd_init_lseq_mip(asd_ha, lseq);
		asd_init_lseq_mdp(asd_ha, lseq);
	}
}
/**
 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
 * @asd_ha: pointer to host adapter structure
 *
 * This should be done before initializing common CSEQ and LSEQ
 * scratch since those areas depend on some computed values here,
 * last_scb_site_no, etc.
 */
static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
{
	u16 site_no;
	u16 max_scbs = 0;

	for (site_no = asd_ha->hw_prof.max_scbs-1;
	     site_no != (u16) -1;
	     site_no--) {
		u16 i;

		/* Initialize all fields in the SCB site to 0. */
		for (i = 0; i < ASD_SCB_SIZE; i += 4)
			asd_scbsite_write_dword(asd_ha, site_no, i, 0);

		/* Initialize SCB Site Opcode field to invalid. */
		asd_scbsite_write_byte(asd_ha, site_no,
				       offsetof(struct scb_header, opcode),
				       0xFF);

		/* Initialize SCB Site Flags field to mean a response
		 * frame has been received, so that inadvertently
		 * received frames are dropped. */
		asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);

		/* Workaround needed by SEQ to fix a SATA issue is to exclude
		 * certain SCB sites from the free list. */
		if (!SCB_SITE_VALID(site_no))
			continue;

		if (last_scb_site_no == 0)
			last_scb_site_no = site_no;

		/* For every SCB site, we need to initialize the
		 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
		 * and SG Element Flag. */
		/* Q_NEXT field of the last SCB is invalidated. */
		asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);

		first_scb_site_no = site_no;
		max_scbs++;
	}
	asd_ha->hw_prof.max_scbs = max_scbs;
	ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
	ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
	ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
}
/**
 * asd_init_cseq_cio - initialize CSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
{
	int i;

	asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
	asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
	asd_ha->seq.scbpro = 0;
	asd_write_reg_dword(asd_ha, SCBPRO, 0);
	asd_write_reg_dword(asd_ha, CSEQCON, 0);

	/* Initialize CSEQ Mode 11 Interrupt Vectors.
	 * The addresses are 16 bit wide and in dword units.
	 * The values of their macros are in byte units.
	 * Thus we have to divide by 4. */
	asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
	asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
	asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);

	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
	asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);

	/* Initialize CSEQ Scratch Page to 0x04. */
	asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);

	/* Initialize CSEQ Mode[0-8] Dependent registers. */
	/* Initialize Scratch Page to 0. */
	for (i = 0; i < 9; i++)
		asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);

	/* Reset the ARP2 Program Count. */
	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

	for (i = 0; i < 8; i++) {
		/* Initialize Mode n Link m Interrupt Enable. */
		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
		/* Initialize Mode n Request Mailbox. */
		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
	}
}
/**
 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 */
static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
{
	u8 *sas_addr;
	int i;

	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
	asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);

	asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);

	/* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
	for (i = 0; i < 3; i++)
		asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);

	/* Initialize Mode 5 SCRATCHPAGE to 0. */
	asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);

	asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
	/* Initialize Mode 0,1,2 and 5 Interrupt Enable and
	 * Interrupt registers. */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
	/* Mode 1 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
	/* Mode 2 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
	/* Mode 5 */
	asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
	asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);

	/* Enable HW Timer status. */
	asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);

	/* Enable Primitive Status 0 and 1. */
	asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
	asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);

	/* Enable Frame Error. */
	asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
	asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);

	/* Initialize Mode 0 Transfer Level to 512. */
	asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
	/* Initialize Mode 1 Transfer Level to 256. */
	asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);

	/* Initialize Program Count. */
	asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

	/* Enable Blind SG Move. */
	asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
	asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
			   ASD_SATA_INTERLOCK_TIMEOUT);

	(void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));

	/* Clear Primitive Status 0 and 1. */
	asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
	asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);

	/* Clear HW Timer status. */
	asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);

	/* Clear DMA Errors for Mode 0 and 1. */
	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
	asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);

	/* Clear SG DMA Errors for Mode 0 and 1. */
	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
	asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);

	/* Clear Mode 0 Buffer Parity Error. */
	asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);

	/* Clear Mode 0 Frame Error register. */
	asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);

	/* Reset LSEQ external interrupt arbiter. */
	asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);

	/* Set the Phy SAS for the LmSEQ WWN. */
	sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
	for (i = 0; i < SAS_ADDR_SIZE; i++)
		asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);

	/* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
	asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);

	/* Set the Bus Inactivity Time Limit Timer. */
	asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);

	/* Enable SATA Port Multiplier. */
	asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);

	/* Initialize Interrupt Vector[0-10] address in Mode 3.
	 * See the comment on CSEQ_INT_* */
	asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
	asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
	asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
	asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
	asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
	asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
	asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
	asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
	asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
	asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
	asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
	/*
	 * Program the Link LED control, applicable only for
	 * Chip Rev. B or later.
	 */
	asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
			    (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));

	/* Set the Align Rate for SAS and STP mode. */
	asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
	asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
}
/**
 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
 * @asd_ha: pointer to host adapter struct
 */
static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
{
	int i;

	for (i = 0; i < 8; i++)
		asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
	for (i = 0; i < 8; i++)
		asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
	/* Reset the external interrupt arbiter. */
	asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
}
/**
 * asd_init_ddb_0 -- initialize DDB 0
 * @asd_ha: pointer to host adapter structure
 *
 * Initialize DDB site 0 which is used internally by the sequencer.
 */
static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
{
	int i;

	/* Zero out the DDB explicitly */
	for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i += 4)
		asd_ddbsite_write_dword(asd_ha, 0, i, 0);

	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
		asd_ha->hw_prof.max_ddbs-1);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
	asd_ddbsite_write_word(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
		asd_ha->hw_prof.num_phys * 2);
	asd_ddbsite_write_byte(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, settable_max_contexts), 0);
	asd_ddbsite_write_byte(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
	asd_ddbsite_write_byte(asd_ha, 0,
		offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
	/* DDB 0 is reserved */
	set_bit(0, asd_ha->hw_prof.ddb_bitmap);
}
static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
{
	unsigned int i;
	unsigned int ddb_site;

	for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
		for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i += 4)
			asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
}
/**
 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
{
	int lseq;
	u8 lseq_mask;

	/* Initialize DDB sites */
	asd_seq_init_ddb_sites(asd_ha);

	/* Initialize SCB sites. Done first to compute some values which
	 * the rest of the init code depends on. */
	asd_init_scb_sites(asd_ha);

	/* Initialize CSEQ Scratch RAM registers. */
	asd_init_cseq_scratch(asd_ha);

	/* Initialize LmSEQ Scratch RAM registers. */
	asd_init_lseq_scratch(asd_ha);

	/* Initialize CSEQ CIO registers. */
	asd_init_cseq_cio(asd_ha);

	asd_init_ddb_0(asd_ha);

	/* Initialize LmSEQ CIO registers. */
	lseq_mask = asd_ha->hw_prof.enabled_phys;
	for_each_sequencer(lseq_mask, lseq_mask, lseq)
		asd_init_lseq_cio(asd_ha, lseq);
	asd_post_init_cseq(asd_ha);
}
/**
 * asd_seq_start_cseq -- start the central sequencer, CSEQ
 * @asd_ha: pointer to host adapter structure
 */
static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
{
	/* Reset the ARP2 instruction to location zero. */
	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

	/* Unpause the CSEQ */
	return asd_unpause_cseq(asd_ha);
}

/**
 * asd_seq_start_lseq -- start a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: the link sequencer of interest
 */
static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
	/* Reset the ARP2 instruction to location zero. */
	asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

	/* Unpause the LmSEQ */
	return asd_seq_unpause_lseq(asd_ha, lseq);
}

int asd_release_firmware(void)
{
	release_firmware(sequencer_fw);
	return 0;
}
static int asd_request_firmware(struct asd_ha_struct *asd_ha)
{
	int err, i;
	struct sequencer_file_header header;
	const struct sequencer_file_header *hdr_ptr;
	u32 csum = 0;
	u16 *ptr_cseq_vecs, *ptr_lseq_vecs;

	if (sequencer_fw)
		/* already loaded */
		return 0;

	err = request_firmware(&sequencer_fw,
			       SAS_RAZOR_SEQUENCER_FW_FILE,
			       &asd_ha->pcidev->dev);
	if (err)
		return err;

	hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data;

	header.csum = le32_to_cpu(hdr_ptr->csum);
	header.major = le32_to_cpu(hdr_ptr->major);
	header.minor = le32_to_cpu(hdr_ptr->minor);
	header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
	header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
	header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
	header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
	header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
	header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
	header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
	header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
	header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
	header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
	header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);

	for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
		csum += sequencer_fw->data[i];

	if (csum != header.csum) {
		asd_printk("Firmware file checksum mismatch\n");
		return -EINVAL;
	}

	if (header.cseq_table_size != CSEQ_NUM_VECS ||
	    header.lseq_table_size != LSEQ_NUM_VECS) {
		asd_printk("Firmware file table size mismatch\n");
		return -EINVAL;
	}

	asd_printk("Found sequencer Firmware version %d.%d (%s)\n",
		   header.major, header.minor, hdr_ptr->version);

	if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) {
		asd_printk("Firmware Major Version Mismatch; "
			   "driver requires version %d.X",
			   SAS_RAZOR_SEQUENCER_FW_MAJOR);
		return -EINVAL;
	}

	ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
	ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
	mode2_task = header.mode2_task;
	cseq_idle_loop = header.cseq_idle_loop;
	lseq_idle_loop = header.lseq_idle_loop;

	for (i = 0; i < CSEQ_NUM_VECS; i++)
		cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);

	for (i = 0; i < LSEQ_NUM_VECS; i++)
		lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);

	cseq_code = &sequencer_fw->data[header.cseq_code_offset];
	cseq_code_size = header.cseq_code_size;
	lseq_code = &sequencer_fw->data[header.lseq_code_offset];
	lseq_code_size = header.lseq_code_size;

	return 0;
}
int asd_init_seqs(struct asd_ha_struct *asd_ha)
{
	int err;

	err = asd_request_firmware(asd_ha);
	if (err) {
		asd_printk("Failed to load sequencer firmware file %s, error %d\n",
			   SAS_RAZOR_SEQUENCER_FW_FILE, err);
		return err;
	}

	err = asd_seq_download_seqs(asd_ha);
	if (err) {
		asd_printk("couldn't download sequencers for %s\n",
			   pci_name(asd_ha->pcidev));
		return err;
	}

	asd_seq_setup_seqs(asd_ha);

	return 0;
}

int asd_start_seqs(struct asd_ha_struct *asd_ha)
{
	int err;
	u8 lseq_mask;
	int lseq;

	err = asd_seq_start_cseq(asd_ha);
	if (err) {
		asd_printk("couldn't start CSEQ for %s\n",
			   pci_name(asd_ha->pcidev));
		return err;
	}

	lseq_mask = asd_ha->hw_prof.enabled_phys;
	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		err = asd_seq_start_lseq(asd_ha, lseq);
		if (err) {
			asd_printk("couldn't start LSEQ %d for %s\n", lseq,
				   pci_name(asd_ha->pcidev));
			return err;
		}
	}
	return 0;
}
/**
 * asd_update_port_links -- update port_map_by_links and phy_is_up
 * @asd_ha: pointer to host adapter structure
 * @phy: pointer to the phy which has been added to a port
 *
 * 1) When a link reset has completed and we got BYTES DMAED with a
 * valid frame we call this function for that phy, to indicate that
 * the phy is up, i.e. we update the phy_is_up in DDB 0. The
 * sequencer checks phy_is_up when pending SCBs are to be sent, and
 * when an open address frame has been received.
 *
 * 2) When we know of ports, we call this function to update the map
 * of phys participating in that port, i.e. we update the
 * port_map_by_links in DDB 0. When a HARD_RESET primitive has been
 * received, the sequencer disables all phys in that port.
 * port_map_by_links is also used as the conn_mask byte in the
 * initiator/target port DDB.
 */
void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
{
	const u8 phy_mask = (u8) phy->asd_port->phy_mask;
	u8 phy_is_up;
	u8 mask;
	int i, err;
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
	for_each_phy(phy_mask, mask, i)
		asd_ddbsite_write_byte(asd_ha, 0,
				       offsetof(struct asd_ddb_seq_shared,
						port_map_by_links)+i, phy_mask);

	for (i = 0; i < 12; i++) {
		phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
			  offsetof(struct asd_ddb_seq_shared, phy_is_up));
		err = asd_ddbsite_update_byte(asd_ha, 0,
				offsetof(struct asd_ddb_seq_shared, phy_is_up),
				phy_is_up,
				phy_is_up | phy_mask);
		if (!err)
			break;
		else if (err == -EFAULT) {
			asd_printk("phy_is_up: parity error in DDB 0\n");
			break;
		}
	}
	spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);

	if (err)
		asd_printk("couldn't update DDB 0:error:%d\n", err);
}

MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);