
/drivers/scsi/qla2xxx/qla_dbg.c

https://bitbucket.org/wisechild/galaxy-nexus
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2011 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/delay.h>
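/*
 * Fill in the common dump header: the running firmware version and the
 * adapter's PCI identifiers, stored in network byte order (htonl).
 */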
  9. static inline void
  10. qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
  11. {
  12. fw_dump->fw_major_version = htonl(ha->fw_major_version);
  13. fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
  14. fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
  15. fw_dump->fw_attributes = htonl(ha->fw_attributes);
  16. fw_dump->vendor = htonl(ha->pdev->vendor);
  17. fw_dump->device = htonl(ha->pdev->device);
  18. fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
  19. fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
  20. }
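/*
 * Copy the base request and response rings into the dump and return a
 * pointer just past the copied response ring.
 */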
  21. static inline void *
  22. qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
  23. {
  24. struct req_que *req = ha->req_q_map[0];
  25. struct rsp_que *rsp = ha->rsp_q_map[0];
  26. /* Request queue. */
  27. memcpy(ptr, req->ring, req->length *
  28. sizeof(request_t));
  29. /* Response queue. */
  30. ptr += req->length * sizeof(request_t);
  31. memcpy(ptr, rsp->ring, rsp->length *
  32. sizeof(response_t));
  33. return ptr + (rsp->length * sizeof(response_t));
  34. }
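/*
 * Read RISC RAM on ISP24xx-class adapters via the
 * MBC_DUMP_RISC_RAM_EXTENDED mailbox command: each iteration asks the
 * firmware to DMA up to GID_LIST_SIZE bytes into the (reused) GID list
 * buffer, busy-waits on host_status for the mailbox completion
 * interrupt, then byte-swaps the chunk (swab32) into ram[].
 */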
  35. static int
  36. qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
  37. uint32_t ram_dwords, void **nxt)
  38. {
  39. int rval;
  40. uint32_t cnt, stat, timer, dwords, idx;
  41. uint16_t mb0;
  42. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  43. dma_addr_t dump_dma = ha->gid_list_dma;
  44. uint32_t *dump = (uint32_t *)ha->gid_list;
  45. rval = QLA_SUCCESS;
  46. mb0 = 0;
  47. WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
  48. clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  49. dwords = GID_LIST_SIZE / 4;
  50. for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
  51. cnt += dwords, addr += dwords) {
  52. if (cnt + dwords > ram_dwords)
  53. dwords = ram_dwords - cnt;
  54. WRT_REG_WORD(&reg->mailbox1, LSW(addr));
  55. WRT_REG_WORD(&reg->mailbox8, MSW(addr));
  56. WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
  57. WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
  58. WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
  59. WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
  60. WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
  61. WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
  62. WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
  63. for (timer = 6000000; timer; timer--) {
  64. /* Check for pending interrupts. */
  65. stat = RD_REG_DWORD(&reg->host_status);
  66. if (stat & HSRX_RISC_INT) {
  67. stat &= 0xff;
  68. if (stat == 0x1 || stat == 0x2 ||
  69. stat == 0x10 || stat == 0x11) {
  70. set_bit(MBX_INTERRUPT,
  71. &ha->mbx_cmd_flags);
  72. mb0 = RD_REG_WORD(&reg->mailbox0);
  73. WRT_REG_DWORD(&reg->hccr,
  74. HCCRX_CLR_RISC_INT);
  75. RD_REG_DWORD(&reg->hccr);
  76. break;
  77. }
  78. /* Clear this intr; it wasn't a mailbox intr */
  79. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  80. RD_REG_DWORD(&reg->hccr);
  81. }
  82. udelay(5);
  83. }
  84. if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
  85. rval = mb0 & MBS_MASK;
  86. for (idx = 0; idx < dwords; idx++)
  87. ram[cnt + idx] = swab32(dump[idx]);
  88. } else {
  89. rval = QLA_FUNCTION_FAILED;
  90. }
  91. }
  92. *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
  93. return rval;
  94. }
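/*
 * Dump both firmware memory regions: code RAM at address 0x20000 and
 * external memory at 0x100000.  On success *nxt points just past the
 * last dword written.
 */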
  95. static int
  96. qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
  97. uint32_t cram_size, void **nxt)
  98. {
  99. int rval;
  100. /* Code RAM. */
  101. rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
  102. if (rval != QLA_SUCCESS)
  103. return rval;
  104. /* External Memory. */
  105. return qla24xx_dump_ram(ha, 0x100000, *nxt,
  106. ha->fw_memory_size - 0x100000 + 1, nxt);
  107. }
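/*
 * Select the register window at 'iobase' and read 'count' dwords from
 * it into 'buf' (big-endian), returning the advanced buffer pointer.
 */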
  108. static uint32_t *
  109. qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
  110. uint32_t count, uint32_t *buf)
  111. {
  112. uint32_t __iomem *dmp_reg;
  113. WRT_REG_DWORD(&reg->iobase_addr, iobase);
  114. dmp_reg = &reg->iobase_window;
  115. while (count--)
  116. *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
  117. return buf;
  118. }
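/*
 * Request a RISC pause and poll host_status for HSRX_RISC_PAUSED for up
 * to roughly 3 seconds (30000 * 100us) before reporting a timeout.
 */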
  119. static inline int
  120. qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
  121. {
  122. int rval = QLA_SUCCESS;
  123. uint32_t cnt;
  124. WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
  125. for (cnt = 30000;
  126. ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
  127. rval == QLA_SUCCESS; cnt--) {
  128. if (cnt)
  129. udelay(100);
  130. else
  131. rval = QLA_FUNCTION_TIMEOUT;
  132. }
  133. return rval;
  134. }
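/*
 * Soft-reset the ISP: quiesce DMA, assert the soft reset, wait for
 * pending NVRAM accesses and the reset bit to clear, release the RISC,
 * then poll mailbox0 until it reads zero.
 */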
  135. static int
  136. qla24xx_soft_reset(struct qla_hw_data *ha)
  137. {
  138. int rval = QLA_SUCCESS;
  139. uint32_t cnt;
  140. uint16_t mb0, wd;
  141. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  142. /* Reset RISC. */
  143. WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
  144. for (cnt = 0; cnt < 30000; cnt++) {
  145. if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
  146. break;
  147. udelay(10);
  148. }
  149. WRT_REG_DWORD(&reg->ctrl_status,
  150. CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
  151. pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
  152. udelay(100);
  153. /* Wait for firmware to complete NVRAM accesses. */
  154. mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
  155. for (cnt = 10000 ; cnt && mb0; cnt--) {
  156. udelay(5);
  157. mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
  158. barrier();
  159. }
  160. /* Wait for soft-reset to complete. */
  161. for (cnt = 0; cnt < 30000; cnt++) {
  162. if ((RD_REG_DWORD(&reg->ctrl_status) &
  163. CSRX_ISP_SOFT_RESET) == 0)
  164. break;
  165. udelay(10);
  166. }
  167. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
  168. RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
  169. for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
  170. rval == QLA_SUCCESS; cnt--) {
  171. if (cnt)
  172. udelay(100);
  173. else
  174. rval = QLA_FUNCTION_TIMEOUT;
  175. }
  176. return rval;
  177. }
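/*
 * ISP23xx counterpart of qla24xx_dump_ram(): the same
 * MBC_DUMP_RISC_RAM_EXTENDED handshake, driven through the 2xxx mailbox
 * registers and copied out 16 bits at a time (swab16).
 */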
  178. static int
  179. qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
  180. uint32_t ram_words, void **nxt)
  181. {
  182. int rval;
  183. uint32_t cnt, stat, timer, words, idx;
  184. uint16_t mb0;
  185. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  186. dma_addr_t dump_dma = ha->gid_list_dma;
  187. uint16_t *dump = (uint16_t *)ha->gid_list;
  188. rval = QLA_SUCCESS;
  189. mb0 = 0;
  190. WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
  191. clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  192. words = GID_LIST_SIZE / 2;
  193. for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
  194. cnt += words, addr += words) {
  195. if (cnt + words > ram_words)
  196. words = ram_words - cnt;
  197. WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
  198. WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
  199. WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
  200. WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
  201. WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
  202. WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
  203. WRT_MAILBOX_REG(ha, reg, 4, words);
  204. WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
  205. for (timer = 6000000; timer; timer--) {
  206. /* Check for pending interrupts. */
  207. stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
  208. if (stat & HSR_RISC_INT) {
  209. stat &= 0xff;
  210. if (stat == 0x1 || stat == 0x2) {
  211. set_bit(MBX_INTERRUPT,
  212. &ha->mbx_cmd_flags);
  213. mb0 = RD_MAILBOX_REG(ha, reg, 0);
  214. /* Release mailbox registers. */
  215. WRT_REG_WORD(&reg->semaphore, 0);
  216. WRT_REG_WORD(&reg->hccr,
  217. HCCR_CLR_RISC_INT);
  218. RD_REG_WORD(&reg->hccr);
  219. break;
  220. } else if (stat == 0x10 || stat == 0x11) {
  221. set_bit(MBX_INTERRUPT,
  222. &ha->mbx_cmd_flags);
  223. mb0 = RD_MAILBOX_REG(ha, reg, 0);
  224. WRT_REG_WORD(&reg->hccr,
  225. HCCR_CLR_RISC_INT);
  226. RD_REG_WORD(&reg->hccr);
  227. break;
  228. }
  229. /* clear this intr; it wasn't a mailbox intr */
  230. WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
  231. RD_REG_WORD(&reg->hccr);
  232. }
  233. udelay(5);
  234. }
  235. if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
  236. rval = mb0 & MBS_MASK;
  237. for (idx = 0; idx < words; idx++)
  238. ram[cnt + idx] = swab16(dump[idx]);
  239. } else {
  240. rval = QLA_FUNCTION_FAILED;
  241. }
  242. }
  243. *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
  244. return rval;
  245. }
  246. static inline void
  247. qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
  248. uint16_t *buf)
  249. {
  250. uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
  251. while (count--)
  252. *buf++ = htons(RD_REG_WORD(dmp_reg++));
  253. }
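/*
 * Append the Extended Firmware Trace (EFT) buffer, if one has been
 * allocated.
 */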
  254. static inline void *
  255. qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
  256. {
  257. if (!ha->eft)
  258. return ptr;
  259. memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
  260. return ptr + ntohl(ha->fw_dump->eft_size);
  261. }
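/*
 * Append a Fibre Channel Event (FCE) trace chain record: the chain
 * header, the eight FCE-enable mailbox values and the FCE buffer
 * contents.  *last_chain tracks the most recent chain header so the
 * final record can be flagged DUMP_CHAIN_LAST.
 */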
  262. static inline void *
  263. qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
  264. {
  265. uint32_t cnt;
  266. uint32_t *iter_reg;
  267. struct qla2xxx_fce_chain *fcec = ptr;
  268. if (!ha->fce)
  269. return ptr;
  270. *last_chain = &fcec->type;
  271. fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
  272. fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
  273. fce_calc_size(ha->fce_bufs));
  274. fcec->size = htonl(fce_calc_size(ha->fce_bufs));
  275. fcec->addr_l = htonl(LSD(ha->fce_dma));
  276. fcec->addr_h = htonl(MSD(ha->fce_dma));
  277. iter_reg = fcec->eregs;
  278. for (cnt = 0; cnt < 8; cnt++)
  279. *iter_reg++ = htonl(ha->fce_mb[cnt]);
  280. memcpy(iter_reg, ha->fce, ntohl(fcec->size));
  281. return iter_reg;
  282. }
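/*
 * Append a multi-queue chain record with the request/response in and
 * out pointers for each configured queue pair; a no-op unless
 * multi-queue (ha->mqenable) is active.
 */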
  283. static inline void *
  284. qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
  285. {
  286. uint32_t cnt, que_idx;
  287. uint8_t que_cnt;
  288. struct qla2xxx_mq_chain *mq = ptr;
  289. struct device_reg_25xxmq __iomem *reg;
  290. if (!ha->mqenable)
  291. return ptr;
  292. mq = ptr;
  293. *last_chain = &mq->type;
  294. mq->type = __constant_htonl(DUMP_CHAIN_MQ);
  295. mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
  296. que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
  297. ha->max_req_queues : ha->max_rsp_queues;
  298. mq->count = htonl(que_cnt);
  299. for (cnt = 0; cnt < que_cnt; cnt++) {
  300. reg = (struct device_reg_25xxmq *) ((void *)
  301. ha->mqiobase + cnt * QLA_QUE_PAGE);
  302. que_idx = cnt * 4;
  303. mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
  304. mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
  305. mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
  306. mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
  307. }
  308. return ptr + sizeof(struct qla2xxx_mq_chain);
  309. }
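/*
 * Common epilogue for the *_fw_dump() routines: log the outcome, set
 * ha->fw_dumped on success and post a uevent so user space can fetch
 * the dump.
 */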
  310. static void
  311. qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
  312. {
  313. struct qla_hw_data *ha = vha->hw;
  314. if (rval != QLA_SUCCESS) {
  315. qla_printk(KERN_WARNING, ha,
  316. "Failed to dump firmware (%x)!!!\n", rval);
  317. ha->fw_dumped = 0;
  318. } else {
  319. qla_printk(KERN_INFO, ha,
  320. "Firmware dump saved to temp buffer (%ld/%p).\n",
  321. vha->host_no, ha->fw_dump);
  322. ha->fw_dumped = 1;
  323. qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
  324. }
  325. }
  326. /**
  327. * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
  328. * @vha: HA context
  329. * @hardware_locked: Called with the hardware_lock
  330. */
  331. void
  332. qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
  333. {
  334. int rval;
  335. uint32_t cnt;
  336. struct qla_hw_data *ha = vha->hw;
  337. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  338. uint16_t __iomem *dmp_reg;
  339. unsigned long flags;
  340. struct qla2300_fw_dump *fw;
  341. void *nxt;
  342. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  343. flags = 0;
  344. if (!hardware_locked)
  345. spin_lock_irqsave(&ha->hardware_lock, flags);
  346. if (!ha->fw_dump) {
  347. qla_printk(KERN_WARNING, ha,
  348. "No buffer available for dump!!!\n");
  349. goto qla2300_fw_dump_failed;
  350. }
  351. if (ha->fw_dumped) {
  352. qla_printk(KERN_WARNING, ha,
  353. "Firmware has been previously dumped (%p) -- ignoring "
  354. "request...\n", ha->fw_dump);
  355. goto qla2300_fw_dump_failed;
  356. }
  357. fw = &ha->fw_dump->isp.isp23;
  358. qla2xxx_prep_dump(ha, ha->fw_dump);
  359. rval = QLA_SUCCESS;
  360. fw->hccr = htons(RD_REG_WORD(&reg->hccr));
  361. /* Pause RISC. */
  362. WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
  363. if (IS_QLA2300(ha)) {
  364. for (cnt = 30000;
  365. (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
  366. rval == QLA_SUCCESS; cnt--) {
  367. if (cnt)
  368. udelay(100);
  369. else
  370. rval = QLA_FUNCTION_TIMEOUT;
  371. }
  372. } else {
  373. RD_REG_WORD(&reg->hccr); /* PCI Posting. */
  374. udelay(10);
  375. }
  376. if (rval == QLA_SUCCESS) {
  377. dmp_reg = &reg->flash_address;
  378. for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
  379. fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  380. dmp_reg = &reg->u.isp2300.req_q_in;
  381. for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
  382. fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  383. dmp_reg = &reg->u.isp2300.mailbox0;
  384. for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
  385. fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  386. WRT_REG_WORD(&reg->ctrl_status, 0x40);
  387. qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
  388. WRT_REG_WORD(&reg->ctrl_status, 0x50);
  389. qla2xxx_read_window(reg, 48, fw->dma_reg);
  390. WRT_REG_WORD(&reg->ctrl_status, 0x00);
  391. dmp_reg = &reg->risc_hw;
  392. for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
  393. fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  394. WRT_REG_WORD(&reg->pcr, 0x2000);
  395. qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
  396. WRT_REG_WORD(&reg->pcr, 0x2200);
  397. qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
  398. WRT_REG_WORD(&reg->pcr, 0x2400);
  399. qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
  400. WRT_REG_WORD(&reg->pcr, 0x2600);
  401. qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
  402. WRT_REG_WORD(&reg->pcr, 0x2800);
  403. qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
  404. WRT_REG_WORD(&reg->pcr, 0x2A00);
  405. qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
  406. WRT_REG_WORD(&reg->pcr, 0x2C00);
  407. qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
  408. WRT_REG_WORD(&reg->pcr, 0x2E00);
  409. qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
  410. WRT_REG_WORD(&reg->ctrl_status, 0x10);
  411. qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
  412. WRT_REG_WORD(&reg->ctrl_status, 0x20);
  413. qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
  414. WRT_REG_WORD(&reg->ctrl_status, 0x30);
  415. qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
  416. /* Reset RISC. */
  417. WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
  418. for (cnt = 0; cnt < 30000; cnt++) {
  419. if ((RD_REG_WORD(&reg->ctrl_status) &
  420. CSR_ISP_SOFT_RESET) == 0)
  421. break;
  422. udelay(10);
  423. }
  424. }
  425. if (!IS_QLA2300(ha)) {
  426. for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
  427. rval == QLA_SUCCESS; cnt--) {
  428. if (cnt)
  429. udelay(100);
  430. else
  431. rval = QLA_FUNCTION_TIMEOUT;
  432. }
  433. }
  434. /* Get RISC SRAM. */
  435. if (rval == QLA_SUCCESS)
  436. rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
  437. sizeof(fw->risc_ram) / 2, &nxt);
  438. /* Get stack SRAM. */
  439. if (rval == QLA_SUCCESS)
  440. rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
  441. sizeof(fw->stack_ram) / 2, &nxt);
  442. /* Get data SRAM. */
  443. if (rval == QLA_SUCCESS)
  444. rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
  445. ha->fw_memory_size - 0x11000 + 1, &nxt);
  446. if (rval == QLA_SUCCESS)
  447. qla2xxx_copy_queues(ha, nxt);
  448. qla2xxx_dump_post_process(base_vha, rval);
  449. qla2300_fw_dump_failed:
  450. if (!hardware_locked)
  451. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  452. }
  453. /**
  454. * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
  455. * @vha: HA context
  456. * @hardware_locked: Called with the hardware_lock
  457. */
  458. void
  459. qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
  460. {
  461. int rval;
  462. uint32_t cnt, timer;
  463. uint16_t risc_address;
  464. uint16_t mb0, mb2;
  465. struct qla_hw_data *ha = vha->hw;
  466. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  467. uint16_t __iomem *dmp_reg;
  468. unsigned long flags;
  469. struct qla2100_fw_dump *fw;
  470. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  471. risc_address = 0;
  472. mb0 = mb2 = 0;
  473. flags = 0;
  474. if (!hardware_locked)
  475. spin_lock_irqsave(&ha->hardware_lock, flags);
  476. if (!ha->fw_dump) {
  477. qla_printk(KERN_WARNING, ha,
  478. "No buffer available for dump!!!\n");
  479. goto qla2100_fw_dump_failed;
  480. }
  481. if (ha->fw_dumped) {
  482. qla_printk(KERN_WARNING, ha,
  483. "Firmware has been previously dumped (%p) -- ignoring "
  484. "request...\n", ha->fw_dump);
  485. goto qla2100_fw_dump_failed;
  486. }
  487. fw = &ha->fw_dump->isp.isp21;
  488. qla2xxx_prep_dump(ha, ha->fw_dump);
  489. rval = QLA_SUCCESS;
  490. fw->hccr = htons(RD_REG_WORD(&reg->hccr));
  491. /* Pause RISC. */
  492. WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
  493. for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
  494. rval == QLA_SUCCESS; cnt--) {
  495. if (cnt)
  496. udelay(100);
  497. else
  498. rval = QLA_FUNCTION_TIMEOUT;
  499. }
  500. if (rval == QLA_SUCCESS) {
  501. dmp_reg = &reg->flash_address;
  502. for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
  503. fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  504. dmp_reg = &reg->u.isp2100.mailbox0;
  505. for (cnt = 0; cnt < ha->mbx_count; cnt++) {
  506. if (cnt == 8)
  507. dmp_reg = &reg->u_end.isp2200.mailbox8;
  508. fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  509. }
  510. dmp_reg = &reg->u.isp2100.unused_2[0];
  511. for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
  512. fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  513. WRT_REG_WORD(&reg->ctrl_status, 0x00);
  514. dmp_reg = &reg->risc_hw;
  515. for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
  516. fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
  517. WRT_REG_WORD(&reg->pcr, 0x2000);
  518. qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
  519. WRT_REG_WORD(&reg->pcr, 0x2100);
  520. qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
  521. WRT_REG_WORD(&reg->pcr, 0x2200);
  522. qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
  523. WRT_REG_WORD(&reg->pcr, 0x2300);
  524. qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
  525. WRT_REG_WORD(&reg->pcr, 0x2400);
  526. qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
  527. WRT_REG_WORD(&reg->pcr, 0x2500);
  528. qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
  529. WRT_REG_WORD(&reg->pcr, 0x2600);
  530. qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
  531. WRT_REG_WORD(&reg->pcr, 0x2700);
  532. qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
  533. WRT_REG_WORD(&reg->ctrl_status, 0x10);
  534. qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
  535. WRT_REG_WORD(&reg->ctrl_status, 0x20);
  536. qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
  537. WRT_REG_WORD(&reg->ctrl_status, 0x30);
  538. qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
  539. /* Reset the ISP. */
  540. WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
  541. }
  542. for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
  543. rval == QLA_SUCCESS; cnt--) {
  544. if (cnt)
  545. udelay(100);
  546. else
  547. rval = QLA_FUNCTION_TIMEOUT;
  548. }
  549. /* Pause RISC. */
  550. if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
  551. (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
  552. WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
  553. for (cnt = 30000;
  554. (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
  555. rval == QLA_SUCCESS; cnt--) {
  556. if (cnt)
  557. udelay(100);
  558. else
  559. rval = QLA_FUNCTION_TIMEOUT;
  560. }
  561. if (rval == QLA_SUCCESS) {
  562. /* Set memory configuration and timing. */
  563. if (IS_QLA2100(ha))
  564. WRT_REG_WORD(&reg->mctr, 0xf1);
  565. else
  566. WRT_REG_WORD(&reg->mctr, 0xf2);
  567. RD_REG_WORD(&reg->mctr); /* PCI Posting. */
  568. /* Release RISC. */
  569. WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
  570. }
  571. }
  572. if (rval == QLA_SUCCESS) {
  573. /* Get RISC SRAM. */
  574. risc_address = 0x1000;
  575. WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
  576. clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  577. }
  578. for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
  579. cnt++, risc_address++) {
  580. WRT_MAILBOX_REG(ha, reg, 1, risc_address);
  581. WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
  582. for (timer = 6000000; timer != 0; timer--) {
  583. /* Check for pending interrupts. */
  584. if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
  585. if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
  586. set_bit(MBX_INTERRUPT,
  587. &ha->mbx_cmd_flags);
  588. mb0 = RD_MAILBOX_REG(ha, reg, 0);
  589. mb2 = RD_MAILBOX_REG(ha, reg, 2);
  590. WRT_REG_WORD(&reg->semaphore, 0);
  591. WRT_REG_WORD(&reg->hccr,
  592. HCCR_CLR_RISC_INT);
  593. RD_REG_WORD(&reg->hccr);
  594. break;
  595. }
  596. WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
  597. RD_REG_WORD(&reg->hccr);
  598. }
  599. udelay(5);
  600. }
  601. if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
  602. rval = mb0 & MBS_MASK;
  603. fw->risc_ram[cnt] = htons(mb2);
  604. } else {
  605. rval = QLA_FUNCTION_FAILED;
  606. }
  607. }
  608. if (rval == QLA_SUCCESS)
  609. qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
  610. qla2xxx_dump_post_process(base_vha, rval);
  611. qla2100_fw_dump_failed:
  612. if (!hardware_locked)
  613. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  614. }
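/**
 * qla24xx_fw_dump() - Dumps binary data from the 24xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 *
 * Pauses the RISC, captures the host interface, shadow, mailbox and
 * windowed register banks, soft-resets the ISP, dumps code RAM and
 * external memory, and finally appends the request/response queues and
 * the EFT buffer.
 */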
  615. void
  616. qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
  617. {
  618. int rval;
  619. uint32_t cnt;
  620. uint32_t risc_address;
  621. struct qla_hw_data *ha = vha->hw;
  622. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  623. uint32_t __iomem *dmp_reg;
  624. uint32_t *iter_reg;
  625. uint16_t __iomem *mbx_reg;
  626. unsigned long flags;
  627. struct qla24xx_fw_dump *fw;
  628. uint32_t ext_mem_cnt;
  629. void *nxt;
  630. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  631. if (IS_QLA82XX(ha))
  632. return;
  633. risc_address = ext_mem_cnt = 0;
  634. flags = 0;
  635. if (!hardware_locked)
  636. spin_lock_irqsave(&ha->hardware_lock, flags);
  637. if (!ha->fw_dump) {
  638. qla_printk(KERN_WARNING, ha,
  639. "No buffer available for dump!!!\n");
  640. goto qla24xx_fw_dump_failed;
  641. }
  642. if (ha->fw_dumped) {
  643. qla_printk(KERN_WARNING, ha,
  644. "Firmware has been previously dumped (%p) -- ignoring "
  645. "request...\n", ha->fw_dump);
  646. goto qla24xx_fw_dump_failed;
  647. }
  648. fw = &ha->fw_dump->isp.isp24;
  649. qla2xxx_prep_dump(ha, ha->fw_dump);
  650. fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
  651. /* Pause RISC. */
  652. rval = qla24xx_pause_risc(reg);
  653. if (rval != QLA_SUCCESS)
  654. goto qla24xx_fw_dump_failed_0;
  655. /* Host interface registers. */
  656. dmp_reg = &reg->flash_addr;
  657. for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
  658. fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
  659. /* Disable interrupts. */
  660. WRT_REG_DWORD(&reg->ictrl, 0);
  661. RD_REG_DWORD(&reg->ictrl);
  662. /* Shadow registers. */
  663. WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
  664. RD_REG_DWORD(&reg->iobase_addr);
  665. WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
  666. fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  667. WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
  668. fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  669. WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
  670. fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  671. WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
  672. fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  673. WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
  674. fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  675. WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
  676. fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  677. WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
  678. fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  679. /* Mailbox registers. */
  680. mbx_reg = &reg->mailbox0;
  681. for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
  682. fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
  683. /* Transfer sequence registers. */
  684. iter_reg = fw->xseq_gp_reg;
  685. iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
  686. iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
  687. iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
  688. iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
  689. iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
  690. iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
  691. iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
  692. qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
  693. qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
  694. qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
  695. /* Receive sequence registers. */
  696. iter_reg = fw->rseq_gp_reg;
  697. iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
  698. iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
  699. iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
  700. iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
  701. iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
  702. iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
  703. iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
  704. qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
  705. qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
  706. qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
  707. qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
  708. /* Command DMA registers. */
  709. qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
  710. /* Queues. */
  711. iter_reg = fw->req0_dma_reg;
  712. iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
  713. dmp_reg = &reg->iobase_q;
  714. for (cnt = 0; cnt < 7; cnt++)
  715. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  716. iter_reg = fw->resp0_dma_reg;
  717. iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
  718. dmp_reg = &reg->iobase_q;
  719. for (cnt = 0; cnt < 7; cnt++)
  720. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  721. iter_reg = fw->req1_dma_reg;
  722. iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
  723. dmp_reg = &reg->iobase_q;
  724. for (cnt = 0; cnt < 7; cnt++)
  725. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  726. /* Transmit DMA registers. */
  727. iter_reg = fw->xmt0_dma_reg;
  728. iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
  729. qla24xx_read_window(reg, 0x7610, 16, iter_reg);
  730. iter_reg = fw->xmt1_dma_reg;
  731. iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
  732. qla24xx_read_window(reg, 0x7630, 16, iter_reg);
  733. iter_reg = fw->xmt2_dma_reg;
  734. iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
  735. qla24xx_read_window(reg, 0x7650, 16, iter_reg);
  736. iter_reg = fw->xmt3_dma_reg;
  737. iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
  738. qla24xx_read_window(reg, 0x7670, 16, iter_reg);
  739. iter_reg = fw->xmt4_dma_reg;
  740. iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
  741. qla24xx_read_window(reg, 0x7690, 16, iter_reg);
  742. qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
  743. /* Receive DMA registers. */
  744. iter_reg = fw->rcvt0_data_dma_reg;
  745. iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
  746. qla24xx_read_window(reg, 0x7710, 16, iter_reg);
  747. iter_reg = fw->rcvt1_data_dma_reg;
  748. iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
  749. qla24xx_read_window(reg, 0x7730, 16, iter_reg);
  750. /* RISC registers. */
  751. iter_reg = fw->risc_gp_reg;
  752. iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
  753. iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
  754. iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
  755. iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
  756. iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
  757. iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
  758. iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
  759. qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
  760. /* Local memory controller registers. */
  761. iter_reg = fw->lmc_reg;
  762. iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
  763. iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
  764. iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
  765. iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
  766. iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
  767. iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
  768. qla24xx_read_window(reg, 0x3060, 16, iter_reg);
  769. /* Fibre Protocol Module registers. */
  770. iter_reg = fw->fpm_hdw_reg;
  771. iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
  772. iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
  773. iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
  774. iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
  775. iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
  776. iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
  777. iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
  778. iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
  779. iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
  780. iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
  781. iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
  782. qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
  783. /* Frame Buffer registers. */
  784. iter_reg = fw->fb_hdw_reg;
  785. iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
  786. iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
  787. iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
  788. iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
  789. iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
  790. iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
  791. iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
  792. iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
  793. iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
  794. iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
  795. qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
  796. rval = qla24xx_soft_reset(ha);
  797. if (rval != QLA_SUCCESS)
  798. goto qla24xx_fw_dump_failed_0;
  799. rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
  800. &nxt);
  801. if (rval != QLA_SUCCESS)
  802. goto qla24xx_fw_dump_failed_0;
  803. nxt = qla2xxx_copy_queues(ha, nxt);
  804. qla24xx_copy_eft(ha, nxt);
  805. qla24xx_fw_dump_failed_0:
  806. qla2xxx_dump_post_process(base_vha, rval);
  807. qla24xx_fw_dump_failed:
  808. if (!hardware_locked)
  809. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  810. }
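/**
 * qla25xx_fw_dump() - Dumps binary data from the 25xx firmware.
 * @vha: HA context
 * @hardware_locked: Called with the hardware_lock
 *
 * Same flow as qla24xx_fw_dump(), plus the PCIe, RISC I/O and auxiliary
 * sequence registers and the chained MQ/FCE records appended at
 * chain_offset; the dump header version is set to 2.
 */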
  811. void
  812. qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
  813. {
  814. int rval;
  815. uint32_t cnt;
  816. uint32_t risc_address;
  817. struct qla_hw_data *ha = vha->hw;
  818. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  819. uint32_t __iomem *dmp_reg;
  820. uint32_t *iter_reg;
  821. uint16_t __iomem *mbx_reg;
  822. unsigned long flags;
  823. struct qla25xx_fw_dump *fw;
  824. uint32_t ext_mem_cnt;
  825. void *nxt, *nxt_chain;
  826. uint32_t *last_chain = NULL;
  827. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  828. risc_address = ext_mem_cnt = 0;
  829. flags = 0;
  830. if (!hardware_locked)
  831. spin_lock_irqsave(&ha->hardware_lock, flags);
  832. if (!ha->fw_dump) {
  833. qla_printk(KERN_WARNING, ha,
  834. "No buffer available for dump!!!\n");
  835. goto qla25xx_fw_dump_failed;
  836. }
  837. if (ha->fw_dumped) {
  838. qla_printk(KERN_WARNING, ha,
  839. "Firmware has been previously dumped (%p) -- ignoring "
  840. "request...\n", ha->fw_dump);
  841. goto qla25xx_fw_dump_failed;
  842. }
  843. fw = &ha->fw_dump->isp.isp25;
  844. qla2xxx_prep_dump(ha, ha->fw_dump);
  845. ha->fw_dump->version = __constant_htonl(2);
  846. fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
  847. /* Pause RISC. */
  848. rval = qla24xx_pause_risc(reg);
  849. if (rval != QLA_SUCCESS)
  850. goto qla25xx_fw_dump_failed_0;
  851. /* Host/Risc registers. */
  852. iter_reg = fw->host_risc_reg;
  853. iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
  854. qla24xx_read_window(reg, 0x7010, 16, iter_reg);
  855. /* PCIe registers. */
  856. WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
  857. RD_REG_DWORD(&reg->iobase_addr);
  858. WRT_REG_DWORD(&reg->iobase_window, 0x01);
  859. dmp_reg = &reg->iobase_c4;
  860. fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
  861. fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
  862. fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
  863. fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
  864. WRT_REG_DWORD(&reg->iobase_window, 0x00);
  865. RD_REG_DWORD(&reg->iobase_window);
  866. /* Host interface registers. */
  867. dmp_reg = &reg->flash_addr;
  868. for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
  869. fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
  870. /* Disable interrupts. */
  871. WRT_REG_DWORD(&reg->ictrl, 0);
  872. RD_REG_DWORD(&reg->ictrl);
  873. /* Shadow registers. */
  874. WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
  875. RD_REG_DWORD(&reg->iobase_addr);
  876. WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
  877. fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  878. WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
  879. fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  880. WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
  881. fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  882. WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
  883. fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  884. WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
  885. fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  886. WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
  887. fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  888. WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
  889. fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  890. WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
  891. fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  892. WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
  893. fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  894. WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
  895. fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  896. WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
  897. fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  898. /* RISC I/O register. */
  899. WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
  900. fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
  901. /* Mailbox registers. */
  902. mbx_reg = &reg->mailbox0;
  903. for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
  904. fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
  905. /* Transfer sequence registers. */
  906. iter_reg = fw->xseq_gp_reg;
  907. iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
  908. iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
  909. iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
  910. iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
  911. iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
  912. iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
  913. iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
  914. qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
  915. iter_reg = fw->xseq_0_reg;
  916. iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
  917. iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
  918. qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
  919. qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
  920. /* Receive sequence registers. */
  921. iter_reg = fw->rseq_gp_reg;
  922. iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
  923. iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
  924. iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
  925. iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
  926. iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
  927. iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
  928. iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
  929. qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
  930. iter_reg = fw->rseq_0_reg;
  931. iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
  932. qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
  933. qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
  934. qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
  935. /* Auxiliary sequence registers. */
  936. iter_reg = fw->aseq_gp_reg;
  937. iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
  938. iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
  939. iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
  940. iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
  941. iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
  942. iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
  943. iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
  944. qla24xx_read_window(reg, 0xB070, 16, iter_reg);
  945. iter_reg = fw->aseq_0_reg;
  946. iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
  947. qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
  948. qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
  949. qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
  950. /* Command DMA registers. */
  951. qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
  952. /* Queues. */
  953. iter_reg = fw->req0_dma_reg;
  954. iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
  955. dmp_reg = &reg->iobase_q;
  956. for (cnt = 0; cnt < 7; cnt++)
  957. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  958. iter_reg = fw->resp0_dma_reg;
  959. iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
  960. dmp_reg = &reg->iobase_q;
  961. for (cnt = 0; cnt < 7; cnt++)
  962. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  963. iter_reg = fw->req1_dma_reg;
  964. iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
  965. dmp_reg = &reg->iobase_q;
  966. for (cnt = 0; cnt < 7; cnt++)
  967. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  968. /* Transmit DMA registers. */
  969. iter_reg = fw->xmt0_dma_reg;
  970. iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
  971. qla24xx_read_window(reg, 0x7610, 16, iter_reg);
  972. iter_reg = fw->xmt1_dma_reg;
  973. iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
  974. qla24xx_read_window(reg, 0x7630, 16, iter_reg);
  975. iter_reg = fw->xmt2_dma_reg;
  976. iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
  977. qla24xx_read_window(reg, 0x7650, 16, iter_reg);
  978. iter_reg = fw->xmt3_dma_reg;
  979. iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
  980. qla24xx_read_window(reg, 0x7670, 16, iter_reg);
  981. iter_reg = fw->xmt4_dma_reg;
  982. iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
  983. qla24xx_read_window(reg, 0x7690, 16, iter_reg);
  984. qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
  985. /* Receive DMA registers. */
  986. iter_reg = fw->rcvt0_data_dma_reg;
  987. iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
  988. qla24xx_read_window(reg, 0x7710, 16, iter_reg);
  989. iter_reg = fw->rcvt1_data_dma_reg;
  990. iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
  991. qla24xx_read_window(reg, 0x7730, 16, iter_reg);
  992. /* RISC registers. */
  993. iter_reg = fw->risc_gp_reg;
  994. iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
  995. iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
  996. iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
  997. iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
  998. iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
  999. iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
  1000. iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
  1001. qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
  1002. /* Local memory controller registers. */
  1003. iter_reg = fw->lmc_reg;
  1004. iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
  1005. iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
  1006. iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
  1007. iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
  1008. iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
  1009. iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
  1010. iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
  1011. qla24xx_read_window(reg, 0x3070, 16, iter_reg);
  1012. /* Fibre Protocol Module registers. */
  1013. iter_reg = fw->fpm_hdw_reg;
  1014. iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
  1015. iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
  1016. iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
  1017. iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
  1018. iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
  1019. iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
  1020. iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
  1021. iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
  1022. iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
  1023. iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
  1024. iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
  1025. qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
  1026. /* Frame Buffer registers. */
  1027. iter_reg = fw->fb_hdw_reg;
  1028. iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
  1029. iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
  1030. iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
  1031. iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
  1032. iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
  1033. iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
  1034. iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
  1035. iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
  1036. iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
  1037. iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
  1038. iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
  1039. qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
  1040. /* Multi queue registers */
  1041. nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
  1042. &last_chain);
  1043. rval = qla24xx_soft_reset(ha);
  1044. if (rval != QLA_SUCCESS)
  1045. goto qla25xx_fw_dump_failed_0;
  1046. rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
  1047. &nxt);
  1048. if (rval != QLA_SUCCESS)
  1049. goto qla25xx_fw_dump_failed_0;
  1050. nxt = qla2xxx_copy_queues(ha, nxt);
  1051. nxt = qla24xx_copy_eft(ha, nxt);
  1052. /* Chain entries -- started with MQ. */
  1053. qla25xx_copy_fce(ha, nxt_chain, &last_chain);
  1054. if (last_chain) {
  1055. ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
  1056. *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
  1057. }
  1058. qla25xx_fw_dump_failed_0:
  1059. qla2xxx_dump_post_process(base_vha, rval);
  1060. qla25xx_fw_dump_failed:
  1061. if (!hardware_locked)
  1062. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1063. }
  1064. void
  1065. qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
  1066. {
  1067. int rval;
  1068. uint32_t cnt;
  1069. uint32_t risc_address;
  1070. struct qla_hw_data *ha = vha->hw;
  1071. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  1072. uint32_t __iomem *dmp_reg;
  1073. uint32_t *iter_reg;
  1074. uint16_t __iomem *mbx_reg;
  1075. unsigned long flags;
  1076. struct qla81xx_fw_dump *fw;
  1077. uint32_t ext_mem_cnt;
  1078. void *nxt, *nxt_chain;
  1079. uint32_t *last_chain = NULL;
  1080. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  1081. risc_address = ext_mem_cnt = 0;
  1082. flags = 0;
  1083. if (!hardware_locked)
  1084. spin_lock_irqsave(&ha->hardware_lock, flags);
  1085. if (!ha->fw_dump) {
  1086. qla_printk(KERN_WARNING, ha,
  1087. "No buffer available for dump!!!\n");
  1088. goto qla81xx_fw_dump_failed;
  1089. }
  1090. if (ha->fw_dumped) {
  1091. qla_printk(KERN_WARNING, ha,
  1092. "Firmware has been previously dumped (%p) -- ignoring "
  1093. "request...\n", ha->fw_dump);
  1094. goto qla81xx_fw_dump_failed;
  1095. }
  1096. fw = &ha->fw_dump->isp.isp81;
  1097. qla2xxx_prep_dump(ha, ha->fw_dump);
  1098. fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
  1099. /* Pause RISC. */
  1100. rval = qla24xx_pause_risc(reg);
  1101. if (rval != QLA_SUCCESS)
  1102. goto qla81xx_fw_dump_failed_0;
  1103. /* Host/Risc registers. */
  1104. iter_reg = fw->host_risc_reg;
  1105. iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
  1106. qla24xx_read_window(reg, 0x7010, 16, iter_reg);
  1107. /* PCIe registers. */
  1108. WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
  1109. RD_REG_DWORD(&reg->iobase_addr);
  1110. WRT_REG_DWORD(&reg->iobase_window, 0x01);
  1111. dmp_reg = &reg->iobase_c4;
  1112. fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
  1113. fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
  1114. fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
  1115. fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
  1116. WRT_REG_DWORD(&reg->iobase_window, 0x00);
  1117. RD_REG_DWORD(&reg->iobase_window);
  1118. /* Host interface registers. */
  1119. dmp_reg = &reg->flash_addr;
  1120. for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
  1121. fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
  1122. /* Disable interrupts. */
  1123. WRT_REG_DWORD(&reg->ictrl, 0);
  1124. RD_REG_DWORD(&reg->ictrl);
  1125. /* Shadow registers. */
  1126. WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
  1127. RD_REG_DWORD(&reg->iobase_addr);
  1128. WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
  1129. fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1130. WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
  1131. fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1132. WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
  1133. fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1134. WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
  1135. fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1136. WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
  1137. fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1138. WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
  1139. fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1140. WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
  1141. fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1142. WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
  1143. fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1144. WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
  1145. fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1146. WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
  1147. fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1148. WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
  1149. fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
  1150. /* RISC I/O register. */
  1151. WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
  1152. fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
  1153. /* Mailbox registers. */
  1154. mbx_reg = &reg->mailbox0;
  1155. for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
  1156. fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
  1157. /* Transfer sequence registers. */
  1158. iter_reg = fw->xseq_gp_reg;
  1159. iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
  1160. iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
  1161. iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
  1162. iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
  1163. iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
  1164. iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
  1165. iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
  1166. qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
  1167. iter_reg = fw->xseq_0_reg;
  1168. iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
  1169. iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
  1170. qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
  1171. qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
  1172. /* Receive sequence registers. */
  1173. iter_reg = fw->rseq_gp_reg;
  1174. iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
  1175. iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
  1176. iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
  1177. iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
  1178. iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
  1179. iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
  1180. iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
  1181. qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
  1182. iter_reg = fw->rseq_0_reg;
  1183. iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
  1184. qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
  1185. qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
  1186. qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
  1187. /* Auxiliary sequence registers. */
  1188. iter_reg = fw->aseq_gp_reg;
  1189. iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
  1190. iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
  1191. iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
  1192. iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
  1193. iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
  1194. iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
  1195. iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
  1196. qla24xx_read_window(reg, 0xB070, 16, iter_reg);
  1197. iter_reg = fw->aseq_0_reg;
  1198. iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
  1199. qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
  1200. qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
  1201. qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
  1202. /* Command DMA registers. */
  1203. qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
  1204. /* Queues. */
  1205. iter_reg = fw->req0_dma_reg;
  1206. iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
  1207. dmp_reg = &reg->iobase_q;
  1208. for (cnt = 0; cnt < 7; cnt++)
  1209. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  1210. iter_reg = fw->resp0_dma_reg;
  1211. iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
  1212. dmp_reg = &reg->iobase_q;
  1213. for (cnt = 0; cnt < 7; cnt++)
  1214. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  1215. iter_reg = fw->req1_dma_reg;
  1216. iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
  1217. dmp_reg = &reg->iobase_q;
  1218. for (cnt = 0; cnt < 7; cnt++)
  1219. *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
  1220. /* Transmit DMA registers. */
  1221. iter_reg = fw->xmt0_dma_reg;
  1222. iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
  1223. qla24xx_read_window(reg, 0x7610, 16, iter_reg);
  1224. iter_reg = fw->xmt1_dma_reg;
  1225. iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
  1226. qla24xx_read_window(reg, 0x7630, 16, iter_reg);
  1227. iter_reg = fw->xmt2_dma_reg;
  1228. iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
  1229. qla24xx_read_window(reg, 0x7650, 16, iter_reg);
  1230. iter_reg = fw->xmt3_dma_reg;
  1231. iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
  1232. qla24xx_read_window(reg, 0x7670, 16, iter_reg);
  1233. iter_reg = fw->xmt4_dma_reg;
  1234. iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
  1235. qla24xx_read_window(reg, 0x7690, 16, iter_reg);
  1236. qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
  1237. /* Receive DMA registers. */
  1238. iter_reg = fw->rcvt0_data_dma_reg;
  1239. iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
  1240. qla24xx_read_window(reg, 0x7710, 16, iter_reg);
  1241. iter_reg = fw->rcvt1_data_dma_reg;
  1242. iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
  1243. qla24xx_read_window(reg, 0x7730, 16, iter_reg);
  1244. /* RISC registers. */
  1245. iter_reg = fw->risc_gp_reg;
  1246. iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
  1247. iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
  1248. iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
  1249. iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
  1250. iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
  1251. iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
  1252. iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
  1253. qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
  1254. /* Local memory controller registers. */
  1255. iter_reg = fw->lmc_reg;
  1256. iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
  1257. iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
  1258. iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
  1259. iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
  1260. iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
  1261. iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
  1262. iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
  1263. qla24xx_read_window(reg, 0x3070, 16, iter_reg);
  1264. /* Fibre Protocol Module registers. */
  1265. iter_reg = fw->fpm_hdw_reg;
  1266. iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
  1267. iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
  1268. iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
  1269. iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
  1270. iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
  1271. iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
  1272. iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
  1273. iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
  1274. iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
  1275. iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
  1276. iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
  1277. iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
  1278. iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
  1279. qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
  1280. /* Frame Buffer registers. */
  1281. iter_reg = fw->fb_hdw_reg;
  1282. iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
  1283. iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
  1284. iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
  1285. iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
  1286. iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
  1287. iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
  1288. iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
  1289. iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
  1290. iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
  1291. iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
  1292. iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
  1293. iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
  1294. qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
  1295. /* Multi queue registers */
  1296. nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
  1297. &last_chain);
  1298. rval = qla24xx_soft_reset(ha);
  1299. if (rval != QLA_SUCCESS)
  1300. goto qla81xx_fw_dump_failed_0;
  1301. rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
  1302. &nxt);
  1303. if (rval != QLA_SUCCESS)
  1304. goto qla81xx_fw_dump_failed_0;
  1305. nxt = qla2xxx_copy_queues(ha, nxt);
  1306. nxt = qla24xx_copy_eft(ha, nxt);
  1307. /* Chain entries -- started with MQ. */
  1308. qla25xx_copy_fce(ha, nxt_chain, &last_chain);
  1309. if (last_chain) {
  1310. ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
  1311. *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
  1312. }
  1313. qla81xx_fw_dump_failed_0:
  1314. qla2xxx_dump_post_process(base_vha, rval);
  1315. qla81xx_fw_dump_failed:
  1316. if (!hardware_locked)
  1317. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1318. }
  1319. /****************************************************************************/
  1320. /* Driver Debug Functions. */
  1321. /****************************************************************************/
  1322. void
  1323. qla2x00_dump_regs(scsi_qla_host_t *vha)
  1324. {
  1325. int i;
  1326. struct qla_hw_data *ha = vha->hw;
  1327. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  1328. struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
  1329. uint16_t __iomem *mbx_reg;
  1330. mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0:
  1331. MAILBOX_REG(ha, reg, 0);
  1332. printk("Mailbox registers:\n");
  1333. for (i = 0; i < 6; i++)
  334. printk("scsi(%ld): mbox %d 0x%04x\n", vha->host_no, i,
  1335. RD_REG_WORD(mbx_reg++));
  1336. }
  1337. void
  1338. qla2x00_dump_buffer(uint8_t * b, uint32_t size)
  1339. {
  1340. uint32_t cnt;
  1341. uint8_t c;
  1342. printk(" 0 1 2 3 4 5 6 7 8 9 "
  1343. "Ah Bh Ch Dh Eh Fh\n");
  1344. printk("----------------------------------------"
  1345. "----------------------\n");
  1346. for (cnt = 0; cnt < size;) {
  1347. c = *b++;
  1348. printk("%02x",(uint32_t) c);
  1349. cnt++;
  1350. if (!(cnt % 16))
  1351. printk("\n");
  1352. else
  1353. printk(" ");
  1354. }
  1355. if (cnt % 16)
  1356. printk("\n");
  1357. }
  1358. void
  1359. qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
  1360. {
  1361. uint32_t cnt;
  1362. uint8_t c;
  1363. uint8_t last16[16], cur16[16];
  1364. uint32_t lc = 0, num_same16 = 0, j;
  1365. printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
  1366. "Ah Bh Ch Dh Eh Fh\n");
  1367. printk(KERN_DEBUG "----------------------------------------"
  1368. "----------------------\n");
  1369. for (cnt = 0; cnt < size;) {
  1370. c = *b++;
  1371. cur16[lc++] = c;
  1372. cnt++;
  1373. if (cnt % 16)
  1374. continue;
  1375. /* We have 16 now */
  1376. lc = 0;
  1377. if (num_same16 == 0) {
  1378. memcpy(last16, cur16, 16);
  1379. num_same16++;
  1380. continue;
  1381. }
  1382. if (memcmp(cur16, last16, 16) == 0) {
  1383. num_same16++;
  1384. continue;
  1385. }
  1386. for (j = 0; j < 16; j++)
  1387. printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
  1388. printk(KERN_DEBUG "\n");
  1389. if (num_same16 > 1)
  390. printk(KERN_DEBUG "> prev pattern repeats (%u) "
  391. "more times\n", num_same16-1);
  1392. memcpy(last16, cur16, 16);
  1393. num_same16 = 1;
  1394. }
  1395. if (num_same16) {
  1396. for (j = 0; j < 16; j++)
  1397. printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
  1398. printk(KERN_DEBUG "\n");
  1399. if (num_same16 > 1)
  400. printk(KERN_DEBUG "> prev pattern repeats (%u) "
  401. "more times\n", num_same16-1);
  1402. }
  1403. if (lc) {
  1404. for (j = 0; j < lc; j++)
  1405. printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
  1406. printk(KERN_DEBUG "\n");
  1407. }
  1408. }