PageRenderTime 68ms CodeModel.GetById 17ms RepoModel.GetById 1ms app.codeStats 3ms

/drivers/scsi/lpfc/lpfc_init.c

http://github.com/mirrors/linux
C | 14064 lines | 9295 code | 1414 blank | 3355 comment | 1383 complexity | 79910e1fdb09cd24495d6340221fbfd4 MD5 | raw file
Possible License(s): AGPL-1.0, GPL-2.0, LGPL-2.0

Large files are truncated, but you can click here to view the full file

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. #include <linux/blkdev.h>
  24. #include <linux/delay.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/idr.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/module.h>
  29. #include <linux/kthread.h>
  30. #include <linux/pci.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/ctype.h>
  33. #include <linux/aer.h>
  34. #include <linux/slab.h>
  35. #include <linux/firmware.h>
  36. #include <linux/miscdevice.h>
  37. #include <linux/percpu.h>
  38. #include <linux/msi.h>
  39. #include <linux/irq.h>
  40. #include <linux/bitops.h>
  41. #include <linux/crash_dump.h>
  42. #include <linux/cpu.h>
  43. #include <linux/cpuhotplug.h>
  44. #include <scsi/scsi.h>
  45. #include <scsi/scsi_device.h>
  46. #include <scsi/scsi_host.h>
  47. #include <scsi/scsi_transport_fc.h>
  48. #include <scsi/scsi_tcq.h>
  49. #include <scsi/fc/fc_fs.h>
  50. #include <linux/nvme-fc-driver.h>
  51. #include "lpfc_hw4.h"
  52. #include "lpfc_hw.h"
  53. #include "lpfc_sli.h"
  54. #include "lpfc_sli4.h"
  55. #include "lpfc_nl.h"
  56. #include "lpfc_disc.h"
  57. #include "lpfc.h"
  58. #include "lpfc_scsi.h"
  59. #include "lpfc_nvme.h"
  60. #include "lpfc_nvmet.h"
  61. #include "lpfc_logmsg.h"
  62. #include "lpfc_crtn.h"
  63. #include "lpfc_vport.h"
  64. #include "lpfc_version.h"
  65. #include "lpfc_ids.h"
/* CPU-hotplug dynamic state slot for lpfc; presumably assigned when the
 * driver registers its cpuhp callbacks at module init — TODO confirm.
 */
static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

/* Forward declarations for static helpers defined later in this file */
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

/* SCSI transport templates; NOTE(review): likely registered once at module
 * load and shared by all HBA instances — verify against module init code.
 */
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
/* IDR used to hand out a unique index per HBA instance */
static DEFINE_IDR(lpfc_hba_index);

#define LPFC_NVMET_BUF_POST 254
  97. /**
  98. * lpfc_config_port_prep - Perform lpfc initialization prior to config port
  99. * @phba: pointer to lpfc hba data structure.
  100. *
  101. * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
  102. * mailbox command. It retrieves the revision information from the HBA and
  103. * collects the Vital Product Data (VPD) about the HBA for preparing the
  104. * configuration of the HBA.
  105. *
  106. * Return codes:
  107. * 0 - success.
  108. * -ERESTART - requests the SLI layer to reset the HBA and try again.
  109. * Any other value - indicates an error.
  110. **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	/* License string posted to LC-class HBAs; byte-swapped to
	 * big-endian exactly once per driver lifetime (init_key guards it).
	 */
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Swap the 56-byte key in 4-byte words */
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		/* Issue READ_NVPARM with the key placed in rsvd3 */
		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			    sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		/* Cache the node/port names returned in NVPARMs */
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* SLI-3 must be acknowledged by firmware via v3rsp */
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		/* Dump the next region of VPD, reusing the same mailbox */
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		/* Clamp so the copy below never overruns lpfc_vpd_data */
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
  247. /**
  248. * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
  249. * @phba: pointer to lpfc hba data structure.
  250. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  251. *
  252. * This is the completion handler for driver's configuring asynchronous event
  253. * mailbox command to the device. If the mailbox command returns successfully,
  254. * it will set internal async event support flag to 1; otherwise, it will
  255. * set internal async event support flag to 0.
  256. **/
  257. static void
  258. lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
  259. {
  260. if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
  261. phba->temp_sensor_support = 1;
  262. else
  263. phba->temp_sensor_support = 0;
  264. mempool_free(pmboxq, phba->mbox_mem_pool);
  265. return;
  266. }
  267. /**
  268. * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
  269. * @phba: pointer to lpfc hba data structure.
  270. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  271. *
  272. * This is the completion handler for dump mailbox command for getting
  273. * wake up parameters. When this command complete, the response contain
  274. * Option rom version of the HBA. This function translate the version number
  275. * into a human readable string and store it in OptionROMVersion.
  276. **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	/* Nothing to decode if the mailbox command itself failed */
	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* View the raw word through the prog_id bit-field layout */
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	/* dist==3/num==0 is a release build: omit the dist/num suffix */
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
  305. /**
  306. * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
  307. * cfg_soft_wwnn, cfg_soft_wwpn
  308. * @vport: pointer to lpfc vport data structure.
  309. *
  310. *
  311. * Return codes
  312. * None.
  313. **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	/* Adopt the service-param port name when the current name is empty,
	 * a soft WWPN is configured, or a fabric-assigned WWPN is indicated
	 * by the vendor key or by a previously set FAWWPN_SET flag.
	 */
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		/* Re-derive FAWWPN_SET purely from the current vendor key */
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
  358. /**
  359. * lpfc_config_port_post - Perform lpfc initialization after config port
  360. * @phba: pointer to lpfc hba data structure.
  361. *
  362. * This routine will do LPFC initialization after the CONFIG_PORT mailbox
  363. * command call. It performs all internal resource and state setups on the
  364. * port: post IOCB buffers, enable appropriate host interrupt attentions,
  365. * ELS ring timers, etc.
  366. *
  367. * Return codes
  368. * 0 - success.
  369. * Any other value - error.
  370. **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		/* Also release the DMA buffer lpfc_read_sparam() attached */
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* Copy service parameters out of the DMA buffer, then free it */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		/* Hex-encode each byte: digits 0-9 -> '0'-'9',
		 * 10-15 -> 'a'-'f'; two serial chars per input byte.
		 */
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	/* Reuse the same mailbox for READ_CONFIG */
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		/* NOTE(review): pmb is not freed on this path — looks like a
		 * potential leak; confirm against upstream history.
		 */
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	/* Ring-0 interrupts stay off when FCP ring polling is configured */
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		/* On success pmb is freed by lpfc_sli_def_mbox_cmpl */
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Enable async (e.g. temperature) events; freed in its cmpl */
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Freed in lpfc_dump_wakeup_param_cmpl on completion */
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
  595. /**
  596. * lpfc_hba_init_link - Initialize the FC link
  597. * @phba: pointer to lpfc hba data structure.
  598. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  599. *
  600. * This routine will issue the INIT_LINK mailbox command call.
  601. * It is available to other drivers through the lpfc_hba data
  602. * structure for use as a delayed link up mechanism with the
  603. * module parameter lpfc_suppress_link_up.
  604. *
  605. * Return code
  606. * 0 - success
  607. * Any other value - error
  608. **/
  609. static int
  610. lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
  611. {
  612. return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
  613. }
  614. /**
  615. * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
  616. * @phba: pointer to lpfc hba data structure.
  617. * @fc_topology: desired fc topology.
  618. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  619. *
  620. * This routine will issue the INIT_LINK mailbox command call.
  621. * It is available to other drivers through the lpfc_hba data
  622. * structure for use as a delayed link up mechanism with the
  623. * module parameter lpfc_suppress_link_up.
  624. *
  625. * Return code
  626. * 0 - success
  627. * Any other value - error
  628. **/
  629. int
  630. lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
  631. uint32_t flag)
  632. {
  633. struct lpfc_vport *vport = phba->pport;
  634. LPFC_MBOXQ_t *pmb;
  635. MAILBOX_t *mb;
  636. int rc;
  637. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  638. if (!pmb) {
  639. phba->link_state = LPFC_HBA_ERROR;
  640. return -ENOMEM;
  641. }
  642. mb = &pmb->u.mb;
  643. pmb->vport = vport;
  644. if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
  645. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
  646. !(phba->lmt & LMT_1Gb)) ||
  647. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
  648. !(phba->lmt & LMT_2Gb)) ||
  649. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
  650. !(phba->lmt & LMT_4Gb)) ||
  651. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
  652. !(phba->lmt & LMT_8Gb)) ||
  653. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
  654. !(phba->lmt & LMT_10Gb)) ||
  655. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
  656. !(phba->lmt & LMT_16Gb)) ||
  657. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
  658. !(phba->lmt & LMT_32Gb)) ||
  659. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
  660. !(phba->lmt & LMT_64Gb))) {
  661. /* Reset link speed to auto */
  662. lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
  663. "1302 Invalid speed for this board:%d "
  664. "Reset link speed to auto.\n",
  665. phba->cfg_link_speed);
  666. phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
  667. }
  668. lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
  669. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  670. if (phba->sli_rev < LPFC_SLI_REV4)
  671. lpfc_set_loopback_flag(phba);
  672. rc = lpfc_sli_issue_mbox(phba, pmb, flag);
  673. if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
  674. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  675. "0498 Adapter failed to init, mbxCmd x%x "
  676. "INIT_LINK, mbxStatus x%x\n",
  677. mb->mbxCommand, mb->mbxStatus);
  678. if (phba->sli_rev <= LPFC_SLI_REV3) {
  679. /* Clear all interrupt enable conditions */
  680. writel(0, phba->HCregaddr);
  681. readl(phba->HCregaddr); /* flush */
  682. /* Clear all pending interrupts */
  683. writel(0xffffffff, phba->HAregaddr);
  684. readl(phba->HAregaddr); /* flush */
  685. }
  686. phba->link_state = LPFC_HBA_ERROR;
  687. if (rc != MBX_BUSY || flag == MBX_POLL)
  688. mempool_free(pmb, phba->mbox_mem_pool);
  689. return -EIO;
  690. }
  691. phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
  692. if (flag == MBX_POLL)
  693. mempool_free(pmb, phba->mbox_mem_pool);
  694. return 0;
  695. }
  696. /**
  697. * lpfc_hba_down_link - this routine downs the FC link
  698. * @phba: pointer to lpfc hba data structure.
  699. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  700. *
  701. * This routine will issue the DOWN_LINK mailbox command call.
  702. * It is available to other drivers through the lpfc_hba data
  703. * structure for use to stop the link.
  704. *
  705. * Return code
  706. * 0 - success
  707. * Any other value - error
  708. **/
  709. static int
  710. lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
  711. {
  712. LPFC_MBOXQ_t *pmb;
  713. int rc;
  714. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  715. if (!pmb) {
  716. phba->link_state = LPFC_HBA_ERROR;
  717. return -ENOMEM;
  718. }
  719. lpfc_printf_log(phba,
  720. KERN_ERR, LOG_INIT,
  721. "0491 Adapter Link is disabled.\n");
  722. lpfc_down_link(phba, pmb);
  723. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  724. rc = lpfc_sli_issue_mbox(phba, pmb, flag);
  725. if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
  726. lpfc_printf_log(phba,
  727. KERN_ERR, LOG_INIT,
  728. "2522 Adapter failed to issue DOWN_LINK"
  729. " mbox command rc 0x%x\n", rc);
  730. mempool_free(pmb, phba->mbox_mem_pool);
  731. return -EIO;
  732. }
  733. if (flag == MBX_POLL)
  734. mempool_free(pmb, phba->mbox_mem_pool);
  735. return 0;
  736. }
  737. /**
  738. * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
  739. * @phba: pointer to lpfc HBA data structure.
  740. *
  741. * This routine will do LPFC uninitialization before the HBA is reset when
  742. * bringing down the SLI Layer.
  743. *
  744. * Return codes
  745. * 0 - success.
  746. * Any other value - error.
  747. **/
  748. int
  749. lpfc_hba_down_prep(struct lpfc_hba *phba)
  750. {
  751. struct lpfc_vport **vports;
  752. int i;
  753. if (phba->sli_rev <= LPFC_SLI_REV3) {
  754. /* Disable interrupts */
  755. writel(0, phba->HCregaddr);
  756. readl(phba->HCregaddr); /* flush */
  757. }
  758. if (phba->pport->load_flag & FC_UNLOADING)
  759. lpfc_cleanup_discovery_resources(phba->pport);
  760. else {
  761. vports = lpfc_create_vport_work_array(phba);
  762. if (vports != NULL)
  763. for (i = 0; i <= phba->max_vports &&
  764. vports[i] != NULL; i++)
  765. lpfc_cleanup_discovery_resources(vports[i]);
  766. lpfc_destroy_vport_work_array(phba, vports);
  767. }
  768. return 0;
  769. }
  770. /**
  771. * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
  772. * rspiocb which got deferred
  773. *
  774. * @phba: pointer to lpfc HBA data structure.
  775. *
  776. * This routine will cleanup completed slow path events after HBA is reset
  777. * when bringing down the SLI Layer.
  778. *
  779. *
  780. * Return codes
  781. * void.
  782. **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	/* Clear the queue-event flag before draining the list */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Return the event's containing object to its pool,
		 * selected by the completion-queue entry code.
		 */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
  812. /**
  813. * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
  814. * @phba: pointer to lpfc HBA data structure.
  815. *
  816. * This routine will cleanup posted ELS buffers after the HBA is reset
  817. * when bringing down the SLI Layer.
  818. *
  819. *
  820. * Return codes
  821. * void.
  822. **/
  823. static void
  824. lpfc_hba_free_post_buf(struct lpfc_hba *phba)
  825. {
  826. struct lpfc_sli *psli = &phba->sli;
  827. struct lpfc_sli_ring *pring;
  828. struct lpfc_dmabuf *mp, *next_mp;
  829. LIST_HEAD(buflist);
  830. int count;
  831. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
  832. lpfc_sli_hbqbuf_free_all(phba);
  833. else {
  834. /* Cleanup preposted buffers on the ELS ring */
  835. pring = &psli->sli3_ring[LPFC_ELS_RING];
  836. spin_lock_irq(&phba->hbalock);
  837. list_splice_init(&pring->postbufq, &buflist);
  838. spin_unlock_irq(&phba->hbalock);
  839. count = 0;
  840. list_for_each_entry_safe(mp, next_mp, &buflist, list) {
  841. list_del(&mp->list);
  842. count++;
  843. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  844. kfree(mp);
  845. }
  846. spin_lock_irq(&phba->hbalock);
  847. pring->postbufq_cnt -= count;
  848. spin_unlock_irq(&phba->hbalock);
  849. }
  850. }
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		/* SLI-3 (or earlier): rings live in the sli3_ring array and
		 * are all protected by the single hbalock.
		 */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}

	/* SLI-4: each work queue carries its own ring with a per-ring lock */
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		/* Clear the on-txcmplq marker before moving the iocbs off */
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	/* Release preposted ELS buffers, then drain the txcmplq rings */
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success (returns the count of recycled aborted I/O buffers).
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
					/* scsl_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		/* Aborted I/Os will never complete; detach their command
		 * linkage and mark them done before recycling to the
		 * put list.
		 */
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Repost aborted NVMET receive contexts after clearing
		 * their busy/abort state.
		 */
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	/* cnt = number of aborted I/O buffers returned to the put lists */
	return cnt;
}
  1003. /**
  1004. * lpfc_hba_down_post - Wrapper func for hba down post routine
  1005. * @phba: pointer to lpfc HBA data structure.
  1006. *
  1007. * This routine wraps the actual SLI3 or SLI4 routine for performing
  1008. * uninitialization after the HBA is reset when bring down the SLI Layer.
  1009. *
  1010. * Return codes
  1011. * 0 - success.
  1012. * Any other value - error.
  1013. **/
  1014. int
  1015. lpfc_hba_down_post(struct lpfc_hba *phba)
  1016. {
  1017. return (*phba->lpfc_hba_down_post)(phba);
  1018. }
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	/* Recover the hba from the timer embedded in it */
	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	/* Only flag RRQ processing as active while the driver is loaded */
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Wake the worker to process the RRQ event, unless unloading */
	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
  1076. /**
  1077. * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
  1078. * @phba: pointer to lpfc hba data structure.
  1079. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  1080. *
  1081. * This is the callback function to the lpfc heart-beat mailbox command.
  1082. * If configured, the lpfc driver issues the heart-beat mailbox command to
  1083. * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
  1084. * heart-beat mailbox command is issued, the driver shall set up heart-beat
  1085. * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
  1086. * heart-beat outstanding state. Once the mailbox command comes back and
  1087. * no error conditions detected, the heart-beat mailbox command timer is
  1088. * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
  1089. * state is cleared for the next heart-beat. If the timer expired with the
  1090. * heart-beat outstanding state set, the driver will put the HBA offline.
  1091. **/
  1092. static void
  1093. lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
  1094. {
  1095. unsigned long drvr_flag;
  1096. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  1097. phba->hb_outstanding = 0;
  1098. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  1099. /* Check and reset heart-beat timer is necessary */
  1100. mempool_free(pmboxq, phba->mbox_mem_pool);
  1101. if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
  1102. !(phba->link_state == LPFC_HBA_ERROR) &&
  1103. !(phba->pport->load_flag & FC_UNLOADING))
  1104. mod_timer(&phba->hb_tmofunc,
  1105. jiffies +
  1106. msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  1107. return;
  1108. }
/* Delayed-work handler that auto-tunes EQ interrupt coalescing delay.
 * Runs only while cfg_auto_imax is enabled and the port is not unloading;
 * re-queues itself every LPFC_EQ_DELAY_MSECS.
 */
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	/* Auto tuning disabled or driver unloading: stop rescheduling */
	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	/* Port down or offline: skip this pass but keep the work queued */
	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	/* Per-CPU flag array: nonzero means that CPU's EQs want a delay */
	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			/* Scale delay with interrupt count, capped at max */
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			/* EQ now serviced on another CPU: move it to that
			 * CPU's list and let that pass retune it.
			 */
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			/* Only touch the hardware when the delay changed */
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
  1165. /**
  1166. * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
  1167. * @phba: pointer to lpfc hba data structure.
  1168. *
  1169. * For each heartbeat, this routine does some heuristic methods to adjust
  1170. * XRI distribution. The goal is to fully utilize free XRIs.
  1171. **/
  1172. static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
  1173. {
  1174. u32 i;
  1175. u32 hwq_count;
  1176. hwq_count = phba->cfg_hdw_queue;
  1177. for (i = 0; i < hwq_count; i++) {
  1178. /* Adjust XRIs in private pool */
  1179. lpfc_adjust_pvt_pool_count(phba, i);
  1180. /* Adjust high watermark */
  1181. lpfc_adjust_high_watermark(phba, i);
  1182. #ifdef LPFC_MXP_STAT
  1183. /* Snapshot pbl, pvt and busy count */
  1184. lpfc_snapshot_mxp(phba, i);
  1185. #endif
  1186. }
  1187. }
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	/* Run the periodic per-vport checks (E_D_TOV, FDMI changes) */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Nothing more to do if the port is in error, unloading or offline */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* Recent completion activity within the interval proves the adapter
	 * is alive: just re-arm the timer for the next window.
	 */
	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* ELS buffer count unchanged since last interval: the buffers are
	 * idle, so reclaim them now.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			/* Only issue the heart-beat when the mailbox channel
			 * is idle (no active command, nothing queued).
			 */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					/* Allocation failed: retry next
					 * interval.
					 */
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					/* Issue failed: free the mailbox and
					 * retry next interval.
					 */
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				/* Mailbox busy and no completions observed
				 * since the last skipped beat: log it.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			* If heart beat timeout called with hb_outstanding set
			* we need to give the hb mailbox cmd a chance to
			* complete or TMO.
			*/
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		/* Heart-beat mailbox disabled: just keep the timer running */
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Mark the SLI layer inactive before taking the port down */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	/* Quiesce the chip, then reset the board under the hbalock */
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	/* Post-reset cleanup, wait for mailbox-ready, re-enable mgmt I/O */
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);

	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	/* Record the error state before any teardown begins */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Flush all in-flight I/O, then take the port fully offline */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	/* Stop further SLI activity while the error is handled */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			/* Register read failed: treat as surprise removal */
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	/* Capture the latest port status words from SLIM for the handler */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
  1438. static void
  1439. lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
  1440. {
  1441. struct lpfc_board_event_header board_event;
  1442. struct Scsi_Host *shost;
  1443. board_event.event_type = FC_REG_BOARD_EVENT;
  1444. board_event.subcategory = LPFC_EVENT_PORTINTERR;
  1445. shost = lpfc_shost_from_vport(phba->pport);
  1446. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1447. sizeof(board_event),
  1448. (char *) &board_event,
  1449. LPFC_NL_VENDOR_ID);
  1450. }
  1451. /**
  1452. * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
  1453. * @phba: pointer to lpfc hba data structure.
  1454. *
  1455. * This routine is invoked to handle the following HBA hardware error
  1456. * conditions:
  1457. * 1 - HBA error attention interrupt
  1458. * 2 - DMA ring index out of range
  1459. * 3 - Mailbox command came back as unknown
  1460. **/
  1461. static void
  1462. lpfc_handle_eratt_s3(struct lpfc_hba *phba)
  1463. {
  1464. struct lpfc_vport *vport = phba->pport;
  1465. struct lpfc_sli *psli = &phba->sli;
  1466. uint32_t event_data;
  1467. unsigned long temperature;
  1468. struct temp_event temp_event_data;
  1469. struct Scsi_Host *shost;
  1470. /* If the pci channel is offline, ignore possible errors,
  1471. * since we cannot communicate with the …

Large files files are truncated, but you can click here to view the full file