
/drivers/scsi/lpfc/lpfc_init.c

https://bitbucket.org/alfredchen/linux-gc
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. #include <linux/blkdev.h>
  24. #include <linux/delay.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/idr.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/module.h>
  29. #include <linux/kthread.h>
  30. #include <linux/pci.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/ctype.h>
  33. #include <linux/aer.h>
  34. #include <linux/slab.h>
  35. #include <linux/firmware.h>
  36. #include <linux/miscdevice.h>
  37. #include <linux/percpu.h>
  38. #include <linux/msi.h>
  39. #include <scsi/scsi.h>
  40. #include <scsi/scsi_device.h>
  41. #include <scsi/scsi_host.h>
  42. #include <scsi/scsi_transport_fc.h>
  43. #include "lpfc_hw4.h"
  44. #include "lpfc_hw.h"
  45. #include "lpfc_sli.h"
  46. #include "lpfc_sli4.h"
  47. #include "lpfc_nl.h"
  48. #include "lpfc_disc.h"
  49. #include "lpfc.h"
  50. #include "lpfc_scsi.h"
  51. #include "lpfc_nvme.h"
  52. #include "lpfc_logmsg.h"
  53. #include "lpfc_crtn.h"
  54. #include "lpfc_vport.h"
  55. #include "lpfc_version.h"
  56. #include "lpfc_ids.h"
  57. char *_dump_buf_data;
  58. unsigned long _dump_buf_data_order;
  59. char *_dump_buf_dif;
  60. unsigned long _dump_buf_dif_order;
  61. spinlock_t _dump_buf_lock;
  62. /* Used when mapping IRQ vectors in a driver centric manner */
  63. uint16_t *lpfc_used_cpu;
  64. uint32_t lpfc_present_cpu;
  65. static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
  66. static int lpfc_post_rcv_buf(struct lpfc_hba *);
  67. static int lpfc_sli4_queue_verify(struct lpfc_hba *);
  68. static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
  69. static int lpfc_setup_endian_order(struct lpfc_hba *);
  70. static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
  71. static void lpfc_free_els_sgl_list(struct lpfc_hba *);
  72. static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
  73. static void lpfc_init_sgl_list(struct lpfc_hba *);
  74. static int lpfc_init_active_sgl_array(struct lpfc_hba *);
  75. static void lpfc_free_active_sgl(struct lpfc_hba *);
  76. static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
  77. static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
  78. static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
  79. static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
  80. static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
  81. static void lpfc_sli4_disable_intr(struct lpfc_hba *);
  82. static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
  83. static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
  84. static struct scsi_transport_template *lpfc_transport_template = NULL;
  85. static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
  86. static DEFINE_IDR(lpfc_hba_index);
  87. #define LPFC_NVMET_BUF_POST 254
  88. /**
  89. * lpfc_config_port_prep - Perform lpfc initialization prior to config port
  90. * @phba: pointer to lpfc hba data structure.
  91. *
  92. * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
  93. * mailbox command. It retrieves the revision information from the HBA and
  94. * collects the Vital Product Data (VPD) about the HBA for preparing the
  95. * configuration of the HBA.
  96. *
  97. * Return codes:
  98. * 0 - success.
  99. * -ERESTART - requests the SLI layer to reset the HBA and try again.
  100. * Any other value - indicates an error.
  101. **/
  102. int
  103. lpfc_config_port_prep(struct lpfc_hba *phba)
  104. {
  105. lpfc_vpd_t *vp = &phba->vpd;
  106. int i = 0, rc;
  107. LPFC_MBOXQ_t *pmb;
  108. MAILBOX_t *mb;
  109. char *lpfc_vpd_data = NULL;
  110. uint16_t offset = 0;
  111. static char licensed[56] =
  112. "key unlock for use with gnu public licensed code only\0";
  113. static int init_key = 1;
  114. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  115. if (!pmb) {
  116. phba->link_state = LPFC_HBA_ERROR;
  117. return -ENOMEM;
  118. }
  119. mb = &pmb->u.mb;
  120. phba->link_state = LPFC_INIT_MBX_CMDS;
  121. if (lpfc_is_LC_HBA(phba->pcidev->device)) {
  122. if (init_key) {
  123. uint32_t *ptext = (uint32_t *) licensed;
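  /* Byte-swap the license text into big-endian words once (guarded by init_key); it is copied into the READ_NVPARM mailbox request below */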
  124. for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
  125. *ptext = cpu_to_be32(*ptext);
  126. init_key = 0;
  127. }
  128. lpfc_read_nv(phba, pmb);
  129. memset((char*)mb->un.varRDnvp.rsvd3, 0,
  130. sizeof (mb->un.varRDnvp.rsvd3));
  131. memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
  132. sizeof (licensed));
  133. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  134. if (rc != MBX_SUCCESS) {
  135. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
  136. "0324 Config Port initialization "
  137. "error, mbxCmd x%x READ_NVPARM, "
  138. "mbxStatus x%x\n",
  139. mb->mbxCommand, mb->mbxStatus);
  140. mempool_free(pmb, phba->mbox_mem_pool);
  141. return -ERESTART;
  142. }
  143. memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
  144. sizeof(phba->wwnn));
  145. memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
  146. sizeof(phba->wwpn));
  147. }
  148. phba->sli3_options = 0x0;
  149. /* Setup and issue mailbox READ REV command */
  150. lpfc_read_rev(phba, pmb);
  151. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  152. if (rc != MBX_SUCCESS) {
  153. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  154. "0439 Adapter failed to init, mbxCmd x%x "
  155. "READ_REV, mbxStatus x%x\n",
  156. mb->mbxCommand, mb->mbxStatus);
  157. mempool_free( pmb, phba->mbox_mem_pool);
  158. return -ERESTART;
  159. }
  160. /*
  161. * The value of rr must be 1 since the driver set the cv field to 1.
  162. * This setting requires the FW to set all revision fields.
  163. */
  164. if (mb->un.varRdRev.rr == 0) {
  165. vp->rev.rBit = 0;
  166. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  167. "0440 Adapter failed to init, READ_REV has "
  168. "missing revision information.\n");
  169. mempool_free(pmb, phba->mbox_mem_pool);
  170. return -ERESTART;
  171. }
  172. if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
  173. mempool_free(pmb, phba->mbox_mem_pool);
  174. return -EINVAL;
  175. }
  176. /* Save information as VPD data */
  177. vp->rev.rBit = 1;
  178. memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
  179. vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
  180. memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
  181. vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
  182. memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
  183. vp->rev.biuRev = mb->un.varRdRev.biuRev;
  184. vp->rev.smRev = mb->un.varRdRev.smRev;
  185. vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
  186. vp->rev.endecRev = mb->un.varRdRev.endecRev;
  187. vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
  188. vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
  189. vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
  190. vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
  191. vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
  192. vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
  193. /* If the sli feature level is less than 9, we must
  194. * tear down all RPIs and VPIs on link down if NPIV
  195. * is enabled.
  196. */
  197. if (vp->rev.feaLevelHigh < 9)
  198. phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
  199. if (lpfc_is_LC_HBA(phba->pcidev->device))
  200. memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
  201. sizeof (phba->RandomData));
  202. /* Get adapter VPD information */
  203. lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
  204. if (!lpfc_vpd_data)
  205. goto out_free_mbox;
  206. do {
  207. lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
  208. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  209. if (rc != MBX_SUCCESS) {
  210. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  211. "0441 VPD not present on adapter, "
  212. "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
  213. mb->mbxCommand, mb->mbxStatus);
  214. mb->un.varDmp.word_cnt = 0;
  215. }
  216. /* dump mem may return a zero word count when finished, or we got a
  217. * mailbox error; either way we are done.
  218. */
  219. if (mb->un.varDmp.word_cnt == 0)
  220. break;
  221. if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
  222. mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
  223. lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
  224. lpfc_vpd_data + offset,
  225. mb->un.varDmp.word_cnt);
  226. offset += mb->un.varDmp.word_cnt;
  227. } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
  228. lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
  229. kfree(lpfc_vpd_data);
  230. out_free_mbox:
  231. mempool_free(pmb, phba->mbox_mem_pool);
  232. return 0;
  233. }
  234. /**
  235. * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
  236. * @phba: pointer to lpfc hba data structure.
  237. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  238. *
  239. * This is the completion handler for the driver's configure-asynchronous-event
  240. * mailbox command to the device. If the mailbox command returns successfully,
  241. * it will set the internal async event support flag to 1; otherwise, it will
  242. * set the internal async event support flag to 0.
  243. **/
  244. static void
  245. lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
  246. {
  247. if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
  248. phba->temp_sensor_support = 1;
  249. else
  250. phba->temp_sensor_support = 0;
  251. mempool_free(pmboxq, phba->mbox_mem_pool);
  252. return;
  253. }
  254. /**
  255. * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
  256. * @phba: pointer to lpfc hba data structure.
  257. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  258. *
  259. * This is the completion handler for the dump mailbox command for getting
  260. * wake up parameters. When this command completes, the response contains the
  261. * Option ROM version of the HBA. This function translates the version number
  262. * into a human readable string and stores it in OptionROMVersion.
  263. **/
  264. static void
  265. lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
  266. {
  267. struct prog_id *prg;
  268. uint32_t prog_id_word;
  269. char dist = ' ';
  270. /* character array used for decoding dist type. */
  271. char dist_char[] = "nabx";
  272. if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
  273. mempool_free(pmboxq, phba->mbox_mem_pool);
  274. return;
  275. }
  276. prg = (struct prog_id *) &prog_id_word;
  277. /* word 7 contains the Option ROM version */
  278. prog_id_word = pmboxq->u.mb.un.varWords[7];
  279. /* Decode the Option ROM version word to a readable string */
  280. if (prg->dist < 4)
  281. dist = dist_char[prg->dist];
  282. if ((prg->dist == 3) && (prg->num == 0))
  283. snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
  284. prg->ver, prg->rev, prg->lev);
  285. else
  286. snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
  287. prg->ver, prg->rev, prg->lev,
  288. dist, prg->num);
  289. mempool_free(pmboxq, phba->mbox_mem_pool);
  290. return;
  291. }
  292. /**
  293. * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
  294. * cfg_soft_wwnn, cfg_soft_wwpn
  295. * @vport: pointer to lpfc vport data structure.
  296. *
  297. *
  298. * Return codes
  299. * None.
  300. **/
  301. void
  302. lpfc_update_vport_wwn(struct lpfc_vport *vport)
  303. {
  304. /* If the soft name exists then update it using the service params */
  305. if (vport->phba->cfg_soft_wwnn)
  306. u64_to_wwn(vport->phba->cfg_soft_wwnn,
  307. vport->fc_sparam.nodeName.u.wwn);
  308. if (vport->phba->cfg_soft_wwpn)
  309. u64_to_wwn(vport->phba->cfg_soft_wwpn,
  310. vport->fc_sparam.portName.u.wwn);
  311. /*
  312. * If the name is empty or there exists a soft name
  313. * then copy the service params name, otherwise use the fc name
  314. */
  315. if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
  316. memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
  317. sizeof(struct lpfc_name));
  318. else
  319. memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
  320. sizeof(struct lpfc_name));
  321. if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
  322. memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
  323. sizeof(struct lpfc_name));
  324. else
  325. memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
  326. sizeof(struct lpfc_name));
  327. }
  328. /**
  329. * lpfc_config_port_post - Perform lpfc initialization after config port
  330. * @phba: pointer to lpfc hba data structure.
  331. *
  332. * This routine will do LPFC initialization after the CONFIG_PORT mailbox
  333. * command call. It performs all internal resource and state setups on the
  334. * port: post IOCB buffers, enable appropriate host interrupt attentions,
  335. * ELS ring timers, etc.
  336. *
  337. * Return codes
  338. * 0 - success.
  339. * Any other value - error.
  340. **/
  341. int
  342. lpfc_config_port_post(struct lpfc_hba *phba)
  343. {
  344. struct lpfc_vport *vport = phba->pport;
  345. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  346. LPFC_MBOXQ_t *pmb;
  347. MAILBOX_t *mb;
  348. struct lpfc_dmabuf *mp;
  349. struct lpfc_sli *psli = &phba->sli;
  350. uint32_t status, timeout;
  351. int i, j;
  352. int rc;
  353. spin_lock_irq(&phba->hbalock);
  354. /*
  355. * If the Config port completed correctly the HBA is not
  356. * overheated any more.
  357. */
  358. if (phba->over_temp_state == HBA_OVER_TEMP)
  359. phba->over_temp_state = HBA_NORMAL_TEMP;
  360. spin_unlock_irq(&phba->hbalock);
  361. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  362. if (!pmb) {
  363. phba->link_state = LPFC_HBA_ERROR;
  364. return -ENOMEM;
  365. }
  366. mb = &pmb->u.mb;
  367. /* Get login parameters for NID. */
  368. rc = lpfc_read_sparam(phba, pmb, 0);
  369. if (rc) {
  370. mempool_free(pmb, phba->mbox_mem_pool);
  371. return -ENOMEM;
  372. }
  373. pmb->vport = vport;
  374. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  375. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  376. "0448 Adapter failed init, mbxCmd x%x "
  377. "READ_SPARM mbxStatus x%x\n",
  378. mb->mbxCommand, mb->mbxStatus);
  379. phba->link_state = LPFC_HBA_ERROR;
  380. mp = (struct lpfc_dmabuf *) pmb->context1;
  381. mempool_free(pmb, phba->mbox_mem_pool);
  382. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  383. kfree(mp);
  384. return -EIO;
  385. }
  386. mp = (struct lpfc_dmabuf *) pmb->context1;
  387. memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
  388. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  389. kfree(mp);
  390. pmb->context1 = NULL;
  391. lpfc_update_vport_wwn(vport);
  392. /* Update the fc_host data structures with new wwn. */
  393. fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
  394. fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
  395. fc_host_max_npiv_vports(shost) = phba->max_vpi;
  396. /* If no serial number in VPD data, use low 6 bytes of WWNN */
  397. /* This should be consolidated into parse_vpd ? - mr */
  398. if (phba->SerialNumber[0] == 0) {
  399. uint8_t *outptr;
  400. outptr = &vport->fc_nodename.u.s.IEEE[0];
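  /* Encode each nibble of the 6-byte IEEE address as a character: values 0-9 map to '0'-'9', 10-15 map to 'a'-'f' */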
  401. for (i = 0; i < 12; i++) {
  402. status = *outptr++;
  403. j = ((status & 0xf0) >> 4);
  404. if (j <= 9)
  405. phba->SerialNumber[i] =
  406. (char)((uint8_t) 0x30 + (uint8_t) j);
  407. else
  408. phba->SerialNumber[i] =
  409. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  410. i++;
  411. j = (status & 0xf);
  412. if (j <= 9)
  413. phba->SerialNumber[i] =
  414. (char)((uint8_t) 0x30 + (uint8_t) j);
  415. else
  416. phba->SerialNumber[i] =
  417. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  418. }
  419. }
  420. lpfc_read_config(phba, pmb);
  421. pmb->vport = vport;
  422. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  423. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  424. "0453 Adapter failed to init, mbxCmd x%x "
  425. "READ_CONFIG, mbxStatus x%x\n",
  426. mb->mbxCommand, mb->mbxStatus);
  427. phba->link_state = LPFC_HBA_ERROR;
  428. mempool_free( pmb, phba->mbox_mem_pool);
  429. return -EIO;
  430. }
  431. /* Check if the port is disabled */
  432. lpfc_sli_read_link_ste(phba);
  433. /* Reset the DFT_HBA_Q_DEPTH to the max xri */
  434. i = (mb->un.varRdConfig.max_xri + 1);
  435. if (phba->cfg_hba_queue_depth > i) {
  436. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  437. "3359 HBA queue depth changed from %d to %d\n",
  438. phba->cfg_hba_queue_depth, i);
  439. phba->cfg_hba_queue_depth = i;
  440. }
  441. /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
  442. i = (mb->un.varRdConfig.max_xri >> 3);
  443. if (phba->pport->cfg_lun_queue_depth > i) {
  444. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  445. "3360 LUN queue depth changed from %d to %d\n",
  446. phba->pport->cfg_lun_queue_depth, i);
  447. phba->pport->cfg_lun_queue_depth = i;
  448. }
  449. phba->lmt = mb->un.varRdConfig.lmt;
  450. /* Get the default values for Model Name and Description */
  451. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  452. phba->link_state = LPFC_LINK_DOWN;
  453. /* Only process IOCBs on ELS ring till hba_state is READY */
  454. if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
  455. psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
  456. if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
  457. psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
  458. /* Post receive buffers for desired rings */
  459. if (phba->sli_rev != 3)
  460. lpfc_post_rcv_buf(phba);
  461. /*
  462. * Configure HBA MSI-X attention conditions to messages if MSI-X mode
  463. */
  464. if (phba->intr_type == MSIX) {
  465. rc = lpfc_config_msi(phba, pmb);
  466. if (rc) {
  467. mempool_free(pmb, phba->mbox_mem_pool);
  468. return -EIO;
  469. }
  470. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  471. if (rc != MBX_SUCCESS) {
  472. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
  473. "0352 Config MSI mailbox command "
  474. "failed, mbxCmd x%x, mbxStatus x%x\n",
  475. pmb->u.mb.mbxCommand,
  476. pmb->u.mb.mbxStatus);
  477. mempool_free(pmb, phba->mbox_mem_pool);
  478. return -EIO;
  479. }
  480. }
  481. spin_lock_irq(&phba->hbalock);
  482. /* Initialize ERATT handling flag */
  483. phba->hba_flag &= ~HBA_ERATT_HANDLED;
  484. /* Enable appropriate host interrupts */
  485. if (lpfc_readl(phba->HCregaddr, &status)) {
  486. spin_unlock_irq(&phba->hbalock);
  487. return -EIO;
  488. }
  489. status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
  490. if (psli->num_rings > 0)
  491. status |= HC_R0INT_ENA;
  492. if (psli->num_rings > 1)
  493. status |= HC_R1INT_ENA;
  494. if (psli->num_rings > 2)
  495. status |= HC_R2INT_ENA;
  496. if (psli->num_rings > 3)
  497. status |= HC_R3INT_ENA;
  498. if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
  499. (phba->cfg_poll & DISABLE_FCP_RING_INT))
  500. status &= ~(HC_R0INT_ENA);
  501. writel(status, phba->HCregaddr);
  502. readl(phba->HCregaddr); /* flush */
  503. spin_unlock_irq(&phba->hbalock);
  504. /* Set up ring-0 (ELS) timer */
  505. timeout = phba->fc_ratov * 2;
  506. mod_timer(&vport->els_tmofunc,
  507. jiffies + msecs_to_jiffies(1000 * timeout));
  508. /* Set up heart beat (HB) timer */
  509. mod_timer(&phba->hb_tmofunc,
  510. jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  511. phba->hb_outstanding = 0;
  512. phba->last_completion_time = jiffies;
  513. /* Set up error attention (ERATT) polling timer */
  514. mod_timer(&phba->eratt_poll,
  515. jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
  516. if (phba->hba_flag & LINK_DISABLED) {
  517. lpfc_printf_log(phba,
  518. KERN_ERR, LOG_INIT,
  519. "2598 Adapter Link is disabled.\n");
  520. lpfc_down_link(phba, pmb);
  521. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  522. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  523. if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
  524. lpfc_printf_log(phba,
  525. KERN_ERR, LOG_INIT,
  526. "2599 Adapter failed to issue DOWN_LINK"
  527. " mbox command rc 0x%x\n", rc);
  528. mempool_free(pmb, phba->mbox_mem_pool);
  529. return -EIO;
  530. }
  531. } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
  532. mempool_free(pmb, phba->mbox_mem_pool);
  533. rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
  534. if (rc)
  535. return rc;
  536. }
  537. /* MBOX buffer will be freed in mbox compl */
  538. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  539. if (!pmb) {
  540. phba->link_state = LPFC_HBA_ERROR;
  541. return -ENOMEM;
  542. }
  543. lpfc_config_async(phba, pmb, LPFC_ELS_RING);
  544. pmb->mbox_cmpl = lpfc_config_async_cmpl;
  545. pmb->vport = phba->pport;
  546. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  547. if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
  548. lpfc_printf_log(phba,
  549. KERN_ERR,
  550. LOG_INIT,
  551. "0456 Adapter failed to issue "
  552. "ASYNCEVT_ENABLE mbox status x%x\n",
  553. rc);
  554. mempool_free(pmb, phba->mbox_mem_pool);
  555. }
  556. /* Get Option rom version */
  557. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  558. if (!pmb) {
  559. phba->link_state = LPFC_HBA_ERROR;
  560. return -ENOMEM;
  561. }
  562. lpfc_dump_wakeup_param(phba, pmb);
  563. pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
  564. pmb->vport = phba->pport;
  565. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  566. if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
  567. lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
  568. "to get Option ROM version status x%x\n", rc);
  569. mempool_free(pmb, phba->mbox_mem_pool);
  570. }
  571. return 0;
  572. }
  573. /**
  574. * lpfc_hba_init_link - Initialize the FC link
  575. * @phba: pointer to lpfc hba data structure.
  576. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  577. *
  578. * This routine will issue the INIT_LINK mailbox command call.
  579. * It is available to other drivers through the lpfc_hba data
  580. * structure for use as a delayed link up mechanism with the
  581. * module parameter lpfc_suppress_link_up.
  582. *
  583. * Return code
  584. * 0 - success
  585. * Any other value - error
  586. **/
  587. static int
  588. lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
  589. {
  590. return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
  591. }
  592. /**
  593. * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
  594. * @phba: pointer to lpfc hba data structure.
  595. * @fc_topology: desired fc topology.
  596. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  597. *
  598. * This routine will issue the INIT_LINK mailbox command call.
  599. * It is available to other drivers through the lpfc_hba data
  600. * structure for use as a delayed link up mechanism with the
  601. * module parameter lpfc_suppress_link_up.
  602. *
  603. * Return code
  604. * 0 - success
  605. * Any other value - error
  606. **/
  607. int
  608. lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
  609. uint32_t flag)
  610. {
  611. struct lpfc_vport *vport = phba->pport;
  612. LPFC_MBOXQ_t *pmb;
  613. MAILBOX_t *mb;
  614. int rc;
  615. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  616. if (!pmb) {
  617. phba->link_state = LPFC_HBA_ERROR;
  618. return -ENOMEM;
  619. }
  620. mb = &pmb->u.mb;
  621. pmb->vport = vport;
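  /* Validate the requested link speed against the adapter's supported link-speed mask (lmt); fall back to auto-negotiation if it is not supported */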
  622. if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
  623. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
  624. !(phba->lmt & LMT_1Gb)) ||
  625. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
  626. !(phba->lmt & LMT_2Gb)) ||
  627. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
  628. !(phba->lmt & LMT_4Gb)) ||
  629. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
  630. !(phba->lmt & LMT_8Gb)) ||
  631. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
  632. !(phba->lmt & LMT_10Gb)) ||
  633. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
  634. !(phba->lmt & LMT_16Gb)) ||
  635. ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
  636. !(phba->lmt & LMT_32Gb))) {
  637. /* Reset link speed to auto */
  638. lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
  639. "1302 Invalid speed for this board:%d "
  640. "Reset link speed to auto.\n",
  641. phba->cfg_link_speed);
  642. phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
  643. }
  644. lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
  645. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  646. if (phba->sli_rev < LPFC_SLI_REV4)
  647. lpfc_set_loopback_flag(phba);
  648. rc = lpfc_sli_issue_mbox(phba, pmb, flag);
  649. if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
  650. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  651. "0498 Adapter failed to init, mbxCmd x%x "
  652. "INIT_LINK, mbxStatus x%x\n",
  653. mb->mbxCommand, mb->mbxStatus);
  654. if (phba->sli_rev <= LPFC_SLI_REV3) {
  655. /* Clear all interrupt enable conditions */
  656. writel(0, phba->HCregaddr);
  657. readl(phba->HCregaddr); /* flush */
  658. /* Clear all pending interrupts */
  659. writel(0xffffffff, phba->HAregaddr);
  660. readl(phba->HAregaddr); /* flush */
  661. }
  662. phba->link_state = LPFC_HBA_ERROR;
  663. if (rc != MBX_BUSY || flag == MBX_POLL)
  664. mempool_free(pmb, phba->mbox_mem_pool);
  665. return -EIO;
  666. }
  667. phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
  668. if (flag == MBX_POLL)
  669. mempool_free(pmb, phba->mbox_mem_pool);
  670. return 0;
  671. }
  672. /**
  673. * lpfc_hba_down_link - this routine downs the FC link
  674. * @phba: pointer to lpfc hba data structure.
  675. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
  676. *
  677. * This routine will issue the DOWN_LINK mailbox command call.
  678. * It is available to other drivers through the lpfc_hba data
  679. * structure for use to stop the link.
  680. *
  681. * Return code
  682. * 0 - success
  683. * Any other value - error
  684. **/
  685. static int
  686. lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
  687. {
  688. LPFC_MBOXQ_t *pmb;
  689. int rc;
  690. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  691. if (!pmb) {
  692. phba->link_state = LPFC_HBA_ERROR;
  693. return -ENOMEM;
  694. }
  695. lpfc_printf_log(phba,
  696. KERN_ERR, LOG_INIT,
  697. "0491 Adapter Link is disabled.\n");
  698. lpfc_down_link(phba, pmb);
  699. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  700. rc = lpfc_sli_issue_mbox(phba, pmb, flag);
  701. if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
  702. lpfc_printf_log(phba,
  703. KERN_ERR, LOG_INIT,
  704. "2522 Adapter failed to issue DOWN_LINK"
  705. " mbox command rc 0x%x\n", rc);
  706. mempool_free(pmb, phba->mbox_mem_pool);
  707. return -EIO;
  708. }
  709. if (flag == MBX_POLL)
  710. mempool_free(pmb, phba->mbox_mem_pool);
  711. return 0;
  712. }
  713. /**
  714. * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
  715. * @phba: pointer to lpfc HBA data structure.
  716. *
  717. * This routine will do LPFC uninitialization before the HBA is reset when
  718. * bringing down the SLI Layer.
  719. *
  720. * Return codes
  721. * 0 - success.
  722. * Any other value - error.
  723. **/
  724. int
  725. lpfc_hba_down_prep(struct lpfc_hba *phba)
  726. {
  727. struct lpfc_vport **vports;
  728. int i;
  729. if (phba->sli_rev <= LPFC_SLI_REV3) {
  730. /* Disable interrupts */
  731. writel(0, phba->HCregaddr);
  732. readl(phba->HCregaddr); /* flush */
  733. }
  734. if (phba->pport->load_flag & FC_UNLOADING)
  735. lpfc_cleanup_discovery_resources(phba->pport);
  736. else {
  737. vports = lpfc_create_vport_work_array(phba);
  738. if (vports != NULL)
  739. for (i = 0; i <= phba->max_vports &&
  740. vports[i] != NULL; i++)
  741. lpfc_cleanup_discovery_resources(vports[i]);
  742. lpfc_destroy_vport_work_array(phba, vports);
  743. }
  744. return 0;
  745. }
  746. /**
  747. * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
  748. * rspiocb which got deferred
  749. *
  750. * @phba: pointer to lpfc HBA data structure.
  751. *
  752. * This routine will cleanup completed slow path events after HBA is reset
  753. * when bringing down the SLI Layer.
  754. *
  755. *
  756. * Return codes
  757. * void.
  758. **/
  759. static void
  760. lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
  761. {
  762. struct lpfc_iocbq *rspiocbq;
  763. struct hbq_dmabuf *dmabuf;
  764. struct lpfc_cq_event *cq_event;
  765. spin_lock_irq(&phba->hbalock);
  766. phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
  767. spin_unlock_irq(&phba->hbalock);
  768. while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
  769. /* Get the response iocb from the head of work queue */
  770. spin_lock_irq(&phba->hbalock);
  771. list_remove_head(&phba->sli4_hba.sp_queue_event,
  772. cq_event, struct lpfc_cq_event, list);
  773. spin_unlock_irq(&phba->hbalock);
  774. switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
  775. case CQE_CODE_COMPL_WQE:
  776. rspiocbq = container_of(cq_event, struct lpfc_iocbq,
  777. cq_event);
  778. lpfc_sli_release_iocbq(phba, rspiocbq);
  779. break;
  780. case CQE_CODE_RECEIVE:
  781. case CQE_CODE_RECEIVE_V1:
  782. dmabuf = container_of(cq_event, struct hbq_dmabuf,
  783. cq_event);
  784. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  785. }
  786. }
  787. }
  788. /**
  789. * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
  790. * @phba: pointer to lpfc HBA data structure.
  791. *
  792. * This routine will cleanup posted ELS buffers after the HBA is reset
  793. * when bringing down the SLI Layer.
  794. *
  795. *
  796. * Return codes
  797. * void.
  798. **/
  799. static void
  800. lpfc_hba_free_post_buf(struct lpfc_hba *phba)
  801. {
  802. struct lpfc_sli *psli = &phba->sli;
  803. struct lpfc_sli_ring *pring;
  804. struct lpfc_dmabuf *mp, *next_mp;
  805. LIST_HEAD(buflist);
  806. int count;
  807. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
  808. lpfc_sli_hbqbuf_free_all(phba);
  809. else {
  810. /* Cleanup preposted buffers on the ELS ring */
  811. pring = &psli->sli3_ring[LPFC_ELS_RING];
  812. spin_lock_irq(&phba->hbalock);
  813. list_splice_init(&pring->postbufq, &buflist);
  814. spin_unlock_irq(&phba->hbalock);
  815. count = 0;
  816. list_for_each_entry_safe(mp, next_mp, &buflist, list) {
  817. list_del(&mp->list);
  818. count++;
  819. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  820. kfree(mp);
  821. }
  822. spin_lock_irq(&phba->hbalock);
  823. pring->postbufq_cnt -= count;
  824. spin_unlock_irq(&phba->hbalock);
  825. }
  826. }
  827. /**
  828. * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
  829. * @phba: pointer to lpfc HBA data structure.
  830. *
  831. * This routine will cleanup the txcmplq after the HBA is reset when bringing
  832. * down the SLI Layer.
  833. *
  834. * Return codes
  835. * void
  836. **/
  837. static void
  838. lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
  839. {
  840. struct lpfc_sli *psli = &phba->sli;
  841. struct lpfc_queue *qp = NULL;
  842. struct lpfc_sli_ring *pring;
  843. LIST_HEAD(completions);
  844. int i;
  845. if (phba->sli_rev != LPFC_SLI_REV4) {
  846. for (i = 0; i < psli->num_rings; i++) {
  847. pring = &psli->sli3_ring[i];
  848. spin_lock_irq(&phba->hbalock);
  849. /* At this point in time the HBA is either reset or DOA.
  850. * Nothing should be on txcmplq as it will
  851. * NEVER complete.
  852. */
  853. list_splice_init(&pring->txcmplq, &completions);
  854. pring->txcmplq_cnt = 0;
  855. spin_unlock_irq(&phba->hbalock);
  856. lpfc_sli_abort_iocb_ring(phba, pring);
  857. }
  858. /* Cancel all the IOCBs from the completions list */
  859. lpfc_sli_cancel_iocbs(phba, &completions,
  860. IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
  861. return;
  862. }
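  /* SLI4: walk every work queue's ring and flush its txcmplq under the ring lock */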
  863. list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  864. pring = qp->pring;
  865. if (!pring)
  866. continue;
  867. spin_lock_irq(&pring->ring_lock);
  868. list_splice_init(&pring->txcmplq, &completions);
  869. pring->txcmplq_cnt = 0;
  870. spin_unlock_irq(&pring->ring_lock);
  871. lpfc_sli_abort_iocb_ring(phba, pring);
  872. }
  873. /* Cancel all the IOCBs from the completions list */
  874. lpfc_sli_cancel_iocbs(phba, &completions,
  875. IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
  876. }
  877. /**
  878. * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
  879. *
  880. * @phba: pointer to lpfc HBA data structure.
  881. *
  882. * This routine will do uninitialization after the HBA is reset when bringing
  883. * down the SLI Layer.
  884. *
  885. * Return codes
  886. * 0 - success.
  887. * Any other value - error.
  888. **/
  889. static int
  890. lpfc_hba_down_post_s3(struct lpfc_hba *phba)
  891. {
  892. lpfc_hba_free_post_buf(phba);
  893. lpfc_hba_clean_txcmplq(phba);
  894. return 0;
  895. }
  896. /**
  897. * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
  898. * @phba: pointer to lpfc HBA data structure.
  899. *
  900. * This routine will do uninitialization after the HBA is reset when bringing
  901. * down the SLI Layer.
  902. *
  903. * Return codes
  904. * 0 - success.
  905. * Any other value - error.
  906. **/
  907. static int
  908. lpfc_hba_down_post_s4(struct lpfc_hba *phba)
  909. {
  910. struct lpfc_scsi_buf *psb, *psb_next;
  911. LIST_HEAD(aborts);
  912. LIST_HEAD(nvme_aborts);
  913. unsigned long iflag = 0;
  914. struct lpfc_sglq *sglq_entry = NULL;
  915. lpfc_sli_hbqbuf_free_all(phba);
  916. lpfc_hba_clean_txcmplq(phba);
  917. /* At this point in time the HBA is either reset or DOA. Either
  918. * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
  919. * on the lpfc_els_sgl_list so that it can either be freed if the
  920. * driver is unloading or reposted if the driver is restarting
  921. * the port.
  922. */
  923. spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
  924. /* scsi_buf_list */
  925. /* sgl_list_lock required because worker thread uses this
  926. * list.
  927. */
  928. spin_lock(&phba->sli4_hba.sgl_list_lock);
  929. list_for_each_entry(sglq_entry,
  930. &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
  931. sglq_entry->state = SGL_FREED;
  932. list_for_each_entry(sglq_entry,
  933. &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
  934. sglq_entry->state = SGL_FREED;
  935. list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
  936. &phba->sli4_hba.lpfc_els_sgl_list);
  937. if (phba->sli4_hba.nvme_wq)
  938. list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
  939. &phba->sli4_hba.lpfc_nvmet_sgl_list);
  940. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  941. /* abts_scsi_buf_list_lock required because worker thread uses this
  942. * list.
  943. */
  944. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
  945. spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
  946. list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
  947. &aborts);
  948. spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
  949. }
  950. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  951. spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  952. list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
  953. &nvme_aborts);
  954. spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
  955. }
  956. spin_unlock_irq(&phba->hbalock);
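  /* Hand the aborted SCSI and NVMe buffers back to their free (put) lists after clearing their command state */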
  957. list_for_each_entry_safe(psb, psb_next, &aborts, list) {
  958. psb->pCmd = NULL;
  959. psb->status = IOSTAT_SUCCESS;
  960. }
  961. spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
  962. list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
  963. spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
  964. list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
  965. psb->pCmd = NULL;
  966. psb->status = IOSTAT_SUCCESS;
  967. }
  968. spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
  969. list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
  970. spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
  971. lpfc_sli4_free_sp_events(phba);
  972. return 0;
  973. }
  974. /**
  975. * lpfc_hba_down_post - Wrapper func for hba down post routine
  976. * @phba: pointer to lpfc HBA data structure.
  977. *
  978. * This routine wraps the actual SLI3 or SLI4 routine for performing
  979. * uninitialization after the HBA is reset when bringing down the SLI Layer.
  980. *
  981. * Return codes
  982. * 0 - success.
  983. * Any other value - error.
  984. **/
  985. int
  986. lpfc_hba_down_post(struct lpfc_hba *phba)
  987. {
  988. return (*phba->lpfc_hba_down_post)(phba);
  989. }
  990. /**
  991. * lpfc_hb_timeout - The HBA-timer timeout handler
  992. * @ptr: unsigned long holds the pointer to lpfc hba data structure.
  993. *
  994. * This is the HBA-timer timeout handler registered to the lpfc driver. When
  995. * this timer fires, a HBA timeout event shall be posted to the lpfc driver
  996. * work-port-events bitmap and the worker thread is notified. This timeout
  997. * event will be used by the worker thread to invoke the actual timeout
  998. * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
  999. * be performed in the timeout handler and the HBA timeout event bit shall
  1000. * be cleared by the worker thread after it has taken the event bitmap out.
  1001. **/
  1002. static void
  1003. lpfc_hb_timeout(unsigned long ptr)
  1004. {
  1005. struct lpfc_hba *phba;
  1006. uint32_t tmo_posted;
  1007. unsigned long iflag;
  1008. phba = (struct lpfc_hba *)ptr;
  1009. /* Check for heart beat timeout conditions */
  1010. spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
  1011. tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
  1012. if (!tmo_posted)
  1013. phba->pport->work_port_events |= WORKER_HB_TMO;
  1014. spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
  1015. /* Tell the worker thread there is work to do */
  1016. if (!tmo_posted)
  1017. lpfc_worker_wake_up(phba);
  1018. return;
  1019. }
  1020. /**
  1021. * lpfc_rrq_timeout - The RRQ-timer timeout handler
  1022. * @ptr: unsigned long holds the pointer to lpfc hba data structure.
  1023. *
  1024. * This is the RRQ-timer timeout handler registered to the lpfc driver. When
  1025. * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
  1026. * work-port-events bitmap and the worker thread is notified. This timeout
  1027. * event will be used by the worker thread to invoke the actual timeout
  1028. * handler routine, lpfc_rrq_handler. Any periodical operations will
  1029. * be performed in the timeout handler and the RRQ timeout event bit shall
  1030. * be cleared by the worker thread after it has taken the event bitmap out.
  1031. **/
  1032. static void
  1033. lpfc_rrq_timeout(unsigned long ptr)
  1034. {
  1035. struct lpfc_hba *phba;
  1036. unsigned long iflag;
  1037. phba = (struct lpfc_hba *)ptr;
  1038. spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
  1039. if (!(phba->pport->load_flag & FC_UNLOADING))
  1040. phba->hba_flag |= HBA_RRQ_ACTIVE;
  1041. else
  1042. phba->hba_flag &= ~HBA_RRQ_ACTIVE;
  1043. spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
  1044. if (!(phba->pport->load_flag & FC_UNLOADING))
  1045. lpfc_worker_wake_up(phba);
  1046. }
  1047. /**
  1048. * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
  1049. * @phba: pointer to lpfc hba data structure.
  1050. * @pmboxq: pointer to the driver internal queue element for mailbox command.
  1051. *
  1052. * This is the callback function to the lpfc heart-beat mailbox command.
  1053. * If configured, the lpfc driver issues the heart-beat mailbox command to
  1054. * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
  1055. * heart-beat mailbox command is issued, the driver shall set up the heart-beat
  1056. * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
  1057. * heart-beat outstanding state. Once the mailbox command comes back and
  1058. * no error conditions detected, the heart-beat mailbox command timer is
  1059. * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
  1060. * state is cleared for the next heart-beat. If the timer expired with the
  1061. * heart-beat outstanding state set, the driver will put the HBA offline.
  1062. **/
  1063. static void
  1064. lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
  1065. {
  1066. unsigned long drvr_flag;
  1067. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  1068. phba->hb_outstanding = 0;
  1069. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  1070. /* Check and reset heart-beat timer if necessary */
  1071. mempool_free(pmboxq, phba->mbox_mem_pool);
  1072. if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
  1073. !(phba->link_state == LPFC_HBA_ERROR) &&
  1074. !(phba->pport->load_flag & FC_UNLOADING))
  1075. mod_timer(&phba->hb_tmofunc,
  1076. jiffies +
  1077. msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  1078. return;
  1079. }
  1080. /**
  1081. * lpfc_hb_timeout_handler - The HBA-timer timeout handler
  1082. * @phba: pointer to lpfc hba data structure.
  1083. *
  1084. * This is the actual HBA-timer timeout handler to be invoked by the worker
  1085. * thread whenever the HBA timer fired and HBA-timeout event posted. This
  1086. * handler performs any periodic operations needed for the device. If such
  1087. * periodic event has already been attended to either in the interrupt handler
  1088. * or by processing slow-ring or fast-ring events within the HBA-timer
  1089. * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
  1090. * the timer for the next timeout period. If lpfc heart-beat mailbox command
  1091. * is configured and there is no heart-beat mailbox command outstanding, a
  1092. * heart-beat mailbox is issued and timer set properly. Otherwise, if there
  1093. * has been a heart-beat mailbox command outstanding, the HBA shall be put
  1094. * to offline.
  1095. **/
  1096. void
  1097. lpfc_hb_timeout_handler(struct lpfc_hba *phba)
  1098. {
  1099. struct lpfc_vport **vports;
  1100. LPFC_MBOXQ_t *pmboxq;
  1101. struct lpfc_dmabuf *buf_ptr;
  1102. int retval, i;
  1103. struct lpfc_sli *psli = &phba->sli;
  1104. LIST_HEAD(completions);
  1105. vports = lpfc_create_vport_work_array(phba);
  1106. if (vports != NULL)
  1107. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  1108. lpfc_rcv_seq_check_edtov(vports[i]);
  1109. lpfc_fdmi_num_disc_check(vports[i]);
  1110. }
  1111. lpfc_destroy_vport_work_array(phba, vports);
  1112. if ((phba->link_state == LPFC_HBA_ERROR) ||
  1113. (phba->pport->load_flag & FC_UNLOADING) ||
  1114. (phba->pport->fc_flag & FC_OFFLINE_MODE))
  1115. return;
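  /* If an I/O completed within the last heartbeat interval, just rearm the timer and skip issuing a heartbeat */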
  1116. spin_lock_irq(&phba->pport->work_port_lock);
  1117. if (time_after(phba->last_completion_time +
  1118. msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
  1119. jiffies)) {
  1120. spin_unlock_irq(&phba->pport->work_port_lock);
  1121. if (!phba->hb_outstanding)
  1122. mod_timer(&phba->hb_tmofunc,
  1123. jiffies +
  1124. msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  1125. else
  1126. mod_timer(&phba->hb_tmofunc,
  1127. jiffies +
  1128. msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
  1129. return;
  1130. }
  1131. spin_unlock_irq(&phba->pport->work_port_lock);
  1132. if (phba->elsbuf_cnt &&
  1133. (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
  1134. spin_lock_irq(&phba->hbalock);
  1135. list_splice_init(&phba->elsbuf, &completions);
  1136. phba->elsbuf_cnt = 0;
  1137. phba->elsbuf_prev_cnt = 0;
  1138. spin_unlock_irq(&phba->hbalock);
  1139. while (!list_empty(&completions)) {
  1140. list_remove_head(&completions, buf_ptr,
  1141. struct lpfc_dmabuf, list);
  1142. lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
  1143. kfree(buf_ptr);
  1144. }
  1145. }
  1146. phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
  1147. /* If there is no heart beat outstanding, issue a heartbeat command */
  1148. if (phba->cfg_enable_hba_heartbeat) {
  1149. if (!phba->hb_outstanding) {
  1150. if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
  1151. (list_empty(&psli->mboxq))) {
  1152. pmboxq = mempool_alloc(phba->mbox_mem_pool,
  1153. GFP_KERNEL);
  1154. if (!pmboxq) {
  1155. mod_timer(&phba->hb_tmofunc,
  1156. jiffies +
  1157. msecs_to_jiffies(1000 *
  1158. LPFC_HB_MBOX_INTERVAL));
  1159. return;
  1160. }
  1161. lpfc_heart_beat(phba, pmboxq);
  1162. pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
  1163. pmboxq->vport = phba->pport;
  1164. retval = lpfc_sli_issue_mbox(phba, pmboxq,
  1165. MBX_NOWAIT);
  1166. if (retval != MBX_BUSY &&
  1167. retval != MBX_SUCCESS) {
  1168. mempool_free(pmboxq,
  1169. phba->mbox_mem_pool);
  1170. mod_timer(&phba->hb_tmofunc,
  1171. jiffies +
  1172. msecs_to_jiffies(1000 *
  1173. LPFC_HB_MBOX_INTERVAL));
  1174. return;
  1175. }
  1176. phba->skipped_hb = 0;
  1177. phba->hb_outstanding = 1;
  1178. } else if (time_before_eq(phba->last_completion_time,
  1179. phba->skipped_hb)) {
  1180. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1181. "2857 Last completion time not "
  1182. " updated in %d ms\n",
  1183. jiffies_to_msecs(jiffies
  1184. - phba->last_completion_time));
  1185. } else
  1186. phba->skipped_hb = jiffies;
  1187. mod_timer(&phba->hb_tmofunc,
  1188. jiffies +
  1189. msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
  1190. return;
  1191. } else {
  1192. /*
  1193. * If heart beat timeout called with hb_outstanding set
  1194. * we need to give the hb mailbox cmd a chance to
  1195. * complete or TMO.
  1196. */
  1197. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  1198. "0459 Adapter heartbeat still out"
  1199. "standing:last compl time was %d ms.\n",
  1200. jiffies_to_msecs(jiffies
  1201. - phba->last_completion_time));
  1202. mod_timer(&phba->hb_tmofunc,
  1203. jiffies +
  1204. msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
  1205. }
  1206. } else {
  1207. mod_timer(&phba->hb_tmofunc,
  1208. jiffies +
  1209. msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  1210. }
  1211. }
  1212. /**
  1213. * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
  1214. * @phba: pointer to lpfc hba data structure.
  1215. *
  1216. * This routine is called to bring the HBA offline when HBA hardware error
  1217. * other than Port Error 6 has been detected.
  1218. **/
  1219. static void
  1220. lpfc_offline_eratt(struct lpfc_hba *phba)
  1221. {
  1222. struct lpfc_sli *psli = &phba->sli;
  1223. spin_lock_irq(&phba->hbalock);
  1224. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  1225. spin_unlock_irq(&phba->hbalock);
  1226. lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
  1227. lpfc_offline(phba);
  1228. lpfc_reset_barrier(phba);
  1229. spin_lock_irq(&phba->hbalock);
  1230. lpfc_sli_brdreset(phba);
  1231. spin_unlock_irq(&phba->hbalock);
  1232. lpfc_hba_down_post(phba);
  1233. lpfc_sli_brdready(phba, HS_MBRDY);
  1234. lpfc_unblock_mgmt_io(phba);
  1235. phba->link_state = LPFC_HBA_ERROR;
  1236. return;
  1237. }
  1238. /**
  1239. * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
  1240. * @phba: pointer to lpfc hba data structure.
  1241. *
  1242. * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  1243. * other than Port Error 6 has been detected.
  1244. **/
  1245. void
  1246. lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
  1247. {
  1248. spin_lock_irq(&phba->hbalock);
  1249. phba->link_state = LPFC_HBA_ERROR;
  1250. spin_unlock_irq(&phba->hbalock);
  1251. lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
  1252. lpfc_offline(phba);
  1253. lpfc_hba_down_post(phba);
  1254. lpfc_unblock_mgmt_io(phba);
  1255. }
  1256. /**
  1257. * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
  1258. * @phba: pointer to lpfc hba data structure.
  1259. *
  1260. * This routine is invoked to handle the deferred HBA hardware error
  1261. * conditions. This type of error is indicated by the HBA setting ER1
  1262. * and another ER bit in the host status register. The driver will
  1263. * wait until the ER1 bit clears before handling the error condition.
  1264. **/
  1265. static void
  1266. lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
  1267. {
  1268. uint32_t old_host_status = phba->work_hs;
  1269. struct lpfc_sli *psli = &phba->sli;
  1270. /* If the pci channel is offline, ignore possible errors,
  1271. * since we cannot communicate with the pci card anyway.
  1272. */
  1273. if (pci_channel_offline(phba->pcidev)) {
  1274. spin_lock_irq(&phba->hbalock);
  1275. phba->hba_flag &= ~DEFER_ERATT;
  1276. spin_unlock_irq(&phba->hbalock);
  1277. return;
  1278. }
  1279. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1280. "0479 Deferred Adapter Hardware Error "
  1281. "Data: x%x x%x x%x\n",
  1282. phba->work_hs,
  1283. phba->work_status[0], phba->work_status[1]);
  1284. spin_lock_irq(&phba->hbalock);
  1285. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  1286. spin_unlock_irq(&phba->hbalock);
  1287. /*
  1288. * Firmware stops when it triggered erratt. That could cause the I/Os
  1289. * to be dropped by the firmware. Error out the iocbs (I/O) on the txcmplq
  1290. * and let the SCSI layer retry them after re-establishing link.
  1291. */
  1292. lpfc_sli_abort_fcp_rings(phba);
  1293. /*
  1294. * There was a firmware error. Take the hba offline and then
  1295. * attempt to restart it.
  1296. */
  1297. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  1298. lpfc_offline(phba);
  1299. /* Wait for the ER1 bit to clear.*/
  1300. while (phba->work_hs & HS_FFER1) {
  1301. msleep(100);
  1302. if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
  1303. phba->work_hs = UNPLUG_ERR ;
  1304. break;
  1305. }
  1306. /* If driver is unloading let the worker thread continue */
  1307. if (phba->pport->load_flag & FC_UNLOADING) {
  1308. phba->work_hs = 0;
  1309. break;
  1310. }
  1311. }
  1312. /*
  1313. * This is to protect against a race condition in which the
  1314. * first write to the host attention register clears the
  1315. * host status register.
  1316. */
  1317. if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
  1318. phba->work_hs = old_host_status & ~HS_FFER1;
  1319. spin_lock_irq(&phba->hbalock);
  1320. phba->hba_flag &= ~DEFER_ERATT;
  1321. spin_unlock_irq(&phba->hbalock);
  1322. phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
  1323. phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
  1324. }
  1325. static void
  1326. lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
  1327. {
  1328. struct lpfc_board_event_header board_event;
  1329. struct Scsi_Host *shost;
  1330. board_event.event_type = FC_REG_BOARD_EVENT;
  1331. board_event.subcategory = LPFC_EVENT_PORTINTERR;
  1332. shost = lpfc_shost_from_vport(phba->pport);
  1333. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1334. sizeof(board_event),
  1335. (char *) &board_event,
  1336. LPFC_NL_VENDOR_ID);
  1337. }
  1338. /**
  1339. * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
  1340. * @phba: pointer to lpfc hba data structure.
  1341. *
  1342. * This routine is invoked to handle the following HBA hardware error
  1343. * conditions:
  1344. * 1 - HBA error attention interrupt
  1345. * 2 - DMA ring index out of range
  1346. * 3 - Mailbox command came back as unknown
  1347. **/
  1348. static void
  1349. lpfc_handle_eratt_s3(struct lpfc_hba *phba)
  1350. {
  1351. struct lpfc_vport *vport = phba->pport;
  1352. struct lpfc_sli *psli = &phba->sli;
  1353. uint32_t event_data;
  1354. unsigned long temperature;
  1355. struct temp_event temp_event_data;
  1356. struct Scsi_Host *shost;
  1357. /* If the pci channel is offline, ignore possible errors,
  1358. * since we cannot communicate with the pci card anyway.
  1359. */
  1360. if (pci_channel_offline(phba->pcidev)) {
  1361. spin_lock_irq(&phba->hbalock);
  1362. phba->hba_flag &= ~DEFER_ERATT;
  1363. spin_unlock_irq(&phba->hbalock);
  1364. return;
  1365. }
  1366. /* If resets are disabled then leave the HBA alone and return */
  1367. if (!phba->cfg_enable_hba_reset)
  1368. return;
  1369. /* Send an internal error event to mgmt application */
  1370. lpfc_board_errevt_to_mgmt(phba);
  1371. if (phba->hba_flag & DEFER_ERATT)
  1372. lpfc_handle_deferred_eratt(phba);
  1373. if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
  1374. if (phba->work_hs & HS_FFER6)
  1375. /* Re-establishing Link */
  1376. lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
  1377. "1301 Re-establishing Link "
  1378. "Data: x%x x%x x%x\n",
  1379. phba->work_hs, phba->work_status[0],
  1380. phba->work_status[1]);
  1381. if (phba->work_hs & HS_FFER8)
  1382. /* Device Zeroization */
  1383. lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
  1384. "2861 Host Authentication device "
  1385. "zeroization Data:x%x x%x x%x\n",
  1386. phba->work_hs, phba->work_status[0],
  1387. phba->work_status[1]);
  1388. spin_lock_irq(&phba->hbalock);
  1389. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  1390. spin_unlock_irq(&phba->hbalock);
  1391. /*
  1392. * Firmware stops when it triggered erratt with HS_FFER6.
  1393. * That could cause the I/Os to be dropped by the firmware.
  1394. * Error out the iocbs (I/O) on the txcmplq and let the SCSI layer
  1395. * retry them after re-establishing link.
  1396. */
  1397. lpfc_sli_abort_fcp_rings(phba);
  1398. /*
  1399. * There was a firmware error. Take the hba offline and then
  1400. * attempt to restart it.
  1401. */
  1402. lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
  1403. lpfc_offline(phba);
  1404. lpfc_sli_brdrestart(phba);
  1405. if (lpfc_online(phba) == 0) { /* Initialize the HBA */
  1406. lpfc_unblock_mgmt_io(phba);
  1407. return;
  1408. }
  1409. lpfc_unblock_mgmt_io(phba);
  1410. } else if (phba->work_hs & HS_CRIT_TEMP) {
  1411. temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
  1412. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  1413. temp_event_data.event_code = LPFC_CRIT_TEMP;
  1414. temp_event_data.data = (uint32_t)temperature;
  1415. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1416. "0406 Adapter maximum temperature exceeded "
  1417. "(%ld), taking this port offline "
  1418. "Data: x%x x%x x%x\n",
  1419. temperature, phba->work_hs,
  1420. phba->work_status[0], phba->work_status[1]);
  1421. shost = lpfc_shost_from_vport(phba->pport);
  1422. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1423. sizeof(temp_event_data),
  1424. (char *) &temp_event_data,
  1425. SCSI_NL_VID_TYPE_PCI
  1426. | PCI_VENDOR_ID_EMULEX);
  1427. spin_lock_irq(&phba->hbalock);
  1428. phba->over_temp_state = HBA_OVER_TEMP;
  1429. spin_unlock_irq(&phba->hbalock);
  1430. lpfc_offline_eratt(phba);
  1431. } else {
  1432. /* The if clause above forces this code path when the status
  1433. * failure is a value other than FFER6. Do not take the adapter
  1434. * offline twice. This is the adapter hardware error path.
  1435. */
  1436. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1437. "0457 Adapter Hardware Error "
  1438. "Data: x%x x%x x%x\n",
  1439. phba->work_hs,
  1440. phba->work_status[0], phba->work_status[1]);
  1441. event_data = FC_REG_DUMP_EVENT;
  1442. shost = lpfc_shost_from_vport(vport);
  1443. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1444. sizeof(event_data), (char *) &event_data,
  1445. SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
  1446. lpfc_offline_eratt(phba);
  1447. }
  1448. return;
  1449. }
  1450. /**
  1451. * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
  1452. * @phba: pointer to lpfc hba data structure.
  1453. * @mbx_action: flag for mailbox shutdown action.
  1454. *
  1455. * This routine is invoked to perform an SLI4 port PCI function reset in
  1456. * response to port status register polling attention. It waits for port
  1457. * status register (ERR, RDY, RN) bits before proceeding with function reset.
  1458. * During this process, interrupt vectors are freed and later requested
  1459. * for handling possible port resource change.
  1460. **/
  1461. static int
  1462. lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
  1463. bool en_rn_msg)
  1464. {
  1465. int rc;
  1466. uint32_t intr_mode;
  1467. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  1468. LPFC_SLI_INTF_IF_TYPE_2) {
  1469. /*
  1470. * On an error status condition, the driver needs to wait for the
  1471. * port to be ready before performing the reset.
  1472. */
  1473. rc = lpfc_sli4_pdev_status_reg_wait(phba);
  1474. if (rc)
  1475. return rc;
  1476. }
  1477. /* need reset: attempt port recovery */
  1478. if (en_rn_msg)
  1479. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1480. "2887 Reset Needed: Attempting Port "
  1481. "Recovery...\n");
  1482. lpfc_offline_prep(phba, mbx_action);
  1483. lpfc_offline(phba);
  1484. /* release interrupt for possible resource change */
  1485. lpfc_sli4_disable_intr(phba);
  1486. lpfc_sli_brdrestart(phba);
  1487. /* request and enable interrupt */
  1488. intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
  1489. if (intr_mode == LPFC_INTR_ERROR) {
  1490. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1491. "3175 Failed to enable interrupt\n");
  1492. return -EIO;
  1493. }
  1494. phba->intr_mode = intr_mode;
  1495. rc = lpfc_online(phba);
  1496. if (rc == 0)
  1497. lpfc_unblock_mgmt_io(phba);
  1498. return rc;
  1499. }
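/*
 * Illustrative recovery flow (a sketch of the routine above, assuming an
 * if_type 2 port): wait for the port status register to report ready, then
 *
 *	lpfc_offline_prep()  ->  lpfc_offline()  ->  lpfc_sli4_disable_intr()
 *	->  lpfc_sli_brdrestart()  ->  lpfc_sli4_enable_intr()  ->  lpfc_online()
 *
 * The interrupt vectors are released and re-requested because the reset may
 * change the port's resource provisioning.
 */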
  1500. /**
  1501. * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
  1502. * @phba: pointer to lpfc hba data structure.
  1503. *
  1504. * This routine is invoked to handle the SLI4 HBA hardware error attention
  1505. * conditions.
  1506. **/
  1507. static void
  1508. lpfc_handle_eratt_s4(struct lpfc_hba *phba)
  1509. {
  1510. struct lpfc_vport *vport = phba->pport;
  1511. uint32_t event_data;
  1512. struct Scsi_Host *shost;
  1513. uint32_t if_type;
  1514. struct lpfc_register portstat_reg = {0};
  1515. uint32_t reg_err1, reg_err2;
  1516. uint32_t uerrlo_reg, uemasklo_reg;
  1517. uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
  1518. bool en_rn_msg = true;
  1519. struct temp_event temp_event_data;
  1520. struct lpfc_register portsmphr_reg;
  1521. int rc, i;
  1522. /* If the pci channel is offline, ignore possible errors, since
  1523. * we cannot communicate with the pci card anyway.
  1524. */
  1525. if (pci_channel_offline(phba->pcidev))
  1526. return;
  1527. memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
  1528. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  1529. switch (if_type) {
  1530. case LPFC_SLI_INTF_IF_TYPE_0:
  1531. pci_rd_rc1 = lpfc_readl(
  1532. phba->sli4_hba.u.if_type0.UERRLOregaddr,
  1533. &uerrlo_reg);
  1534. pci_rd_rc2 = lpfc_readl(
  1535. phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
  1536. &uemasklo_reg);
  1537. /* consider PCI bus read error as pci_channel_offline */
  1538. if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
  1539. return;
  1540. if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
  1541. lpfc_sli4_offline_eratt(phba);
  1542. return;
  1543. }
  1544. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1545. "7623 Checking UE recoverable");
  1546. for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
  1547. if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  1548. &portsmphr_reg.word0))
  1549. continue;
  1550. smphr_port_status = bf_get(lpfc_port_smphr_port_status,
  1551. &portsmphr_reg);
  1552. if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
  1553. LPFC_PORT_SEM_UE_RECOVERABLE)
  1554. break;
  1555. /* Sleep for 1 second before checking the semaphore */
  1556. msleep(1000);
  1557. }
  1558. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1559. "4827 smphr_port_status x%x : Waited %dSec",
  1560. smphr_port_status, i);
  1561. /* Recoverable UE, reset the HBA device */
  1562. if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
  1563. LPFC_PORT_SEM_UE_RECOVERABLE) {
  1564. for (i = 0; i < 20; i++) {
  1565. msleep(1000);
  1566. if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  1567. &portsmphr_reg.word0) &&
  1568. (LPFC_POST_STAGE_PORT_READY ==
  1569. bf_get(lpfc_port_smphr_port_status,
  1570. &portsmphr_reg))) {
  1571. rc = lpfc_sli4_port_sta_fn_reset(phba,
  1572. LPFC_MBX_NO_WAIT, en_rn_msg);
  1573. if (rc == 0)
  1574. return;
  1575. lpfc_printf_log(phba,
  1576. KERN_ERR, LOG_INIT,
  1577. "4215 Failed to recover UE");
  1578. break;
  1579. }
  1580. }
  1581. }
  1582. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1583. "7624 Firmware not ready: Failing UE recovery,"
  1584. " waited %dSec", i);
  1585. lpfc_sli4_offline_eratt(phba);
  1586. break;
  1587. case LPFC_SLI_INTF_IF_TYPE_2:
  1588. pci_rd_rc1 = lpfc_readl(
  1589. phba->sli4_hba.u.if_type2.STATUSregaddr,
  1590. &portstat_reg.word0);
  1591. /* consider PCI bus read error as pci_channel_offline */
  1592. if (pci_rd_rc1 == -EIO) {
  1593. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1594. "3151 PCI bus read access failure: x%x\n",
  1595. readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
  1596. return;
  1597. }
  1598. reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
  1599. reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
  1600. if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
  1601. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1602. "2889 Port Overtemperature event, "
  1603. "taking port offline Data: x%x x%x\n",
  1604. reg_err1, reg_err2);
  1605. phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
  1606. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  1607. temp_event_data.event_code = LPFC_CRIT_TEMP;
  1608. temp_event_data.data = 0xFFFFFFFF;
  1609. shost = lpfc_shost_from_vport(phba->pport);
  1610. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1611. sizeof(temp_event_data),
  1612. (char *)&temp_event_data,
  1613. SCSI_NL_VID_TYPE_PCI
  1614. | PCI_VENDOR_ID_EMULEX);
  1615. spin_lock_irq(&phba->hbalock);
  1616. phba->over_temp_state = HBA_OVER_TEMP;
  1617. spin_unlock_irq(&phba->hbalock);
  1618. lpfc_sli4_offline_eratt(phba);
  1619. return;
  1620. }
  1621. if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1622. reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
  1623. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1624. "3143 Port Down: Firmware Update "
  1625. "Detected\n");
  1626. en_rn_msg = false;
  1627. } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1628. reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
  1629. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1630. "3144 Port Down: Debug Dump\n");
  1631. else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1632. reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
  1633. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1634. "3145 Port Down: Provisioning\n");
  1635. /* If resets are disabled then leave the HBA alone and return */
  1636. if (!phba->cfg_enable_hba_reset)
  1637. return;
  1638. /* Check port status register for function reset */
  1639. rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
  1640. en_rn_msg);
  1641. if (rc == 0) {
  1642. /* don't report event on forced debug dump */
  1643. if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1644. reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
  1645. return;
  1646. else
  1647. break;
  1648. }
  1649. /* fall through when unable to recover */
  1650. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1651. "3152 Unrecoverable error, bring the port "
  1652. "offline\n");
  1653. lpfc_sli4_offline_eratt(phba);
  1654. break;
  1655. case LPFC_SLI_INTF_IF_TYPE_1:
  1656. default:
  1657. break;
  1658. }
  1659. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  1660. "3123 Report dump event to upper layer\n");
  1661. /* Send an internal error event to mgmt application */
  1662. lpfc_board_errevt_to_mgmt(phba);
  1663. event_data = FC_REG_DUMP_EVENT;
  1664. shost = lpfc_shost_from_vport(vport);
  1665. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1666. sizeof(event_data), (char *) &event_data,
  1667. SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
  1668. }
  1669. /**
  1670. * lpfc_handle_eratt - Wrapper func for handling hba error attention
  1671. * @phba: pointer to lpfc HBA data structure.
  1672. *
  1673. * This routine wraps the actual SLI3 or SLI4 hba error attention handling
  1674. * routine from the API jump table function pointer from the lpfc_hba struct.
  1675. *
  1676. * Return codes
  1677. * 0 - success.
  1678. * Any other value - error.
  1679. **/
  1680. void
  1681. lpfc_handle_eratt(struct lpfc_hba *phba)
  1682. {
  1683. (*phba->lpfc_handle_eratt)(phba);
  1684. }
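/*
 * Note (an assumption based on the wrapper above): phba->lpfc_handle_eratt
 * is expected to point at lpfc_handle_eratt_s3() or lpfc_handle_eratt_s4(),
 * chosen when the driver's SLI API jump table is set up for the device's
 * PCI group; the wrapper itself only dispatches through that pointer.
 */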
  1685. /**
  1686. * lpfc_handle_latt - The HBA link event handler
  1687. * @phba: pointer to lpfc hba data structure.
  1688. *
  1689. * This routine is invoked from the worker thread to handle a HBA host
  1690. * attention link event. SLI3 only.
  1691. **/
  1692. void
  1693. lpfc_handle_latt(struct lpfc_hba *phba)
  1694. {
  1695. struct lpfc_vport *vport = phba->pport;
  1696. struct lpfc_sli *psli = &phba->sli;
  1697. LPFC_MBOXQ_t *pmb;
  1698. volatile uint32_t control;
  1699. struct lpfc_dmabuf *mp;
  1700. int rc = 0;
  1701. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1702. if (!pmb) {
  1703. rc = 1;
  1704. goto lpfc_handle_latt_err_exit;
  1705. }
  1706. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1707. if (!mp) {
  1708. rc = 2;
  1709. goto lpfc_handle_latt_free_pmb;
  1710. }
  1711. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  1712. if (!mp->virt) {
  1713. rc = 3;
  1714. goto lpfc_handle_latt_free_mp;
  1715. }
  1716. /* Cleanup any outstanding ELS commands */
  1717. lpfc_els_flush_all_cmd(phba);
  1718. psli->slistat.link_event++;
  1719. lpfc_read_topology(phba, pmb, mp);
  1720. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
  1721. pmb->vport = vport;
  1722. /* Block ELS IOCBs until we have processed this mbox command */
  1723. phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
  1724. rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
  1725. if (rc == MBX_NOT_FINISHED) {
  1726. rc = 4;
  1727. goto lpfc_handle_latt_free_mbuf;
  1728. }
  1729. /* Clear Link Attention in HA REG */
  1730. spin_lock_irq(&phba->hbalock);
  1731. writel(HA_LATT, phba->HAregaddr);
  1732. readl(phba->HAregaddr); /* flush */
  1733. spin_unlock_irq(&phba->hbalock);
  1734. return;
  1735. lpfc_handle_latt_free_mbuf:
  1736. phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
  1737. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  1738. lpfc_handle_latt_free_mp:
  1739. kfree(mp);
  1740. lpfc_handle_latt_free_pmb:
  1741. mempool_free(pmb, phba->mbox_mem_pool);
  1742. lpfc_handle_latt_err_exit:
  1743. /* Enable Link attention interrupts */
  1744. spin_lock_irq(&phba->hbalock);
  1745. psli->sli_flag |= LPFC_PROCESS_LA;
  1746. control = readl(phba->HCregaddr);
  1747. control |= HC_LAINT_ENA;
  1748. writel(control, phba->HCregaddr);
  1749. readl(phba->HCregaddr); /* flush */
  1750. /* Clear Link Attention in HA REG */
  1751. writel(HA_LATT, phba->HAregaddr);
  1752. readl(phba->HAregaddr); /* flush */
  1753. spin_unlock_irq(&phba->hbalock);
  1754. lpfc_linkdown(phba);
  1755. phba->link_state = LPFC_HBA_ERROR;
  1756. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
  1757. "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
  1758. return;
  1759. }
  1760. /**
  1761. * lpfc_parse_vpd - Parse VPD (Vital Product Data)
  1762. * @phba: pointer to lpfc hba data structure.
  1763. * @vpd: pointer to the vital product data.
  1764. * @len: length of the vital product data in bytes.
  1765. *
  1766. * This routine parses the Vital Product Data (VPD). The VPD is treated as
  1767. * an array of characters. In this routine, the ModelName, ProgramType, and
  1768. * ModelDesc, etc. fields of the phba data structure will be populated.
  1769. *
  1770. * Return codes
  1771. * 0 - pointer to the VPD passed in is NULL
  1772. * 1 - success
  1773. **/
  1774. int
  1775. lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
  1776. {
  1777. uint8_t lenlo, lenhi;
  1778. int Length;
  1779. int i, j;
  1780. int finished = 0;
  1781. int index = 0;
  1782. if (!vpd)
  1783. return 0;
  1784. /* Vital Product */
  1785. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1786. "0455 Vital Product Data: x%x x%x x%x x%x\n",
  1787. (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
  1788. (uint32_t) vpd[3]);
  1789. while (!finished && (index < (len - 4))) {
  1790. switch (vpd[index]) {
  1791. case 0x82:
  1792. case 0x91:
  1793. index += 1;
  1794. lenlo = vpd[index];
  1795. index += 1;
  1796. lenhi = vpd[index];
  1797. index += 1;
  1798. i = ((((unsigned short)lenhi) << 8) + lenlo);
  1799. index += i;
  1800. break;
  1801. case 0x90:
  1802. index += 1;
  1803. lenlo = vpd[index];
  1804. index += 1;
  1805. lenhi = vpd[index];
  1806. index += 1;
  1807. Length = ((((unsigned short)lenhi) << 8) + lenlo);
  1808. if (Length > len - index)
  1809. Length = len - index;
  1810. while (Length > 0) {
  1811. /* Look for Serial Number */
  1812. if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
  1813. index += 2;
  1814. i = vpd[index];
  1815. index += 1;
  1816. j = 0;
  1817. Length -= (3+i);
  1818. while(i--) {
  1819. phba->SerialNumber[j++] = vpd[index++];
  1820. if (j == 31)
  1821. break;
  1822. }
  1823. phba->SerialNumber[j] = 0;
  1824. continue;
  1825. }
  1826. else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
  1827. phba->vpd_flag |= VPD_MODEL_DESC;
  1828. index += 2;
  1829. i = vpd[index];
  1830. index += 1;
  1831. j = 0;
  1832. Length -= (3+i);
  1833. while(i--) {
  1834. phba->ModelDesc[j++] = vpd[index++];
  1835. if (j == 255)
  1836. break;
  1837. }
  1838. phba->ModelDesc[j] = 0;
  1839. continue;
  1840. }
  1841. else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
  1842. phba->vpd_flag |= VPD_MODEL_NAME;
  1843. index += 2;
  1844. i = vpd[index];
  1845. index += 1;
  1846. j = 0;
  1847. Length -= (3+i);
  1848. while(i--) {
  1849. phba->ModelName[j++] = vpd[index++];
  1850. if (j == 79)
  1851. break;
  1852. }
  1853. phba->ModelName[j] = 0;
  1854. continue;
  1855. }
  1856. else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
  1857. phba->vpd_flag |= VPD_PROGRAM_TYPE;
  1858. index += 2;
  1859. i = vpd[index];
  1860. index += 1;
  1861. j = 0;
  1862. Length -= (3+i);
  1863. while(i--) {
  1864. phba->ProgramType[j++] = vpd[index++];
  1865. if (j == 255)
  1866. break;
  1867. }
  1868. phba->ProgramType[j] = 0;
  1869. continue;
  1870. }
  1871. else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
  1872. phba->vpd_flag |= VPD_PORT;
  1873. index += 2;
  1874. i = vpd[index];
  1875. index += 1;
  1876. j = 0;
  1877. Length -= (3+i);
  1878. while(i--) {
  1879. if ((phba->sli_rev == LPFC_SLI_REV4) &&
  1880. (phba->sli4_hba.pport_name_sta ==
  1881. LPFC_SLI4_PPNAME_GET)) {
  1882. j++;
  1883. index++;
  1884. } else
  1885. phba->Port[j++] = vpd[index++];
  1886. if (j == 19)
  1887. break;
  1888. }
  1889. if ((phba->sli_rev != LPFC_SLI_REV4) ||
  1890. (phba->sli4_hba.pport_name_sta ==
  1891. LPFC_SLI4_PPNAME_NON))
  1892. phba->Port[j] = 0;
  1893. continue;
  1894. }
  1895. else {
  1896. index += 2;
  1897. i = vpd[index];
  1898. index += 1;
  1899. index += i;
  1900. Length -= (3 + i);
  1901. }
  1902. }
  1903. finished = 0;
  1904. break;
  1905. case 0x78:
  1906. finished = 1;
  1907. break;
  1908. default:
  1909. index ++;
  1910. break;
  1911. }
  1912. }
  1913. return(1);
  1914. }
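/*
 * Illustrative VPD fragment (hypothetical bytes, length stored low byte
 * first):
 *
 *	0x90 0x0b 0x00  'S' 'N' 0x08  'A' 'B' 'C' '1' '2' '3' '4' '5'  0x78
 *
 * The 0x90 descriptor is 11 bytes long; its "SN" keyword carries an 8-byte
 * serial number, so the loop above leaves phba->SerialNumber = "ABC12345",
 * and the trailing 0x78 tag terminates parsing.
 */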
  1915. /**
  1916. * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
  1917. * @phba: pointer to lpfc hba data structure.
  1918. * @mdp: pointer to the data structure to hold the derived model name.
  1919. * @descp: pointer to the data structure to hold the derived description.
  1920. *
  1921. * This routine retrieves HBA's description based on its registered PCI device
  1922. * ID. The @descp passed into this function points to an array of 256 chars. It
  1923. * shall be returned with the model name, maximum speed, and the host bus type.
  1924. * The @mdp passed into this function points to an array of 80 chars. When the
  1925. * function returns, the @mdp will be filled with the model name.
  1926. **/
  1927. static void
  1928. lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
  1929. {
  1930. lpfc_vpd_t *vp;
  1931. uint16_t dev_id = phba->pcidev->device;
  1932. int max_speed;
  1933. int GE = 0;
  1934. int oneConnect = 0; /* default is not a oneConnect */
  1935. struct {
  1936. char *name;
  1937. char *bus;
  1938. char *function;
  1939. } m = {"<Unknown>", "", ""};
  1940. if (mdp && mdp[0] != '\0'
  1941. && descp && descp[0] != '\0')
  1942. return;
  1943. if (phba->lmt & LMT_32Gb)
  1944. max_speed = 32;
  1945. else if (phba->lmt & LMT_16Gb)
  1946. max_speed = 16;
  1947. else if (phba->lmt & LMT_10Gb)
  1948. max_speed = 10;
  1949. else if (phba->lmt & LMT_8Gb)
  1950. max_speed = 8;
  1951. else if (phba->lmt & LMT_4Gb)
  1952. max_speed = 4;
  1953. else if (phba->lmt & LMT_2Gb)
  1954. max_speed = 2;
  1955. else if (phba->lmt & LMT_1Gb)
  1956. max_speed = 1;
  1957. else
  1958. max_speed = 0;
  1959. vp = &phba->vpd;
  1960. switch (dev_id) {
  1961. case PCI_DEVICE_ID_FIREFLY:
  1962. m = (typeof(m)){"LP6000", "PCI",
  1963. "Obsolete, Unsupported Fibre Channel Adapter"};
  1964. break;
  1965. case PCI_DEVICE_ID_SUPERFLY:
  1966. if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
  1967. m = (typeof(m)){"LP7000", "PCI", ""};
  1968. else
  1969. m = (typeof(m)){"LP7000E", "PCI", ""};
  1970. m.function = "Obsolete, Unsupported Fibre Channel Adapter";
  1971. break;
  1972. case PCI_DEVICE_ID_DRAGONFLY:
  1973. m = (typeof(m)){"LP8000", "PCI",
  1974. "Obsolete, Unsupported Fibre Channel Adapter"};
  1975. break;
  1976. case PCI_DEVICE_ID_CENTAUR:
  1977. if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
  1978. m = (typeof(m)){"LP9002", "PCI", ""};
  1979. else
  1980. m = (typeof(m)){"LP9000", "PCI", ""};
  1981. m.function = "Obsolete, Unsupported Fibre Channel Adapter";
  1982. break;
  1983. case PCI_DEVICE_ID_RFLY:
  1984. m = (typeof(m)){"LP952", "PCI",
  1985. "Obsolete, Unsupported Fibre Channel Adapter"};
  1986. break;
  1987. case PCI_DEVICE_ID_PEGASUS:
  1988. m = (typeof(m)){"LP9802", "PCI-X",
  1989. "Obsolete, Unsupported Fibre Channel Adapter"};
  1990. break;
  1991. case PCI_DEVICE_ID_THOR:
  1992. m = (typeof(m)){"LP10000", "PCI-X",
  1993. "Obsolete, Unsupported Fibre Channel Adapter"};
  1994. break;
  1995. case PCI_DEVICE_ID_VIPER:
  1996. m = (typeof(m)){"LPX1000", "PCI-X",
  1997. "Obsolete, Unsupported Fibre Channel Adapter"};
  1998. break;
  1999. case PCI_DEVICE_ID_PFLY:
  2000. m = (typeof(m)){"LP982", "PCI-X",
  2001. "Obsolete, Unsupported Fibre Channel Adapter"};
  2002. break;
  2003. case PCI_DEVICE_ID_TFLY:
  2004. m = (typeof(m)){"LP1050", "PCI-X",
  2005. "Obsolete, Unsupported Fibre Channel Adapter"};
  2006. break;
  2007. case PCI_DEVICE_ID_HELIOS:
  2008. m = (typeof(m)){"LP11000", "PCI-X2",
  2009. "Obsolete, Unsupported Fibre Channel Adapter"};
  2010. break;
  2011. case PCI_DEVICE_ID_HELIOS_SCSP:
  2012. m = (typeof(m)){"LP11000-SP", "PCI-X2",
  2013. "Obsolete, Unsupported Fibre Channel Adapter"};
  2014. break;
  2015. case PCI_DEVICE_ID_HELIOS_DCSP:
  2016. m = (typeof(m)){"LP11002-SP", "PCI-X2",
  2017. "Obsolete, Unsupported Fibre Channel Adapter"};
  2018. break;
  2019. case PCI_DEVICE_ID_NEPTUNE:
  2020. m = (typeof(m)){"LPe1000", "PCIe",
  2021. "Obsolete, Unsupported Fibre Channel Adapter"};
  2022. break;
  2023. case PCI_DEVICE_ID_NEPTUNE_SCSP:
  2024. m = (typeof(m)){"LPe1000-SP", "PCIe",
  2025. "Obsolete, Unsupported Fibre Channel Adapter"};
  2026. break;
  2027. case PCI_DEVICE_ID_NEPTUNE_DCSP:
  2028. m = (typeof(m)){"LPe1002-SP", "PCIe",
  2029. "Obsolete, Unsupported Fibre Channel Adapter"};
  2030. break;
  2031. case PCI_DEVICE_ID_BMID:
  2032. m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
  2033. break;
  2034. case PCI_DEVICE_ID_BSMB:
  2035. m = (typeof(m)){"LP111", "PCI-X2",
  2036. "Obsolete, Unsupported Fibre Channel Adapter"};
  2037. break;
  2038. case PCI_DEVICE_ID_ZEPHYR:
  2039. m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
  2040. break;
  2041. case PCI_DEVICE_ID_ZEPHYR_SCSP:
  2042. m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
  2043. break;
  2044. case PCI_DEVICE_ID_ZEPHYR_DCSP:
  2045. m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
  2046. GE = 1;
  2047. break;
  2048. case PCI_DEVICE_ID_ZMID:
  2049. m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
  2050. break;
  2051. case PCI_DEVICE_ID_ZSMB:
  2052. m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
  2053. break;
  2054. case PCI_DEVICE_ID_LP101:
  2055. m = (typeof(m)){"LP101", "PCI-X",
  2056. "Obsolete, Unsupported Fibre Channel Adapter"};
  2057. break;
  2058. case PCI_DEVICE_ID_LP10000S:
  2059. m = (typeof(m)){"LP10000-S", "PCI",
  2060. "Obsolete, Unsupported Fibre Channel Adapter"};
  2061. break;
  2062. case PCI_DEVICE_ID_LP11000S:
  2063. m = (typeof(m)){"LP11000-S", "PCI-X2",
  2064. "Obsolete, Unsupported Fibre Channel Adapter"};
  2065. break;
  2066. case PCI_DEVICE_ID_LPE11000S:
  2067. m = (typeof(m)){"LPe11000-S", "PCIe",
  2068. "Obsolete, Unsupported Fibre Channel Adapter"};
  2069. break;
  2070. case PCI_DEVICE_ID_SAT:
  2071. m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
  2072. break;
  2073. case PCI_DEVICE_ID_SAT_MID:
  2074. m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
  2075. break;
  2076. case PCI_DEVICE_ID_SAT_SMB:
  2077. m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
  2078. break;
  2079. case PCI_DEVICE_ID_SAT_DCSP:
  2080. m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
  2081. break;
  2082. case PCI_DEVICE_ID_SAT_SCSP:
  2083. m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
  2084. break;
  2085. case PCI_DEVICE_ID_SAT_S:
  2086. m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
  2087. break;
  2088. case PCI_DEVICE_ID_HORNET:
  2089. m = (typeof(m)){"LP21000", "PCIe",
  2090. "Obsolete, Unsupported FCoE Adapter"};
  2091. GE = 1;
  2092. break;
  2093. case PCI_DEVICE_ID_PROTEUS_VF:
  2094. m = (typeof(m)){"LPev12000", "PCIe IOV",
  2095. "Obsolete, Unsupported Fibre Channel Adapter"};
  2096. break;
  2097. case PCI_DEVICE_ID_PROTEUS_PF:
  2098. m = (typeof(m)){"LPev12000", "PCIe IOV",
  2099. "Obsolete, Unsupported Fibre Channel Adapter"};
  2100. break;
  2101. case PCI_DEVICE_ID_PROTEUS_S:
  2102. m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
  2103. "Obsolete, Unsupported Fibre Channel Adapter"};
  2104. break;
  2105. case PCI_DEVICE_ID_TIGERSHARK:
  2106. oneConnect = 1;
  2107. m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
  2108. break;
  2109. case PCI_DEVICE_ID_TOMCAT:
  2110. oneConnect = 1;
  2111. m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
  2112. break;
  2113. case PCI_DEVICE_ID_FALCON:
  2114. m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
  2115. "EmulexSecure Fibre"};
  2116. break;
  2117. case PCI_DEVICE_ID_BALIUS:
  2118. m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
  2119. "Obsolete, Unsupported Fibre Channel Adapter"};
  2120. break;
  2121. case PCI_DEVICE_ID_LANCER_FC:
  2122. m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
  2123. break;
  2124. case PCI_DEVICE_ID_LANCER_FC_VF:
  2125. m = (typeof(m)){"LPe16000", "PCIe",
  2126. "Obsolete, Unsupported Fibre Channel Adapter"};
  2127. break;
  2128. case PCI_DEVICE_ID_LANCER_FCOE:
  2129. oneConnect = 1;
  2130. m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
  2131. break;
  2132. case PCI_DEVICE_ID_LANCER_FCOE_VF:
  2133. oneConnect = 1;
  2134. m = (typeof(m)){"OCe15100", "PCIe",
  2135. "Obsolete, Unsupported FCoE"};
  2136. break;
  2137. case PCI_DEVICE_ID_LANCER_G6_FC:
  2138. m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
  2139. break;
  2140. case PCI_DEVICE_ID_SKYHAWK:
  2141. case PCI_DEVICE_ID_SKYHAWK_VF:
  2142. oneConnect = 1;
  2143. m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
  2144. break;
  2145. default:
  2146. m = (typeof(m)){"Unknown", "", ""};
  2147. break;
  2148. }
  2149. if (mdp && mdp[0] == '\0')
  2150. snprintf(mdp, 79,"%s", m.name);
  2151. /*
  2152. * OneConnect HBAs require special processing; they are all initiators
  2153. * and we put the port number on the end.
  2154. */
  2155. if (descp && descp[0] == '\0') {
  2156. if (oneConnect)
  2157. snprintf(descp, 255,
  2158. "Emulex OneConnect %s, %s Initiator %s",
  2159. m.name, m.function,
  2160. phba->Port);
  2161. else if (max_speed == 0)
  2162. snprintf(descp, 255,
  2163. "Emulex %s %s %s",
  2164. m.name, m.bus, m.function);
  2165. else
  2166. snprintf(descp, 255,
  2167. "Emulex %s %d%s %s %s",
  2168. m.name, max_speed, (GE) ? "GE" : "Gb",
  2169. m.bus, m.function);
  2170. }
  2171. }
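/*
 * Example (illustrative): for a PCI_DEVICE_ID_LANCER_G6_FC adapter with
 * LMT_32Gb set, the code above would leave mdp = "LPe32000" and build
 * descp = "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter".  OneConnect
 * devices instead get "Emulex OneConnect <model>, <function> Initiator
 * <port>" with the port name appended.
 */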
  2172. /**
  2173. * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
  2174. * @phba: pointer to lpfc hba data structure.
  2175. * @pring: pointer to a IOCB ring.
  2176. * @cnt: the number of IOCBs to be posted to the IOCB ring.
  2177. *
  2178. * This routine posts a given number of IOCBs with the associated DMA buffer
  2179. * descriptors specified by the cnt argument to the given IOCB ring.
  2180. *
  2181. * Return codes
  2182. * The number of IOCBs NOT able to be posted to the IOCB ring.
  2183. **/
  2184. int
  2185. lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
  2186. {
  2187. IOCB_t *icmd;
  2188. struct lpfc_iocbq *iocb;
  2189. struct lpfc_dmabuf *mp1, *mp2;
  2190. cnt += pring->missbufcnt;
  2191. /* While there are buffers to post */
  2192. while (cnt > 0) {
  2193. /* Allocate buffer for command iocb */
  2194. iocb = lpfc_sli_get_iocbq(phba);
  2195. if (iocb == NULL) {
  2196. pring->missbufcnt = cnt;
  2197. return cnt;
  2198. }
  2199. icmd = &iocb->iocb;
  2200. /* 2 buffers can be posted per command */
  2201. /* Allocate buffer to post */
  2202. mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  2203. if (mp1)
  2204. mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
  2205. if (!mp1 || !mp1->virt) {
  2206. kfree(mp1);
  2207. lpfc_sli_release_iocbq(phba, iocb);
  2208. pring->missbufcnt = cnt;
  2209. return cnt;
  2210. }
  2211. INIT_LIST_HEAD(&mp1->list);
  2212. /* Allocate buffer to post */
  2213. if (cnt > 1) {
  2214. mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  2215. if (mp2)
  2216. mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  2217. &mp2->phys);
  2218. if (!mp2 || !mp2->virt) {
  2219. kfree(mp2);
  2220. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  2221. kfree(mp1);
  2222. lpfc_sli_release_iocbq(phba, iocb);
  2223. pring->missbufcnt = cnt;
  2224. return cnt;
  2225. }
  2226. INIT_LIST_HEAD(&mp2->list);
  2227. } else {
  2228. mp2 = NULL;
  2229. }
  2230. icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
  2231. icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
  2232. icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
  2233. icmd->ulpBdeCount = 1;
  2234. cnt--;
  2235. if (mp2) {
  2236. icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
  2237. icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
  2238. icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
  2239. cnt--;
  2240. icmd->ulpBdeCount = 2;
  2241. }
  2242. icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
  2243. icmd->ulpLe = 1;
  2244. if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
  2245. IOCB_ERROR) {
  2246. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  2247. kfree(mp1);
  2248. cnt++;
  2249. if (mp2) {
  2250. lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
  2251. kfree(mp2);
  2252. cnt++;
  2253. }
  2254. lpfc_sli_release_iocbq(phba, iocb);
  2255. pring->missbufcnt = cnt;
  2256. return cnt;
  2257. }
  2258. lpfc_sli_ringpostbuf_put(phba, pring, mp1);
  2259. if (mp2)
  2260. lpfc_sli_ringpostbuf_put(phba, pring, mp2);
  2261. }
  2262. pring->missbufcnt = 0;
  2263. return 0;
  2264. }
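/*
 * Worked example (illustrative only): lpfc_post_rcv_buf() below asks for
 * LPFC_BUF_RING0 (64) receive buffers on the ELS ring.  Since each
 * CMD_QUE_RING_BUF64_CN iocb carries up to two BDEs, a fully successful
 * pass through the loop above consumes 32 iocbs; any buffers that could
 * not be posted are remembered in pring->missbufcnt and retried on the
 * next call.
 */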
  2265. /**
  2266. * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
  2267. * @phba: pointer to lpfc hba data structure.
  2268. *
  2269. * This routine posts initial receive IOCB buffers to the ELS ring. The
  2270. * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
  2271. * set to 64 IOCBs. SLI3 only.
  2272. *
  2273. * Return codes
  2274. * 0 - success (currently always success)
  2275. **/
  2276. static int
  2277. lpfc_post_rcv_buf(struct lpfc_hba *phba)
  2278. {
  2279. struct lpfc_sli *psli = &phba->sli;
  2280. /* Ring 0, ELS / CT buffers */
  2281. lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
  2282. /* Ring 2 - FCP no buffers needed */
  2283. return 0;
  2284. }
  2285. #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
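/*
 * Example (illustrative): S(n, v) is a 32-bit rotate-left, so
 * S(1, 0x80000000) yields 0x00000001, and S(30, B) in lpfc_sha_iterate()
 * is the standard SHA-1 rotation of the B word by 30 bits.
 */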
  2286. /**
  2287. * lpfc_sha_init - Set up initial array of hash table entries
  2288. * @HashResultPointer: pointer to an array as hash table.
  2289. *
  2290. * This routine sets up the initial values to the array of hash table entries
  2291. * for the LC HBAs.
  2292. **/
  2293. static void
  2294. lpfc_sha_init(uint32_t * HashResultPointer)
  2295. {
  2296. HashResultPointer[0] = 0x67452301;
  2297. HashResultPointer[1] = 0xEFCDAB89;
  2298. HashResultPointer[2] = 0x98BADCFE;
  2299. HashResultPointer[3] = 0x10325476;
  2300. HashResultPointer[4] = 0xC3D2E1F0;
  2301. }
  2302. /**
  2303. * lpfc_sha_iterate - Iterate initial hash table with the working hash table
  2304. * @HashResultPointer: pointer to an initial/result hash table.
  2305. * @HashWorkingPointer: pointer to a working hash table.
  2306. *
  2307. * This routine iterates an initial hash table pointed by @HashResultPointer
  2308. * with the values from the working hash table pointed to by
  2309. * @HashWorkingPointer. The results are put back into the initial hash
  2310. * table, returned through @HashResultPointer as the result hash table.
  2311. **/
  2312. static void
  2313. lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
  2314. {
  2315. int t;
  2316. uint32_t TEMP;
  2317. uint32_t A, B, C, D, E;
  2318. t = 16;
  2319. do {
  2320. HashWorkingPointer[t] =
  2321. S(1,
  2322. HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
  2323. 8] ^
  2324. HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
  2325. } while (++t <= 79);
  2326. t = 0;
  2327. A = HashResultPointer[0];
  2328. B = HashResultPointer[1];
  2329. C = HashResultPointer[2];
  2330. D = HashResultPointer[3];
  2331. E = HashResultPointer[4];
  2332. do {
  2333. if (t < 20) {
  2334. TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
  2335. } else if (t < 40) {
  2336. TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
  2337. } else if (t < 60) {
  2338. TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
  2339. } else {
  2340. TEMP = (B ^ C ^ D) + 0xCA62C1D6;
  2341. }
  2342. TEMP += S(5, A) + E + HashWorkingPointer[t];
  2343. E = D;
  2344. D = C;
  2345. C = S(30, B);
  2346. B = A;
  2347. A = TEMP;
  2348. } while (++t <= 79);
  2349. HashResultPointer[0] += A;
  2350. HashResultPointer[1] += B;
  2351. HashResultPointer[2] += C;
  2352. HashResultPointer[3] += D;
  2353. HashResultPointer[4] += E;
  2354. }
  2355. /**
  2356. * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
  2357. * @RandomChallenge: pointer to the entry of host challenge random number array.
  2358. * @HashWorking: pointer to the entry of the working hash array.
  2359. *
  2360. * This routine calculates the working hash array referred by @HashWorking
  2361. * from the challenge random numbers associated with the host, referred by
  2362. * @RandomChallenge. The result is put into the entry of the working hash
  2363. * array and returned by reference through @HashWorking.
  2364. **/
  2365. static void
  2366. lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
  2367. {
  2368. *HashWorking = (*RandomChallenge ^ *HashWorking);
  2369. }
  2370. /**
  2371. * lpfc_hba_init - Perform special handling for LC HBA initialization
  2372. * @phba: pointer to lpfc hba data structure.
  2373. * @hbainit: pointer to an array of unsigned 32-bit integers.
  2374. *
  2375. * This routine performs the special handling for LC HBA initialization.
  2376. **/
  2377. void
  2378. lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
  2379. {
  2380. int t;
  2381. uint32_t *HashWorking;
  2382. uint32_t *pwwnn = (uint32_t *) phba->wwnn;
  2383. HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
  2384. if (!HashWorking)
  2385. return;
  2386. HashWorking[0] = HashWorking[78] = *pwwnn++;
  2387. HashWorking[1] = HashWorking[79] = *pwwnn;
  2388. for (t = 0; t < 7; t++)
  2389. lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
  2390. lpfc_sha_init(hbainit);
  2391. lpfc_sha_iterate(hbainit, HashWorking);
  2392. kfree(HashWorking);
  2393. }
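/*
 * Illustrative sketch (not part of the driver): how the hash helpers above
 * combine for a single 64-byte block.  lpfc_hba_init() places the WWNN words
 * at both ends of an 80-word working array, XORs in the random challenge
 * data via lpfc_challenge_key(), and then performs one SHA-1 style
 * compression.  A hypothetical standalone caller would do:
 *
 *	uint32_t digest[5];
 *	uint32_t working[80] = { 0 };	(words 0..15 hold the message block)
 *
 *	lpfc_sha_init(digest);
 *	lpfc_sha_iterate(digest, working);
 *
 * digest[0..4] then holds the 160-bit result.  Note that no message padding
 * is applied here; these routines implement only the compression step.
 */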
  2394. /**
  2395. * lpfc_cleanup - Performs vport cleanups before deleting a vport
  2396. * @vport: pointer to a virtual N_Port data structure.
  2397. *
  2398. * This routine performs the necessary cleanups before deleting the @vport.
  2399. * It invokes the discovery state machine to perform necessary state
  2400. * transitions and to release the ndlps associated with the @vport. Note,
  2401. * the physical port is treated as @vport 0.
  2402. **/
  2403. void
  2404. lpfc_cleanup(struct lpfc_vport *vport)
  2405. {
  2406. struct lpfc_hba *phba = vport->phba;
  2407. struct lpfc_nodelist *ndlp, *next_ndlp;
  2408. int i = 0;
  2409. if (phba->link_state > LPFC_LINK_DOWN)
  2410. lpfc_port_link_failure(vport);
  2411. list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
  2412. if (!NLP_CHK_NODE_ACT(ndlp)) {
  2413. ndlp = lpfc_enable_node(vport, ndlp,
  2414. NLP_STE_UNUSED_NODE);
  2415. if (!ndlp)
  2416. continue;
  2417. spin_lock_irq(&phba->ndlp_lock);
  2418. NLP_SET_FREE_REQ(ndlp);
  2419. spin_unlock_irq(&phba->ndlp_lock);
  2420. /* Trigger the release of the ndlp memory */
  2421. lpfc_nlp_put(ndlp);
  2422. continue;
  2423. }
  2424. spin_lock_irq(&phba->ndlp_lock);
  2425. if (NLP_CHK_FREE_REQ(ndlp)) {
  2426. /* The ndlp should not be in memory free mode already */
  2427. spin_unlock_irq(&phba->ndlp_lock);
  2428. continue;
  2429. } else
  2430. /* Indicate request for freeing ndlp memory */
  2431. NLP_SET_FREE_REQ(ndlp);
  2432. spin_unlock_irq(&phba->ndlp_lock);
  2433. if (vport->port_type != LPFC_PHYSICAL_PORT &&
  2434. ndlp->nlp_DID == Fabric_DID) {
  2435. /* Just free up ndlp with Fabric_DID for vports */
  2436. lpfc_nlp_put(ndlp);
  2437. continue;
  2438. }
  2439. /* Take care of nodes in the unused state before the state
  2440. * machine takes action.
  2441. */
  2442. if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
  2443. lpfc_nlp_put(ndlp);
  2444. continue;
  2445. }
  2446. if (ndlp->nlp_type & NLP_FABRIC)
  2447. lpfc_disc_state_machine(vport, ndlp, NULL,
  2448. NLP_EVT_DEVICE_RECOVERY);
  2449. if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
  2450. /* Remove the NVME transport reference now and
  2451. * continue to remove the node.
  2452. */
  2453. lpfc_nlp_put(ndlp);
  2454. }
  2455. lpfc_disc_state_machine(vport, ndlp, NULL,
  2456. NLP_EVT_DEVICE_RM);
  2457. }
  2458. /* At this point, ALL ndlp's should be gone
  2459. * because of the previous NLP_EVT_DEVICE_RM.
  2460. * Let's wait for this to happen, if needed.
  2461. */
  2462. while (!list_empty(&vport->fc_nodes)) {
  2463. if (i++ > 3000) {
  2464. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  2465. "0233 Nodelist not empty\n");
  2466. list_for_each_entry_safe(ndlp, next_ndlp,
  2467. &vport->fc_nodes, nlp_listp) {
  2468. lpfc_printf_vlog(ndlp->vport, KERN_ERR,
  2469. LOG_NODE,
  2470. "0282 did:x%x ndlp:x%p "
  2471. "usgmap:x%x refcnt:%d\n",
  2472. ndlp->nlp_DID, (void *)ndlp,
  2473. ndlp->nlp_usg_map,
  2474. kref_read(&ndlp->kref));
  2475. }
  2476. break;
  2477. }
  2478. /* Wait for any activity on ndlps to settle */
  2479. msleep(10);
  2480. }
  2481. lpfc_cleanup_vports_rrqs(vport, NULL);
  2482. }
  2483. /**
  2484. * lpfc_stop_vport_timers - Stop all the timers associated with a vport
  2485. * @vport: pointer to a virtual N_Port data structure.
  2486. *
  2487. * This routine stops all the timers associated with a @vport. This function
  2488. * is invoked before disabling or deleting a @vport. Note that the physical
  2489. * port is treated as @vport 0.
  2490. **/
  2491. void
  2492. lpfc_stop_vport_timers(struct lpfc_vport *vport)
  2493. {
  2494. del_timer_sync(&vport->els_tmofunc);
  2495. del_timer_sync(&vport->delayed_disc_tmo);
  2496. lpfc_can_disctmo(vport);
  2497. return;
  2498. }
  2499. /**
  2500. * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
  2501. * @phba: pointer to lpfc hba data structure.
  2502. *
  2503. * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
  2504. * caller of this routine should already hold the host lock.
  2505. **/
  2506. void
  2507. __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
  2508. {
  2509. /* Clear pending FCF rediscovery wait flag */
  2510. phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
  2511. /* Now, try to stop the timer */
  2512. del_timer(&phba->fcf.redisc_wait);
  2513. }
  2514. /**
  2515. * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
  2516. * @phba: pointer to lpfc hba data structure.
  2517. *
  2518. * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
  2519. * checks whether the FCF rediscovery wait timer is pending with the host
  2520. * lock held before proceeding with disabling the timer and clearing the
  2521. * wait timer pending flag.
  2522. **/
  2523. void
  2524. lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
  2525. {
  2526. spin_lock_irq(&phba->hbalock);
  2527. if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
  2528. /* FCF rediscovery timer already fired or stopped */
  2529. spin_unlock_irq(&phba->hbalock);
  2530. return;
  2531. }
  2532. __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
  2533. /* Clear failover in progress flags */
  2534. phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
  2535. spin_unlock_irq(&phba->hbalock);
  2536. }
  2537. /**
  2538. * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  2539. * @phba: pointer to lpfc hba data structure.
  2540. *
  2541. * This routine stops all the timers associated with a HBA. This function is
  2542. * invoked before either putting a HBA offline or unloading the driver.
  2543. **/
  2544. void
  2545. lpfc_stop_hba_timers(struct lpfc_hba *phba)
  2546. {
  2547. lpfc_stop_vport_timers(phba->pport);
  2548. del_timer_sync(&phba->sli.mbox_tmo);
  2549. del_timer_sync(&phba->fabric_block_timer);
  2550. del_timer_sync(&phba->eratt_poll);
  2551. del_timer_sync(&phba->hb_tmofunc);
  2552. if (phba->sli_rev == LPFC_SLI_REV4) {
  2553. del_timer_sync(&phba->rrq_tmr);
  2554. phba->hba_flag &= ~HBA_RRQ_ACTIVE;
  2555. }
  2556. phba->hb_outstanding = 0;
  2557. switch (phba->pci_dev_grp) {
  2558. case LPFC_PCI_DEV_LP:
  2559. /* Stop any LightPulse device specific driver timers */
  2560. del_timer_sync(&phba->fcp_poll_timer);
  2561. break;
  2562. case LPFC_PCI_DEV_OC:
  2563. /* Stop any OneConnect device-specific driver timers */
  2564. lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
  2565. break;
  2566. default:
  2567. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  2568. "0297 Invalid device group (x%x)\n",
  2569. phba->pci_dev_grp);
  2570. break;
  2571. }
  2572. return;
  2573. }
  2574. /**
  2575. * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
  2576. * @phba: pointer to lpfc hba data structure.
  2577. *
  2578. * This routine marks a HBA's management interface as blocked. Once the HBA's
  2579. * management interface is marked as blocked, all user space access to
  2580. * the HBA, whether from the sysfs interface or the libdfc interface,
  2581. * will be blocked. The HBA is set to block the management interface when the
  2582. * driver prepares the HBA interface for online or offline.
  2583. **/
  2584. static void
  2585. lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
  2586. {
  2587. unsigned long iflag;
  2588. uint8_t actcmd = MBX_HEARTBEAT;
  2589. unsigned long timeout;
  2590. spin_lock_irqsave(&phba->hbalock, iflag);
  2591. phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
  2592. spin_unlock_irqrestore(&phba->hbalock, iflag);
  2593. if (mbx_action == LPFC_MBX_NO_WAIT)
  2594. return;
  2595. timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
  2596. spin_lock_irqsave(&phba->hbalock, iflag);
  2597. if (phba->sli.mbox_active) {
  2598. actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
  2599. /* Determine how long we might wait for the active mailbox
  2600. * command to be gracefully completed by firmware.
  2601. */
  2602. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
  2603. phba->sli.mbox_active) * 1000) + jiffies;
  2604. }
  2605. spin_unlock_irqrestore(&phba->hbalock, iflag);
  2606. /* Wait for the outstanding mailbox command to complete */
  2607. while (phba->sli.mbox_active) {
  2608. /* Check active mailbox complete status every 2ms */
  2609. msleep(2);
  2610. if (time_after(jiffies, timeout)) {
  2611. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  2612. "2813 Mgmt IO is Blocked %x "
  2613. "- mbox cmd %x still active\n",
  2614. phba->sli.sli_flag, actcmd);
  2615. break;
  2616. }
  2617. }
  2618. }
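/*
 * Typical pairing (a sketch based on the callers below): the management
 * interface stays blocked across any HBA bring-up or tear-down, e.g.
 *
 *	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *	... reset / reinitialize the HBA ...
 *	lpfc_unblock_mgmt_io(phba);
 *
 * LPFC_MBX_NO_WAIT returns without waiting for an active mailbox command,
 * while LPFC_MBX_WAIT polls until it completes or its mailbox timeout
 * expires.
 */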
  2619. /**
  2620. * lpfc_sli4_node_prep - Assign RPIs for active nodes.
  2621. * @phba: pointer to lpfc hba data structure.
  2622. *
  2623. * Allocate RPIs for all active remote nodes. This is needed whenever
  2624. * an SLI4 adapter is reset and the driver is not unloading. Its purpose
  2625. * is to fix up the temporary RPI assignments.
  2626. **/
  2627. void
  2628. lpfc_sli4_node_prep(struct lpfc_hba *phba)
  2629. {
  2630. struct lpfc_nodelist *ndlp, *next_ndlp;
  2631. struct lpfc_vport **vports;
  2632. int i;
  2633. if (phba->sli_rev != LPFC_SLI_REV4)
  2634. return;
  2635. vports = lpfc_create_vport_work_array(phba);
  2636. if (vports != NULL) {
  2637. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2638. if (vports[i]->load_flag & FC_UNLOADING)
  2639. continue;
  2640. list_for_each_entry_safe(ndlp, next_ndlp,
  2641. &vports[i]->fc_nodes,
  2642. nlp_listp) {
  2643. if (NLP_CHK_NODE_ACT(ndlp)) {
  2644. ndlp->nlp_rpi =
  2645. lpfc_sli4_alloc_rpi(phba);
  2646. lpfc_printf_vlog(ndlp->vport, KERN_INFO,
  2647. LOG_NODE,
  2648. "0009 rpi:%x DID:%x "
  2649. "flg:%x map:%x %p\n",
  2650. ndlp->nlp_rpi,
  2651. ndlp->nlp_DID,
  2652. ndlp->nlp_flag,
  2653. ndlp->nlp_usg_map,
  2654. ndlp);
  2655. }
  2656. }
  2657. }
  2658. }
  2659. lpfc_destroy_vport_work_array(phba, vports);
  2660. }
  2661. /**
  2662. * lpfc_online - Initialize and bring a HBA online
  2663. * @phba: pointer to lpfc hba data structure.
  2664. *
  2665. * This routine initializes the HBA and brings a HBA online. During this
  2666. * process, the management interface is blocked to prevent user space access
  2667. * to the HBA interfering with the driver initialization.
  2668. *
  2669. * Return codes
  2670. * 0 - successful
  2671. * 1 - failed
  2672. **/
  2673. int
  2674. lpfc_online(struct lpfc_hba *phba)
  2675. {
  2676. struct lpfc_vport *vport;
  2677. struct lpfc_vport **vports;
  2678. int i;
  2679. bool vpis_cleared = false;
  2680. if (!phba)
  2681. return 0;
  2682. vport = phba->pport;
  2683. if (!(vport->fc_flag & FC_OFFLINE_MODE))
  2684. return 0;
  2685. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  2686. "0458 Bring Adapter online\n");
  2687. lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
  2688. if (phba->sli_rev == LPFC_SLI_REV4) {
  2689. if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
  2690. lpfc_unblock_mgmt_io(phba);
  2691. return 1;
  2692. }
  2693. spin_lock_irq(&phba->hbalock);
  2694. if (!phba->sli4_hba.max_cfg_param.vpi_used)
  2695. vpis_cleared = true;
  2696. spin_unlock_irq(&phba->hbalock);
  2697. } else {
  2698. lpfc_sli_queue_init(phba);
  2699. if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
  2700. lpfc_unblock_mgmt_io(phba);
  2701. return 1;
  2702. }
  2703. }
  2704. vports = lpfc_create_vport_work_array(phba);
  2705. if (vports != NULL) {
  2706. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2707. struct Scsi_Host *shost;
  2708. shost = lpfc_shost_from_vport(vports[i]);
  2709. spin_lock_irq(shost->host_lock);
  2710. vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
  2711. if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
  2712. vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  2713. if (phba->sli_rev == LPFC_SLI_REV4) {
  2714. vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
  2715. if ((vpis_cleared) &&
  2716. (vports[i]->port_type !=
  2717. LPFC_PHYSICAL_PORT))
  2718. vports[i]->vpi = 0;
  2719. }
  2720. spin_unlock_irq(shost->host_lock);
  2721. }
  2722. }
  2723. lpfc_destroy_vport_work_array(phba, vports);
  2724. lpfc_unblock_mgmt_io(phba);
  2725. return 0;
  2726. }
  2727. /**
  2728. * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
  2729. * @phba: pointer to lpfc hba data structure.
  2730. *
  2731. * This routine marks a HBA's management interface as not blocked. Once the
  2732. * HBA's management interface is marked as not blocked, all user space
  2733. * access to the HBA, whether from the sysfs interface or the libdfc
  2734. * interface, will be allowed. The HBA is set to block the management interface
  2735. * when the driver prepares the HBA interface for online or offline and then
  2736. * set to unblock the management interface afterwards.
  2737. **/
  2738. void
  2739. lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
  2740. {
  2741. unsigned long iflag;
  2742. spin_lock_irqsave(&phba->hbalock, iflag);
  2743. phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
  2744. spin_unlock_irqrestore(&phba->hbalock, iflag);
  2745. }
  2746. /**
  2747. * lpfc_offline_prep - Prepare a HBA to be brought offline
  2748. * @phba: pointer to lpfc hba data structure.
  2749. *
  2750. * This routine is invoked to prepare a HBA to be brought offline. It performs
  2751. * unregistration login to all the nodes on all vports and flushes the mailbox
  2752. * queue to make it ready to be brought offline.
  2753. **/
  2754. void
  2755. lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
  2756. {
  2757. struct lpfc_vport *vport = phba->pport;
  2758. struct lpfc_nodelist *ndlp, *next_ndlp;
  2759. struct lpfc_vport **vports;
  2760. struct Scsi_Host *shost;
  2761. int i;
  2762. if (vport->fc_flag & FC_OFFLINE_MODE)
  2763. return;
  2764. lpfc_block_mgmt_io(phba, mbx_action);
  2765. lpfc_linkdown(phba);
  2766. /* Issue an unreg_login to all nodes on all vports */
  2767. vports = lpfc_create_vport_work_array(phba);
  2768. if (vports != NULL) {
  2769. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2770. if (vports[i]->load_flag & FC_UNLOADING)
  2771. continue;
  2772. shost = lpfc_shost_from_vport(vports[i]);
  2773. spin_lock_irq(shost->host_lock);
  2774. vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
  2775. vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  2776. vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
  2777. spin_unlock_irq(shost->host_lock);
  2778. shost = lpfc_shost_from_vport(vports[i]);
  2779. list_for_each_entry_safe(ndlp, next_ndlp,
  2780. &vports[i]->fc_nodes,
  2781. nlp_listp) {
  2782. if (!NLP_CHK_NODE_ACT(ndlp))
  2783. continue;
  2784. if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
  2785. continue;
  2786. if (ndlp->nlp_type & NLP_FABRIC) {
  2787. lpfc_disc_state_machine(vports[i], ndlp,
  2788. NULL, NLP_EVT_DEVICE_RECOVERY);
  2789. lpfc_disc_state_machine(vports[i], ndlp,
  2790. NULL, NLP_EVT_DEVICE_RM);
  2791. }
  2792. spin_lock_irq(shost->host_lock);
  2793. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2794. spin_unlock_irq(shost->host_lock);
  2795. /*
  2796. * Whenever an SLI4 port goes offline, free the
  2797. * RPI. Get a new RPI when the adapter port
  2798. * comes back online.
  2799. */
  2800. if (phba->sli_rev == LPFC_SLI_REV4) {
  2801. lpfc_printf_vlog(ndlp->vport,
  2802. KERN_INFO, LOG_NODE,
  2803. "0011 lpfc_offline: "
  2804. "ndlp:x%p did %x "
  2805. "usgmap:x%x rpi:%x\n",
  2806. ndlp, ndlp->nlp_DID,
  2807. ndlp->nlp_usg_map,
  2808. ndlp->nlp_rpi);
  2809. lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
  2810. }
  2811. lpfc_unreg_rpi(vports[i], ndlp);
  2812. }
  2813. }
  2814. }
  2815. lpfc_destroy_vport_work_array(phba, vports);
  2816. lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
  2817. }
  2818. /**
  2819. * lpfc_offline - Bring a HBA offline
  2820. * @phba: pointer to lpfc hba data structure.
  2821. *
  2822. * This routine actually brings a HBA offline. It stops all the timers
  2823. * associated with the HBA, brings down the SLI layer, and eventually
  2824. * marks the HBA as in offline state for the upper layer protocol.
  2825. **/
  2826. void
  2827. lpfc_offline(struct lpfc_hba *phba)
  2828. {
  2829. struct Scsi_Host *shost;
  2830. struct lpfc_vport **vports;
  2831. int i;
  2832. if (phba->pport->fc_flag & FC_OFFLINE_MODE)
  2833. return;
  2834. /* stop port and all timers associated with this hba */
  2835. lpfc_stop_port(phba);
  2836. vports = lpfc_create_vport_work_array(phba);
  2837. if (vports != NULL)
  2838. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
  2839. lpfc_stop_vport_timers(vports[i]);
  2840. lpfc_destroy_vport_work_array(phba, vports);
  2841. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  2842. "0460 Bring Adapter offline\n");
  2843. /* Bring down the SLI layer and clean up. The HBA is offline
  2844. * now. */
  2845. lpfc_sli_hba_down(phba);
  2846. spin_lock_irq(&phba->hbalock);
  2847. phba->work_ha = 0;
  2848. spin_unlock_irq(&phba->hbalock);
  2849. vports = lpfc_create_vport_work_array(phba);
  2850. if (vports != NULL)
  2851. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2852. shost = lpfc_shost_from_vport(vports[i]);
  2853. spin_lock_irq(shost->host_lock);
  2854. vports[i]->work_port_events = 0;
  2855. vports[i]->fc_flag |= FC_OFFLINE_MODE;
  2856. spin_unlock_irq(shost->host_lock);
  2857. }
  2858. lpfc_destroy_vport_work_array(phba, vports);
  2859. }
  2860. /**
  2861. * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  2862. * @phba: pointer to lpfc hba data structure.
  2863. *
  2864. * This routine is to free all the SCSI buffers and IOCBs from the driver
  2865. * list back to the kernel. It is called from lpfc_pci_remove_one to free
  2866. * the internal resources before the device is removed from the system.
  2867. **/
  2868. static void
  2869. lpfc_scsi_free(struct lpfc_hba *phba)
  2870. {
  2871. struct lpfc_scsi_buf *sb, *sb_next;
  2872. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
  2873. return;
  2874. spin_lock_irq(&phba->hbalock);
  2875. /* Release all the lpfc_scsi_bufs maintained by this host. */
  2876. spin_lock(&phba->scsi_buf_list_put_lock);
  2877. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
  2878. list) {
  2879. list_del(&sb->list);
  2880. pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
  2881. sb->dma_handle);
  2882. kfree(sb);
  2883. phba->total_scsi_bufs--;
  2884. }
  2885. spin_unlock(&phba->scsi_buf_list_put_lock);
  2886. spin_lock(&phba->scsi_buf_list_get_lock);
  2887. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
  2888. list) {
  2889. list_del(&sb->list);
  2890. pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
  2891. sb->dma_handle);
  2892. kfree(sb);
  2893. phba->total_scsi_bufs--;
  2894. }
  2895. spin_unlock(&phba->scsi_buf_list_get_lock);
  2896. spin_unlock_irq(&phba->hbalock);
  2897. }
  2898. /**
  2899. * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
  2900. * @phba: pointer to lpfc hba data structure.
  2901. *
  2902. * This routine is to free all the NVME buffers and IOCBs from the driver
  2903. * list back to the kernel. It is called from lpfc_pci_remove_one to free
  2904. * the internal resources before the device is removed from the system.
  2905. **/
  2906. static void
  2907. lpfc_nvme_free(struct lpfc_hba *phba)
  2908. {
  2909. struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
  2910. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
  2911. return;
  2912. spin_lock_irq(&phba->hbalock);
  2913. /* Release all the lpfc_nvme_bufs maintained by this host. */
  2914. spin_lock(&phba->nvme_buf_list_put_lock);
  2915. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  2916. &phba->lpfc_nvme_buf_list_put, list) {
  2917. list_del(&lpfc_ncmd->list);
  2918. pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
  2919. lpfc_ncmd->dma_handle);
  2920. kfree(lpfc_ncmd);
  2921. phba->total_nvme_bufs--;
  2922. }
  2923. spin_unlock(&phba->nvme_buf_list_put_lock);
  2924. spin_lock(&phba->nvme_buf_list_get_lock);
  2925. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  2926. &phba->lpfc_nvme_buf_list_get, list) {
  2927. list_del(&lpfc_ncmd->list);
  2928. pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
  2929. lpfc_ncmd->dma_handle);
  2930. kfree(lpfc_ncmd);
  2931. phba->total_nvme_bufs--;
  2932. }
  2933. spin_unlock(&phba->nvme_buf_list_get_lock);
  2934. spin_unlock_irq(&phba->hbalock);
  2935. }
  2936. /**
  2937. * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
  2938. * @phba: pointer to lpfc hba data structure.
  2939. *
  2940. * This routine first calculates the sizes of the current els and allocated
  2941. * scsi sgl lists, and then goes through all sgls to update the physical
  2942. * XRIs assigned due to port function reset. During port initialization, the
  2943. * current els and allocated scsi sgl lists are 0s.
  2944. *
  2945. * Return codes
  2946. * 0 - successful (for now, it always returns 0)
  2947. **/
  2948. int
  2949. lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
  2950. {
  2951. struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
  2952. uint16_t i, lxri, xri_cnt, els_xri_cnt;
  2953. LIST_HEAD(els_sgl_list);
  2954. int rc;
  2955. /*
  2956. * update on pci function's els xri-sgl list
  2957. */
  2958. els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
  2959. if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
  2960. /* els xri-sgl expanded */
  2961. xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
  2962. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  2963. "3157 ELS xri-sgl count increased from "
  2964. "%d to %d\n", phba->sli4_hba.els_xri_cnt,
  2965. els_xri_cnt);
  2966. /* allocate the additional els sgls */
  2967. for (i = 0; i < xri_cnt; i++) {
  2968. sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
  2969. GFP_KERNEL);
  2970. if (sglq_entry == NULL) {
  2971. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  2972. "2562 Failure to allocate an "
  2973. "ELS sgl entry:%d\n", i);
  2974. rc = -ENOMEM;
  2975. goto out_free_mem;
  2976. }
  2977. sglq_entry->buff_type = GEN_BUFF_TYPE;
  2978. sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
  2979. &sglq_entry->phys);
  2980. if (sglq_entry->virt == NULL) {
  2981. kfree(sglq_entry);
  2982. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  2983. "2563 Failure to allocate an "
  2984. "ELS mbuf:%d\n", i);
  2985. rc = -ENOMEM;
  2986. goto out_free_mem;
  2987. }
  2988. sglq_entry->sgl = sglq_entry->virt;
  2989. memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
  2990. sglq_entry->state = SGL_FREED;
  2991. list_add_tail(&sglq_entry->list, &els_sgl_list);
  2992. }
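  /* hbalock is taken before the sgl_list_lock, matching the nesting used throughout this function */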
  2993. spin_lock_irq(&phba->hbalock);
  2994. spin_lock(&phba->sli4_hba.sgl_list_lock);
  2995. list_splice_init(&els_sgl_list,
  2996. &phba->sli4_hba.lpfc_els_sgl_list);
  2997. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  2998. spin_unlock_irq(&phba->hbalock);
  2999. } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
  3000. /* els xri-sgl shrunk */
  3001. xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
  3002. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3003. "3158 ELS xri-sgl count decreased from "
  3004. "%d to %d\n", phba->sli4_hba.els_xri_cnt,
  3005. els_xri_cnt);
  3006. spin_lock_irq(&phba->hbalock);
  3007. spin_lock(&phba->sli4_hba.sgl_list_lock);
  3008. list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
  3009. &els_sgl_list);
  3010. /* release extra els sgls from list */
  3011. for (i = 0; i < xri_cnt; i++) {
  3012. list_remove_head(&els_sgl_list,
  3013. sglq_entry, struct lpfc_sglq, list);
  3014. if (sglq_entry) {
  3015. __lpfc_mbuf_free(phba, sglq_entry->virt,
  3016. sglq_entry->phys);
  3017. kfree(sglq_entry);
  3018. }
  3019. }
  3020. list_splice_init(&els_sgl_list,
  3021. &phba->sli4_hba.lpfc_els_sgl_list);
  3022. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  3023. spin_unlock_irq(&phba->hbalock);
  3024. } else
  3025. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3026. "3163 ELS xri-sgl count unchanged: %d\n",
  3027. els_xri_cnt);
  3028. phba->sli4_hba.els_xri_cnt = els_xri_cnt;
  3029. /* update xris to els sgls on the list */
  3030. sglq_entry = NULL;
  3031. sglq_entry_next = NULL;
  3032. list_for_each_entry_safe(sglq_entry, sglq_entry_next,
  3033. &phba->sli4_hba.lpfc_els_sgl_list, list) {
  3034. lxri = lpfc_sli4_next_xritag(phba);
  3035. if (lxri == NO_XRI) {
  3036. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3037. "2400 Failed to allocate xri for "
  3038. "ELS sgl\n");
  3039. rc = -ENOMEM;
  3040. goto out_free_mem;
  3041. }
  3042. sglq_entry->sli4_lxritag = lxri;
  3043. sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
  3044. }
  3045. return 0;
  3046. out_free_mem:
  3047. lpfc_free_els_sgl_list(phba);
  3048. return rc;
  3049. }
  3050. /**
  3051. * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
  3052. * @phba: pointer to lpfc hba data structure.
  3053. *
  3054. * This routine first calculates the size of the current nvmet xri-sgl list,
  3055. * and then goes through all sgls to update the physical XRIs assigned due
  3056. * to port function reset. During port initialization, the current nvmet
  3057. * xri-sgl list is empty.
  3058. *
  3059. * Return codes
  3060. * 0 - successful (for now, it always returns 0)
  3061. **/
  3062. int
  3063. lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
  3064. {
  3065. struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
  3066. uint16_t i, lxri, xri_cnt, els_xri_cnt;
  3067. uint16_t nvmet_xri_cnt, tot_cnt;
  3068. LIST_HEAD(nvmet_sgl_list);
  3069. int rc;
  3070. /*
  3071. * update on pci function's nvmet xri-sgl list
  3072. */
  3073. els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
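  /* NVMET needs one sgl per receive buffer posted to each MRQ, capped at the XRIs left over after the ELS allocation */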
  3074. nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
  3075. tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
  3076. if (nvmet_xri_cnt > tot_cnt) {
  3077. phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
  3078. nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
  3079. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3080. "6301 NVMET post-sgl count changed to %d\n",
  3081. phba->cfg_nvmet_mrq_post);
  3082. }
  3083. if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
  3084. /* nvmet xri-sgl expanded */
  3085. xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
  3086. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3087. "6302 NVMET xri-sgl cnt grew from %d to %d\n",
  3088. phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
  3089. /* allocate the additional nvmet sgls */
  3090. for (i = 0; i < xri_cnt; i++) {
  3091. sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
  3092. GFP_KERNEL);
  3093. if (sglq_entry == NULL) {
  3094. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3095. "6303 Failure to allocate an "
  3096. "NVMET sgl entry:%d\n", i);
  3097. rc = -ENOMEM;
  3098. goto out_free_mem;
  3099. }
  3100. sglq_entry->buff_type = NVMET_BUFF_TYPE;
  3101. sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
  3102. &sglq_entry->phys);
  3103. if (sglq_entry->virt == NULL) {
  3104. kfree(sglq_entry);
  3105. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3106. "6304 Failure to allocate an "
  3107. "NVMET buf:%d\n", i);
  3108. rc = -ENOMEM;
  3109. goto out_free_mem;
  3110. }
  3111. sglq_entry->sgl = sglq_entry->virt;
  3112. memset(sglq_entry->sgl, 0,
  3113. phba->cfg_sg_dma_buf_size);
  3114. sglq_entry->state = SGL_FREED;
  3115. list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
  3116. }
  3117. spin_lock_irq(&phba->hbalock);
  3118. spin_lock(&phba->sli4_hba.sgl_list_lock);
  3119. list_splice_init(&nvmet_sgl_list,
  3120. &phba->sli4_hba.lpfc_nvmet_sgl_list);
  3121. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  3122. spin_unlock_irq(&phba->hbalock);
  3123. } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
  3124. /* nvmet xri-sgl shrunk */
  3125. xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
  3126. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3127. "6305 NVMET xri-sgl count decreased from "
  3128. "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
  3129. nvmet_xri_cnt);
  3130. spin_lock_irq(&phba->hbalock);
  3131. spin_lock(&phba->sli4_hba.sgl_list_lock);
  3132. list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
  3133. &nvmet_sgl_list);
  3134. /* release extra nvmet sgls from list */
  3135. for (i = 0; i < xri_cnt; i++) {
  3136. list_remove_head(&nvmet_sgl_list,
  3137. sglq_entry, struct lpfc_sglq, list);
  3138. if (sglq_entry) {
  3139. lpfc_nvmet_buf_free(phba, sglq_entry->virt,
  3140. sglq_entry->phys);
  3141. kfree(sglq_entry);
  3142. }
  3143. }
  3144. list_splice_init(&nvmet_sgl_list,
  3145. &phba->sli4_hba.lpfc_nvmet_sgl_list);
  3146. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  3147. spin_unlock_irq(&phba->hbalock);
  3148. } else
  3149. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3150. "6306 NVMET xri-sgl count unchanged: %d\n",
  3151. nvmet_xri_cnt);
  3152. phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
  3153. /* update xris to nvmet sgls on the list */
  3154. sglq_entry = NULL;
  3155. sglq_entry_next = NULL;
  3156. list_for_each_entry_safe(sglq_entry, sglq_entry_next,
  3157. &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
  3158. lxri = lpfc_sli4_next_xritag(phba);
  3159. if (lxri == NO_XRI) {
  3160. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3161. "6307 Failed to allocate xri for "
  3162. "NVMET sgl\n");
  3163. rc = -ENOMEM;
  3164. goto out_free_mem;
  3165. }
  3166. sglq_entry->sli4_lxritag = lxri;
  3167. sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
  3168. }
  3169. return 0;
  3170. out_free_mem:
  3171. lpfc_free_nvmet_sgl_list(phba);
  3172. return rc;
  3173. }
  3174. /**
  3175. * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
  3176. * @phba: pointer to lpfc hba data structure.
  3177. *
  3178. * This routine first calculates the sizes of the current els and allocated
  3179. * scsi sgl lists, and then goes through all sgls to update the physical
  3180. * XRIs assigned due to port function reset. During port initialization, the
  3181. * current els and allocated scsi sgl lists are 0s.
  3182. *
  3183. * Return codes
  3184. * 0 - successful (for now, it always returns 0)
  3185. **/
  3186. int
  3187. lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
  3188. {
  3189. struct lpfc_scsi_buf *psb, *psb_next;
  3190. uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
  3191. LIST_HEAD(scsi_sgl_list);
  3192. int rc;
  3193. /*
  3194. * update on pci function's els xri-sgl list
  3195. */
  3196. els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
  3197. phba->total_scsi_bufs = 0;
  3198. /*
  3199. * update on pci function's allocated scsi xri-sgl list
  3200. */
  3201. /* maximum number of xris available for scsi buffers */
  3202. phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
  3203. els_xri_cnt;
  3204. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
  3205. return 0;
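  /* When both FCP and NVME are enabled, cfg_xri_split is the percentage of the
   * remaining XRIs reserved for SCSI; NVME buffers use the rest
   * (see lpfc_sli4_nvme_sgl_update).
   */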
  3206. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
  3207. phba->sli4_hba.scsi_xri_max = /* Split them up */
  3208. (phba->sli4_hba.scsi_xri_max *
  3209. phba->cfg_xri_split) / 100;
  3210. spin_lock_irq(&phba->scsi_buf_list_get_lock);
  3211. spin_lock(&phba->scsi_buf_list_put_lock);
  3212. list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
  3213. list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
  3214. spin_unlock(&phba->scsi_buf_list_put_lock);
  3215. spin_unlock_irq(&phba->scsi_buf_list_get_lock);
  3216. if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
  3217. /* max scsi xri shrunk below the allocated scsi buffers */
  3218. scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
  3219. phba->sli4_hba.scsi_xri_max;
  3220. /* release the extra allocated scsi buffers */
  3221. for (i = 0; i < scsi_xri_cnt; i++) {
  3222. list_remove_head(&scsi_sgl_list, psb,
  3223. struct lpfc_scsi_buf, list);
  3224. if (psb) {
  3225. pci_pool_free(phba->lpfc_sg_dma_buf_pool,
  3226. psb->data, psb->dma_handle);
  3227. kfree(psb);
  3228. }
  3229. }
  3230. spin_lock_irq(&phba->scsi_buf_list_get_lock);
  3231. phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
  3232. spin_unlock_irq(&phba->scsi_buf_list_get_lock);
  3233. }
  3234. /* update xris associated to remaining allocated scsi buffers */
  3235. psb = NULL;
  3236. psb_next = NULL;
  3237. list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
  3238. lxri = lpfc_sli4_next_xritag(phba);
  3239. if (lxri == NO_XRI) {
  3240. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3241. "2560 Failed to allocate xri for "
  3242. "scsi buffer\n");
  3243. rc = -ENOMEM;
  3244. goto out_free_mem;
  3245. }
  3246. psb->cur_iocbq.sli4_lxritag = lxri;
  3247. psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
  3248. }
  3249. spin_lock_irq(&phba->scsi_buf_list_get_lock);
  3250. spin_lock(&phba->scsi_buf_list_put_lock);
  3251. list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
  3252. INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
  3253. spin_unlock(&phba->scsi_buf_list_put_lock);
  3254. spin_unlock_irq(&phba->scsi_buf_list_get_lock);
  3255. return 0;
  3256. out_free_mem:
  3257. lpfc_scsi_free(phba);
  3258. return rc;
  3259. }
  3260. static uint64_t
  3261. lpfc_get_wwpn(struct lpfc_hba *phba)
  3262. {
  3263. uint64_t wwn;
  3264. int rc;
  3265. LPFC_MBOXQ_t *mboxq;
  3266. MAILBOX_t *mb;
  3267. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
  3268. GFP_KERNEL);
  3269. if (!mboxq)
  3270. return (uint64_t)-1;
  3271. /* First get WWN of HBA instance */
  3272. lpfc_read_nv(phba, mboxq);
  3273. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  3274. if (rc != MBX_SUCCESS) {
  3275. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3276. "6019 Mailbox failed , mbxCmd x%x "
  3277. "READ_NV, mbxStatus x%x\n",
  3278. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  3279. bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  3280. mempool_free(mboxq, phba->mbox_mem_pool);
  3281. return (uint64_t) -1;
  3282. }
  3283. mb = &mboxq->u.mb;
  3284. memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
  3285. /* wwn is WWPN of HBA instance */
  3286. mempool_free(mboxq, phba->mbox_mem_pool);
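  /* SLI4 returns the name in big-endian order; for SLI3 the two 32-bit words must be swapped instead */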
  3287. if (phba->sli_rev == LPFC_SLI_REV4)
  3288. return be64_to_cpu(wwn);
  3289. else
  3290. return (((wwn & 0xffffffff00000000) >> 32) |
  3291. ((wwn & 0x00000000ffffffff) << 32));
  3292. }
  3293. /**
  3294. * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
  3295. * @phba: pointer to lpfc hba data structure.
  3296. *
  3297. * This routine first calculates the size of the current nvme xri-sgl list,
  3298. * and then goes through all sgls to update the physical XRIs assigned due
  3299. * to port function reset. During port initialization, the current nvme
  3300. * xri-sgl list is empty.
  3301. *
  3302. * Return codes
  3303. * 0 - successful (for now, it always returns 0)
  3304. **/
  3305. int
  3306. lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
  3307. {
  3308. struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
  3309. uint16_t i, lxri, els_xri_cnt;
  3310. uint16_t nvme_xri_cnt, nvme_xri_max;
  3311. LIST_HEAD(nvme_sgl_list);
  3312. int rc;
  3313. phba->total_nvme_bufs = 0;
  3314. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
  3315. return 0;
  3316. /*
  3317. * update on pci function's allocated nvme xri-sgl list
  3318. */
  3319. /* maximum number of xris available for nvme buffers */
  3320. els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
  3321. nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
  3322. phba->sli4_hba.nvme_xri_max = nvme_xri_max;
  3323. phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
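  /* NVME is given whatever XRIs remain after the ELS and SCSI allocations */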
  3324. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3325. "6074 Current allocated NVME xri-sgl count:%d, "
  3326. "maximum NVME xri count:%d\n",
  3327. phba->sli4_hba.nvme_xri_cnt,
  3328. phba->sli4_hba.nvme_xri_max);
  3329. spin_lock_irq(&phba->nvme_buf_list_get_lock);
  3330. spin_lock(&phba->nvme_buf_list_put_lock);
  3331. list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
  3332. list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
  3333. spin_unlock(&phba->nvme_buf_list_put_lock);
  3334. spin_unlock_irq(&phba->nvme_buf_list_get_lock);
  3335. if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
  3336. /* max nvme xri shrunk below the allocated nvme buffers */
  3337. spin_lock_irq(&phba->nvme_buf_list_get_lock);
  3338. nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
  3339. phba->sli4_hba.nvme_xri_max;
  3340. spin_unlock_irq(&phba->nvme_buf_list_get_lock);
  3341. /* release the extra allocated nvme buffers */
  3342. for (i = 0; i < nvme_xri_cnt; i++) {
  3343. list_remove_head(&nvme_sgl_list, lpfc_ncmd,
  3344. struct lpfc_nvme_buf, list);
  3345. if (lpfc_ncmd) {
  3346. pci_pool_free(phba->lpfc_sg_dma_buf_pool,
  3347. lpfc_ncmd->data,
  3348. lpfc_ncmd->dma_handle);
  3349. kfree(lpfc_ncmd);
  3350. }
  3351. }
  3352. spin_lock_irq(&phba->nvme_buf_list_get_lock);
  3353. phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
  3354. spin_unlock_irq(&phba->nvme_buf_list_get_lock);
  3355. }
  3356. /* update xris associated to remaining allocated nvme buffers */
  3357. lpfc_ncmd = NULL;
  3358. lpfc_ncmd_next = NULL;
  3359. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  3360. &nvme_sgl_list, list) {
  3361. lxri = lpfc_sli4_next_xritag(phba);
  3362. if (lxri == NO_XRI) {
  3363. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3364. "6075 Failed to allocate xri for "
  3365. "nvme buffer\n");
  3366. rc = -ENOMEM;
  3367. goto out_free_mem;
  3368. }
  3369. lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
  3370. lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
  3371. }
  3372. spin_lock_irq(&phba->nvme_buf_list_get_lock);
  3373. spin_lock(&phba->nvme_buf_list_put_lock);
  3374. list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
  3375. INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
  3376. spin_unlock(&phba->nvme_buf_list_put_lock);
  3377. spin_unlock_irq(&phba->nvme_buf_list_get_lock);
  3378. return 0;
  3379. out_free_mem:
  3380. lpfc_nvme_free(phba);
  3381. return rc;
  3382. }
  3383. /**
  3384. * lpfc_create_port - Create an FC port
  3385. * @phba: pointer to lpfc hba data structure.
  3386. * @instance: a unique integer ID to this FC port.
  3387. * @dev: pointer to the device data structure.
  3388. *
  3389. * This routine creates an FC port for the upper layer protocol. The FC port
  3390. * can be created on top of either a physical port or a virtual port provided
  3391. * by the HBA. This routine also allocates a SCSI host data structure (shost)
  3392. * and associates the FC port created before adding the shost into the SCSI
  3393. * layer.
  3394. *
  3395. * Return codes
  3396. * @vport - pointer to the virtual N_Port data structure.
  3397. * NULL - port create failed.
  3398. **/
  3399. struct lpfc_vport *
  3400. lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
  3401. {
  3402. struct lpfc_vport *vport;
  3403. struct Scsi_Host *shost = NULL;
  3404. int error = 0;
  3405. int i;
  3406. uint64_t wwn;
  3407. bool use_no_reset_hba = false;
  3408. wwn = lpfc_get_wwpn(phba);
  3409. for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
  3410. if (wwn == lpfc_no_hba_reset[i]) {
  3411. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3412. "6020 Setting use_no_reset port=%llx\n",
  3413. wwn);
  3414. use_no_reset_hba = true;
  3415. break;
  3416. }
  3417. }
  3418. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
  3419. if (dev != &phba->pcidev->dev) {
  3420. shost = scsi_host_alloc(&lpfc_vport_template,
  3421. sizeof(struct lpfc_vport));
  3422. } else {
  3423. if (!use_no_reset_hba)
  3424. shost = scsi_host_alloc(&lpfc_template,
  3425. sizeof(struct lpfc_vport));
  3426. else
  3427. shost = scsi_host_alloc(&lpfc_template_no_hr,
  3428. sizeof(struct lpfc_vport));
  3429. }
  3430. } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  3431. shost = scsi_host_alloc(&lpfc_template_nvme,
  3432. sizeof(struct lpfc_vport));
  3433. }
  3434. if (!shost)
  3435. goto out;
  3436. vport = (struct lpfc_vport *) shost->hostdata;
  3437. vport->phba = phba;
  3438. vport->load_flag |= FC_LOADING;
  3439. vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  3440. vport->fc_rscn_flush = 0;
  3441. lpfc_get_vport_cfgparam(vport);
  3442. shost->unique_id = instance;
  3443. shost->max_id = LPFC_MAX_TARGET;
  3444. shost->max_lun = vport->cfg_max_luns;
  3445. shost->this_id = -1;
  3446. shost->max_cmd_len = 16;
  3447. shost->nr_hw_queues = phba->cfg_fcp_io_channel;
  3448. if (phba->sli_rev == LPFC_SLI_REV4) {
  3449. shost->dma_boundary =
  3450. phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
  3451. shost->sg_tablesize = phba->cfg_sg_seg_cnt;
  3452. }
  3453. /*
  3454. * Set initial can_queue value since 0 is no longer supported and
  3455. * scsi_add_host will fail. This will be adjusted later based on the
  3456. * max xri value determined in hba setup.
  3457. */
  3458. shost->can_queue = phba->cfg_hba_queue_depth - 10;
  3459. if (dev != &phba->pcidev->dev) {
  3460. shost->transportt = lpfc_vport_transport_template;
  3461. vport->port_type = LPFC_NPIV_PORT;
  3462. } else {
  3463. shost->transportt = lpfc_transport_template;
  3464. vport->port_type = LPFC_PHYSICAL_PORT;
  3465. }
  3466. /* Initialize all internally managed lists. */
  3467. INIT_LIST_HEAD(&vport->fc_nodes);
  3468. INIT_LIST_HEAD(&vport->rcv_buffer_list);
  3469. spin_lock_init(&vport->work_port_lock);
  3470. setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
  3471. (unsigned long)vport);
  3472. setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
  3473. (unsigned long)vport);
  3474. setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
  3475. (unsigned long)vport);
  3476. error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
  3477. if (error)
  3478. goto out_put_shost;
  3479. spin_lock_irq(&phba->hbalock);
  3480. list_add_tail(&vport->listentry, &phba->port_list);
  3481. spin_unlock_irq(&phba->hbalock);
  3482. return vport;
  3483. out_put_shost:
  3484. scsi_host_put(shost);
  3485. out:
  3486. return NULL;
  3487. }
  3488. /**
  3489. * destroy_port - destroy an FC port
  3490. * @vport: pointer to an lpfc virtual N_Port data structure.
  3491. *
  3492. * This routine destroys an FC port from the upper layer protocol. All the
  3493. * resources associated with the port are released.
  3494. **/
  3495. void
  3496. destroy_port(struct lpfc_vport *vport)
  3497. {
  3498. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  3499. struct lpfc_hba *phba = vport->phba;
  3500. lpfc_debugfs_terminate(vport);
  3501. fc_remove_host(shost);
  3502. scsi_remove_host(shost);
  3503. spin_lock_irq(&phba->hbalock);
  3504. list_del_init(&vport->listentry);
  3505. spin_unlock_irq(&phba->hbalock);
  3506. lpfc_cleanup(vport);
  3507. return;
  3508. }
  3509. /**
  3510. * lpfc_get_instance - Get a unique integer ID
  3511. *
  3512. * This routine allocates a unique integer ID from lpfc_hba_index pool. It
  3513. * uses the kernel idr facility to perform the task.
  3514. *
  3515. * Return codes:
  3516. * instance - a unique integer ID allocated as the new instance.
  3517. * -1 - lpfc get instance failed.
  3518. **/
  3519. int
  3520. lpfc_get_instance(void)
  3521. {
  3522. int ret;
  3523. ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
  3524. return ret < 0 ? -1 : ret;
  3525. }
  3526. /**
  3527. * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
  3528. * @shost: pointer to SCSI host data structure.
  3529. * @time: elapsed time of the scan in jiffies.
  3530. *
  3531. * This routine is called by the SCSI layer with a SCSI host to determine
  3532. * whether the scan host is finished.
  3533. *
  3534. * Note: there is no scan_start function as adapter initialization will have
  3535. * asynchronously kicked off the link initialization.
  3536. *
  3537. * Return codes
  3538. * 0 - SCSI host scan is not over yet.
  3539. * 1 - SCSI host scan is over.
  3540. **/
  3541. int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
  3542. {
  3543. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  3544. struct lpfc_hba *phba = vport->phba;
  3545. int stat = 0;
  3546. spin_lock_irq(shost->host_lock);
  3547. if (vport->load_flag & FC_UNLOADING) {
  3548. stat = 1;
  3549. goto finished;
  3550. }
  3551. if (time >= msecs_to_jiffies(30 * 1000)) {
  3552. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  3553. "0461 Scanning longer than 30 "
  3554. "seconds. Continuing initialization\n");
  3555. stat = 1;
  3556. goto finished;
  3557. }
  3558. if (time >= msecs_to_jiffies(15 * 1000) &&
  3559. phba->link_state <= LPFC_LINK_DOWN) {
  3560. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  3561. "0465 Link down longer than 15 "
  3562. "seconds. Continuing initialization\n");
  3563. stat = 1;
  3564. goto finished;
  3565. }
  3566. if (vport->port_state != LPFC_VPORT_READY)
  3567. goto finished;
  3568. if (vport->num_disc_nodes || vport->fc_prli_sent)
  3569. goto finished;
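  /* with no mapped nodes yet, keep the scan running for up to 2 seconds */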
  3570. if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
  3571. goto finished;
  3572. if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
  3573. goto finished;
  3574. stat = 1;
  3575. finished:
  3576. spin_unlock_irq(shost->host_lock);
  3577. return stat;
  3578. }
  3579. /**
  3580. * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
  3581. * @shost: pointer to SCSI host data structure.
  3582. *
  3583. * This routine initializes the SCSI host attributes of a given FC port. The
  3584. * SCSI host can be either on top of a physical port or a virtual port.
  3585. **/
  3586. void lpfc_host_attrib_init(struct Scsi_Host *shost)
  3587. {
  3588. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  3589. struct lpfc_hba *phba = vport->phba;
  3590. /*
  3591. * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
  3592. */
  3593. fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
  3594. fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
  3595. fc_host_supported_classes(shost) = FC_COS_CLASS3;
  3596. memset(fc_host_supported_fc4s(shost), 0,
  3597. sizeof(fc_host_supported_fc4s(shost)));
  3598. fc_host_supported_fc4s(shost)[2] = 1;
  3599. fc_host_supported_fc4s(shost)[7] = 1;
  3600. lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
  3601. sizeof fc_host_symbolic_name(shost));
  3602. fc_host_supported_speeds(shost) = 0;
  3603. if (phba->lmt & LMT_32Gb)
  3604. fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
  3605. if (phba->lmt & LMT_16Gb)
  3606. fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
  3607. if (phba->lmt & LMT_10Gb)
  3608. fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
  3609. if (phba->lmt & LMT_8Gb)
  3610. fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
  3611. if (phba->lmt & LMT_4Gb)
  3612. fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
  3613. if (phba->lmt & LMT_2Gb)
  3614. fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
  3615. if (phba->lmt & LMT_1Gb)
  3616. fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
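  /* max frame size is the 12-bit BB receive data field size from the service
   * parameters (high nibble in bbRcvSizeMsb, low byte in bbRcvSizeLsb)
   */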
  3617. fc_host_maxframe_size(shost) =
  3618. (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
  3619. (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
  3620. fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
  3621. /* This value is also unchanging */
  3622. memset(fc_host_active_fc4s(shost), 0,
  3623. sizeof(fc_host_active_fc4s(shost)));
  3624. fc_host_active_fc4s(shost)[2] = 1;
  3625. fc_host_active_fc4s(shost)[7] = 1;
  3626. fc_host_max_npiv_vports(shost) = phba->max_vpi;
  3627. spin_lock_irq(shost->host_lock);
  3628. vport->load_flag &= ~FC_LOADING;
  3629. spin_unlock_irq(shost->host_lock);
  3630. }
  3631. /**
  3632. * lpfc_stop_port_s3 - Stop SLI3 device port
  3633. * @phba: pointer to lpfc hba data structure.
  3634. *
  3635. * This routine is invoked to stop an SLI3 device port, it stops the device
  3636. * from generating interrupts and stops the device driver's timers for the
  3637. * device.
  3638. **/
  3639. static void
  3640. lpfc_stop_port_s3(struct lpfc_hba *phba)
  3641. {
  3642. /* Clear all interrupt enable conditions */
  3643. writel(0, phba->HCregaddr);
  3644. readl(phba->HCregaddr); /* flush */
  3645. /* Clear all pending interrupts */
  3646. writel(0xffffffff, phba->HAregaddr);
  3647. readl(phba->HAregaddr); /* flush */
  3648. /* Reset some HBA SLI setup states */
  3649. lpfc_stop_hba_timers(phba);
  3650. phba->pport->work_port_events = 0;
  3651. }
  3652. /**
  3653. * lpfc_stop_port_s4 - Stop SLI4 device port
  3654. * @phba: pointer to lpfc hba data structure.
  3655. *
  3656. * This routine is invoked to stop an SLI4 device port, it stops the device
  3657. * from generating interrupts and stops the device driver's timers for the
  3658. * device.
  3659. **/
  3660. static void
  3661. lpfc_stop_port_s4(struct lpfc_hba *phba)
  3662. {
  3663. /* Reset some HBA SLI4 setup states */
  3664. lpfc_stop_hba_timers(phba);
  3665. phba->pport->work_port_events = 0;
  3666. phba->sli4_hba.intr_enable = 0;
  3667. }
  3668. /**
  3669. * lpfc_stop_port - Wrapper function for stopping hba port
  3670. * @phba: Pointer to HBA context object.
  3671. *
  3672. * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
  3673. * the API jump table function pointer from the lpfc_hba struct.
  3674. **/
  3675. void
  3676. lpfc_stop_port(struct lpfc_hba *phba)
  3677. {
  3678. phba->lpfc_stop_port(phba);
  3679. }
  3680. /**
  3681. * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
  3682. * @phba: Pointer to hba for which this call is being executed.
  3683. *
  3684. * This routine starts the timer waiting for the FCF rediscovery to complete.
  3685. **/
  3686. void
  3687. lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
  3688. {
  3689. unsigned long fcf_redisc_wait_tmo =
  3690. (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
  3691. /* Start fcf rediscovery wait period timer */
  3692. mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
  3693. spin_lock_irq(&phba->hbalock);
  3694. /* Allow action to new fcf asynchronous event */
  3695. phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
  3696. /* Mark the FCF rediscovery pending state */
  3697. phba->fcf.fcf_flag |= FCF_REDISC_PEND;
  3698. spin_unlock_irq(&phba->hbalock);
  3699. }
  3700. /**
  3701. * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
  3702. * @ptr: pointer to lpfc hba data structure (passed as an unsigned long).
  3703. *
  3704. * This routine is invoked when the wait for FCF table rediscovery has
  3705. * timed out. If new FCF record(s) have been discovered during the
  3706. * wait period, a new FCF event shall be added to the FCOE async event
  3707. * list, and the worker thread shall be woken up to process it from the
  3708. * worker thread context.
  3709. **/
  3710. static void
  3711. lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
  3712. {
  3713. struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
  3714. /* Don't send FCF rediscovery event if timer cancelled */
  3715. spin_lock_irq(&phba->hbalock);
  3716. if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
  3717. spin_unlock_irq(&phba->hbalock);
  3718. return;
  3719. }
  3720. /* Clear FCF rediscovery timer pending flag */
  3721. phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
  3722. /* FCF rediscovery event to worker thread */
  3723. phba->fcf.fcf_flag |= FCF_REDISC_EVT;
  3724. spin_unlock_irq(&phba->hbalock);
  3725. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  3726. "2776 FCF rediscover quiescent timer expired\n");
  3727. /* wake up worker thread */
  3728. lpfc_worker_wake_up(phba);
  3729. }
  3730. /**
  3731. * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  3732. * @phba: pointer to lpfc hba data structure.
  3733. * @acqe_link: pointer to the async link completion queue entry.
  3734. *
  3735. * This routine is to parse the SLI4 link-attention link fault code and
  3736. * translate it into the base driver's read link attention mailbox command
  3737. * status.
  3738. *
  3739. * Return: Link-attention status in terms of base driver's coding.
  3740. **/
  3741. static uint16_t
  3742. lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
  3743. struct lpfc_acqe_link *acqe_link)
  3744. {
  3745. uint16_t latt_fault;
  3746. switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
  3747. case LPFC_ASYNC_LINK_FAULT_NONE:
  3748. case LPFC_ASYNC_LINK_FAULT_LOCAL:
  3749. case LPFC_ASYNC_LINK_FAULT_REMOTE:
  3750. latt_fault = 0;
  3751. break;
  3752. default:
  3753. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  3754. "0398 Invalid link fault code: x%x\n",
  3755. bf_get(lpfc_acqe_link_fault, acqe_link));
  3756. latt_fault = MBXERR_ERROR;
  3757. break;
  3758. }
  3759. return latt_fault;
  3760. }
  3761. /**
  3762. * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
  3763. * @phba: pointer to lpfc hba data structure.
  3764. * @acqe_link: pointer to the async link completion queue entry.
  3765. *
  3766. * This routine is to parse the SLI4 link attention type and translate it
  3767. * into the base driver's link attention type coding.
  3768. *
  3769. * Return: Link attention type in terms of base driver's coding.
  3770. **/
  3771. static uint8_t
  3772. lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
  3773. struct lpfc_acqe_link *acqe_link)
  3774. {
  3775. uint8_t att_type;
  3776. switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
  3777. case LPFC_ASYNC_LINK_STATUS_DOWN:
  3778. case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
  3779. att_type = LPFC_ATT_LINK_DOWN;
  3780. break;
  3781. case LPFC_ASYNC_LINK_STATUS_UP:
  3782. /* Ignore physical link up events - wait for logical link up */
  3783. att_type = LPFC_ATT_RESERVED;
  3784. break;
  3785. case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
  3786. att_type = LPFC_ATT_LINK_UP;
  3787. break;
  3788. default:
  3789. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  3790. "0399 Invalid link attention type: x%x\n",
  3791. bf_get(lpfc_acqe_link_status, acqe_link));
  3792. att_type = LPFC_ATT_RESERVED;
  3793. break;
  3794. }
  3795. return att_type;
  3796. }
  3797. /**
  3798. * lpfc_sli_port_speed_get - Get the port's link speed in Mbps
  3799. * @phba: pointer to lpfc hba data structure.
  3800. *
  3801. * This routine is to get an SLI3 FC port's link speed in Mbps.
  3802. *
  3803. * Return: link speed in terms of Mbps.
  3804. **/
  3805. uint32_t
  3806. lpfc_sli_port_speed_get(struct lpfc_hba *phba)
  3807. {
  3808. uint32_t link_speed;
  3809. if (!lpfc_is_link_up(phba))
  3810. return 0;
  3811. if (phba->sli_rev <= LPFC_SLI_REV3) {
  3812. switch (phba->fc_linkspeed) {
  3813. case LPFC_LINK_SPEED_1GHZ:
  3814. link_speed = 1000;
  3815. break;
  3816. case LPFC_LINK_SPEED_2GHZ:
  3817. link_speed = 2000;
  3818. break;
  3819. case LPFC_LINK_SPEED_4GHZ:
  3820. link_speed = 4000;
  3821. break;
  3822. case LPFC_LINK_SPEED_8GHZ:
  3823. link_speed = 8000;
  3824. break;
  3825. case LPFC_LINK_SPEED_10GHZ:
  3826. link_speed = 10000;
  3827. break;
  3828. case LPFC_LINK_SPEED_16GHZ:
  3829. link_speed = 16000;
  3830. break;
  3831. default:
  3832. link_speed = 0;
  3833. }
  3834. } else {
  3835. if (phba->sli4_hba.link_state.logical_speed)
  3836. link_speed =
  3837. phba->sli4_hba.link_state.logical_speed;
  3838. else
  3839. link_speed = phba->sli4_hba.link_state.speed;
  3840. }
  3841. return link_speed;
  3842. }
  3843. /**
  3844. * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
  3845. * @phba: pointer to lpfc hba data structure.
  3846. * @evt_code: asynchronous event code.
  3847. * @speed_code: asynchronous event link speed code.
  3848. *
  3849. * This routine parses the given SLI4 async event link speed code into
  3850. * a link speed value in Mbps.
  3851. *
  3852. * Return: link speed in terms of Mbps.
  3853. **/
  3854. static uint32_t
  3855. lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
  3856. uint8_t speed_code)
  3857. {
  3858. uint32_t port_speed;
  3859. switch (evt_code) {
  3860. case LPFC_TRAILER_CODE_LINK:
  3861. switch (speed_code) {
  3862. case LPFC_ASYNC_LINK_SPEED_ZERO:
  3863. port_speed = 0;
  3864. break;
  3865. case LPFC_ASYNC_LINK_SPEED_10MBPS:
  3866. port_speed = 10;
  3867. break;
  3868. case LPFC_ASYNC_LINK_SPEED_100MBPS:
  3869. port_speed = 100;
  3870. break;
  3871. case LPFC_ASYNC_LINK_SPEED_1GBPS:
  3872. port_speed = 1000;
  3873. break;
  3874. case LPFC_ASYNC_LINK_SPEED_10GBPS:
  3875. port_speed = 10000;
  3876. break;
  3877. case LPFC_ASYNC_LINK_SPEED_20GBPS:
  3878. port_speed = 20000;
  3879. break;
  3880. case LPFC_ASYNC_LINK_SPEED_25GBPS:
  3881. port_speed = 25000;
  3882. break;
  3883. case LPFC_ASYNC_LINK_SPEED_40GBPS:
  3884. port_speed = 40000;
  3885. break;
  3886. default:
  3887. port_speed = 0;
  3888. }
  3889. break;
  3890. case LPFC_TRAILER_CODE_FC:
  3891. switch (speed_code) {
  3892. case LPFC_FC_LA_SPEED_UNKNOWN:
  3893. port_speed = 0;
  3894. break;
  3895. case LPFC_FC_LA_SPEED_1G:
  3896. port_speed = 1000;
  3897. break;
  3898. case LPFC_FC_LA_SPEED_2G:
  3899. port_speed = 2000;
  3900. break;
  3901. case LPFC_FC_LA_SPEED_4G:
  3902. port_speed = 4000;
  3903. break;
  3904. case LPFC_FC_LA_SPEED_8G:
  3905. port_speed = 8000;
  3906. break;
  3907. case LPFC_FC_LA_SPEED_10G:
  3908. port_speed = 10000;
  3909. break;
  3910. case LPFC_FC_LA_SPEED_16G:
  3911. port_speed = 16000;
  3912. break;
  3913. case LPFC_FC_LA_SPEED_32G:
  3914. port_speed = 32000;
  3915. break;
  3916. default:
  3917. port_speed = 0;
  3918. }
  3919. break;
  3920. default:
  3921. port_speed = 0;
  3922. }
  3923. return port_speed;
  3924. }
  3925. /**
  3926. * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
  3927. * @phba: pointer to lpfc hba data structure.
  3928. * @acqe_link: pointer to the async link completion queue entry.
  3929. *
  3930. * This routine is to handle the SLI4 asynchronous FCoE link event.
  3931. **/
  3932. static void
  3933. lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
  3934. struct lpfc_acqe_link *acqe_link)
  3935. {
  3936. struct lpfc_dmabuf *mp;
  3937. LPFC_MBOXQ_t *pmb;
  3938. MAILBOX_t *mb;
  3939. struct lpfc_mbx_read_top *la;
  3940. uint8_t att_type;
  3941. int rc;
  3942. att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
  3943. if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
  3944. return;
  3945. phba->fcoe_eventtag = acqe_link->event_tag;
  3946. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  3947. if (!pmb) {
  3948. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3949. "0395 The mboxq allocation failed\n");
  3950. return;
  3951. }
  3952. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  3953. if (!mp) {
  3954. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3955. "0396 The lpfc_dmabuf allocation failed\n");
  3956. goto out_free_pmb;
  3957. }
  3958. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  3959. if (!mp->virt) {
  3960. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3961. "0397 The mbuf allocation failed\n");
  3962. goto out_free_dmabuf;
  3963. }
  3964. /* Cleanup any outstanding ELS commands */
  3965. lpfc_els_flush_all_cmd(phba);
  3966. /* Block ELS IOCBs until we have done process link event */
  3967. phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
  3968. /* Update link event statistics */
  3969. phba->sli.slistat.link_event++;
  3970. /* Create lpfc_handle_latt mailbox command from link ACQE */
  3971. lpfc_read_topology(phba, pmb, mp);
  3972. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
  3973. pmb->vport = phba->pport;
  3974. /* Keep the link status for extra SLI4 state machine reference */
  3975. phba->sli4_hba.link_state.speed =
  3976. lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
  3977. bf_get(lpfc_acqe_link_speed, acqe_link));
  3978. phba->sli4_hba.link_state.duplex =
  3979. bf_get(lpfc_acqe_link_duplex, acqe_link);
  3980. phba->sli4_hba.link_state.status =
  3981. bf_get(lpfc_acqe_link_status, acqe_link);
  3982. phba->sli4_hba.link_state.type =
  3983. bf_get(lpfc_acqe_link_type, acqe_link);
  3984. phba->sli4_hba.link_state.number =
  3985. bf_get(lpfc_acqe_link_number, acqe_link);
  3986. phba->sli4_hba.link_state.fault =
  3987. bf_get(lpfc_acqe_link_fault, acqe_link);
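  /* logical link speed is reported in 10 Mbps units; scale to Mbps */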
  3988. phba->sli4_hba.link_state.logical_speed =
  3989. bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
  3990. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3991. "2900 Async FC/FCoE Link event - Speed:%dGBit "
  3992. "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
  3993. "Logical speed:%dMbps Fault:%d\n",
  3994. phba->sli4_hba.link_state.speed,
  3995. phba->sli4_hba.link_state.topology,
  3996. phba->sli4_hba.link_state.status,
  3997. phba->sli4_hba.link_state.type,
  3998. phba->sli4_hba.link_state.number,
  3999. phba->sli4_hba.link_state.logical_speed,
  4000. phba->sli4_hba.link_state.fault);
  4001. /*
  4002. * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
  4003. * topology info. Note: Optional for non FC-AL ports.
  4004. */
  4005. if (!(phba->hba_flag & HBA_FCOE_MODE)) {
  4006. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  4007. if (rc == MBX_NOT_FINISHED)
  4008. goto out_free_dmabuf;
  4009. return;
  4010. }
  4011. /*
  4012. * For FCoE Mode: fill in all the topology information we need and call
  4013. * the READ_TOPOLOGY completion routine to continue without actually
  4014. * sending the READ_TOPOLOGY mailbox command to the port.
  4015. */
  4016. /* Parse and translate status field */
  4017. mb = &pmb->u.mb;
  4018. mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
  4019. /* Parse and translate link attention fields */
  4020. la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
  4021. la->eventTag = acqe_link->event_tag;
  4022. bf_set(lpfc_mbx_read_top_att_type, la, att_type);
  4023. bf_set(lpfc_mbx_read_top_link_spd, la,
  4024. (bf_get(lpfc_acqe_link_speed, acqe_link)));
  4025. /* Fake the following irrelevant fields */
  4026. bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
  4027. bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
  4028. bf_set(lpfc_mbx_read_top_il, la, 0);
  4029. bf_set(lpfc_mbx_read_top_pb, la, 0);
  4030. bf_set(lpfc_mbx_read_top_fa, la, 0);
  4031. bf_set(lpfc_mbx_read_top_mm, la, 0);
  4032. /* Invoke the lpfc_handle_latt mailbox command callback function */
  4033. lpfc_mbx_cmpl_read_topology(phba, pmb);
  4034. return;
  4035. out_free_dmabuf:
  4036. kfree(mp);
  4037. out_free_pmb:
  4038. mempool_free(pmb, phba->mbox_mem_pool);
  4039. }
  4040. /**
  4041. * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
  4042. * @phba: pointer to lpfc hba data structure.
  4043. * @acqe_fc: pointer to the async fc completion queue entry.
  4044. *
  4045. * This routine is to handle the SLI4 asynchronous FC event. It will simply log
  4046. * that the event was received and then issue a read_topology mailbox command so
  4047. * that the rest of the driver will treat it the same as SLI3.
  4048. **/
  4049. static void
  4050. lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
  4051. {
  4052. struct lpfc_dmabuf *mp;
  4053. LPFC_MBOXQ_t *pmb;
  4054. MAILBOX_t *mb;
  4055. struct lpfc_mbx_read_top *la;
  4056. int rc;
  4057. if (bf_get(lpfc_trailer_type, acqe_fc) !=
  4058. LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
  4059. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4060. "2895 Non FC link Event detected.(%d)\n",
  4061. bf_get(lpfc_trailer_type, acqe_fc));
  4062. return;
  4063. }
  4064. /* Keep the link status for extra SLI4 state machine reference */
  4065. phba->sli4_hba.link_state.speed =
  4066. lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
  4067. bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
  4068. phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
  4069. phba->sli4_hba.link_state.topology =
  4070. bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
  4071. phba->sli4_hba.link_state.status =
  4072. bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
  4073. phba->sli4_hba.link_state.type =
  4074. bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
  4075. phba->sli4_hba.link_state.number =
  4076. bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
  4077. phba->sli4_hba.link_state.fault =
  4078. bf_get(lpfc_acqe_link_fault, acqe_fc);
  4079. phba->sli4_hba.link_state.logical_speed =
  4080. bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
  4081. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4082. "2896 Async FC event - Speed:%dGBaud Topology:x%x "
  4083. "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
  4084. "%dMbps Fault:%d\n",
  4085. phba->sli4_hba.link_state.speed,
  4086. phba->sli4_hba.link_state.topology,
  4087. phba->sli4_hba.link_state.status,
  4088. phba->sli4_hba.link_state.type,
  4089. phba->sli4_hba.link_state.number,
  4090. phba->sli4_hba.link_state.logical_speed,
  4091. phba->sli4_hba.link_state.fault);
  4092. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  4093. if (!pmb) {
  4094. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4095. "2897 The mboxq allocation failed\n");
  4096. return;
  4097. }
  4098. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  4099. if (!mp) {
  4100. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4101. "2898 The lpfc_dmabuf allocation failed\n");
  4102. goto out_free_pmb;
  4103. }
  4104. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  4105. if (!mp->virt) {
  4106. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4107. "2899 The mbuf allocation failed\n");
  4108. goto out_free_dmabuf;
  4109. }
  4110. /* Cleanup any outstanding ELS commands */
  4111. lpfc_els_flush_all_cmd(phba);
  4112. /* Block ELS IOCBs until we have done process link event */
  4113. phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
  4114. /* Update link event statistics */
  4115. phba->sli.slistat.link_event++;
  4116. /* Create lpfc_handle_latt mailbox command from link ACQE */
  4117. lpfc_read_topology(phba, pmb, mp);
  4118. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
  4119. pmb->vport = phba->pport;
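  /* If the link is not up, synthesize the READ_TOPOLOGY completion locally instead of issuing the mailbox command */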
  4120. if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
  4121. /* Parse and translate status field */
  4122. mb = &pmb->u.mb;
  4123. mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
  4124. (void *)acqe_fc);
  4125. /* Parse and translate link attention fields */
  4126. la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
  4127. la->eventTag = acqe_fc->event_tag;
  4128. bf_set(lpfc_mbx_read_top_att_type, la,
  4129. LPFC_FC_LA_TYPE_LINK_DOWN);
  4130. /* Invoke the mailbox command callback function */
  4131. lpfc_mbx_cmpl_read_topology(phba, pmb);
  4132. return;
  4133. }
  4134. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  4135. if (rc == MBX_NOT_FINISHED)
  4136. goto out_free_dmabuf;
  4137. return;
  4138. out_free_dmabuf:
  4139. kfree(mp);
  4140. out_free_pmb:
  4141. mempool_free(pmb, phba->mbox_mem_pool);
  4142. }
  4143. /**
  4144. * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
  4145. * @phba: pointer to lpfc hba data structure.
  4146. * @acqe_fc: pointer to the async SLI completion queue entry.
  4147. *
  4148. * This routine is to handle the SLI4 asynchronous SLI events.
  4149. **/
  4150. static void
  4151. lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
  4152. {
  4153. char port_name;
  4154. char message[128];
  4155. uint8_t status;
  4156. uint8_t evt_type;
  4157. uint8_t operational = 0;
  4158. struct temp_event temp_event_data;
  4159. struct lpfc_acqe_misconfigured_event *misconfigured;
  4160. struct Scsi_Host *shost;
  4161. evt_type = bf_get(lpfc_trailer_type, acqe_sli);
  4162. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4163. "2901 Async SLI event - Event Data1:x%08x Event Data2:"
  4164. "x%08x SLI Event Type:%d\n",
  4165. acqe_sli->event_data1, acqe_sli->event_data2,
  4166. evt_type);
  4167. port_name = phba->Port[0];
  4168. if (port_name == 0x00)
  4169. port_name = '?'; /* fetched port name is empty */
  4170. switch (evt_type) {
  4171. case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
  4172. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  4173. temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
  4174. temp_event_data.data = (uint32_t)acqe_sli->event_data1;
  4175. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  4176. "3190 Over Temperature:%d Celsius- Port Name %c\n",
  4177. acqe_sli->event_data1, port_name);
  4178. phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
  4179. shost = lpfc_shost_from_vport(phba->pport);
  4180. fc_host_post_vendor_event(shost, fc_get_event_number(),
  4181. sizeof(temp_event_data),
  4182. (char *)&temp_event_data,
  4183. SCSI_NL_VID_TYPE_PCI
  4184. | PCI_VENDOR_ID_EMULEX);
  4185. break;
  4186. case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
  4187. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  4188. temp_event_data.event_code = LPFC_NORMAL_TEMP;
  4189. temp_event_data.data = (uint32_t)acqe_sli->event_data1;
  4190. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4191. "3191 Normal Temperature:%d Celsius - Port Name %c\n",
  4192. acqe_sli->event_data1, port_name);
  4193. shost = lpfc_shost_from_vport(phba->pport);
  4194. fc_host_post_vendor_event(shost, fc_get_event_number(),
  4195. sizeof(temp_event_data),
  4196. (char *)&temp_event_data,
  4197. SCSI_NL_VID_TYPE_PCI
  4198. | PCI_VENDOR_ID_EMULEX);
  4199. break;
  4200. case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
  4201. misconfigured = (struct lpfc_acqe_misconfigured_event *)
  4202. &acqe_sli->event_data1;
  4203. /* fetch the status for this port */
  4204. switch (phba->sli4_hba.lnk_info.lnk_no) {
  4205. case LPFC_LINK_NUMBER_0:
  4206. status = bf_get(lpfc_sli_misconfigured_port0_state,
  4207. &misconfigured->theEvent);
  4208. operational = bf_get(lpfc_sli_misconfigured_port0_op,
  4209. &misconfigured->theEvent);
  4210. break;
  4211. case LPFC_LINK_NUMBER_1:
  4212. status = bf_get(lpfc_sli_misconfigured_port1_state,
  4213. &misconfigured->theEvent);
  4214. operational = bf_get(lpfc_sli_misconfigured_port1_op,
  4215. &misconfigured->theEvent);
  4216. break;
  4217. case LPFC_LINK_NUMBER_2:
  4218. status = bf_get(lpfc_sli_misconfigured_port2_state,
  4219. &misconfigured->theEvent);
  4220. operational = bf_get(lpfc_sli_misconfigured_port2_op,
  4221. &misconfigured->theEvent);
  4222. break;
  4223. case LPFC_LINK_NUMBER_3:
  4224. status = bf_get(lpfc_sli_misconfigured_port3_state,
  4225. &misconfigured->theEvent);
  4226. operational = bf_get(lpfc_sli_misconfigured_port3_op,
  4227. &misconfigured->theEvent);
  4228. break;
  4229. default:
  4230. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4231. "3296 "
  4232. "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
  4233. "event: Invalid link %d",
  4234. phba->sli4_hba.lnk_info.lnk_no);
  4235. return;
  4236. }
  4237. /* Skip if optic state unchanged */
  4238. if (phba->sli4_hba.lnk_info.optic_state == status)
  4239. return;
  4240. switch (status) {
  4241. case LPFC_SLI_EVENT_STATUS_VALID:
  4242. sprintf(message, "Physical Link is functional");
  4243. break;
  4244. case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
  4245. sprintf(message, "Optics faulted/incorrectly "
  4246. "installed/not installed - Reseat optics, "
  4247. "if issue not resolved, replace.");
  4248. break;
  4249. case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
  4250. sprintf(message,
  4251. "Optics of two types installed - Remove one "
  4252. "optic or install matching pair of optics.");
  4253. break;
  4254. case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
  4255. sprintf(message, "Incompatible optics - Replace with "
  4256. "compatible optics for card to function.");
  4257. break;
  4258. case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
  4259. sprintf(message, "Unqualified optics - Replace with "
  4260. "Avago optics for Warranty and Technical "
  4261. "Support - Link is%s operational",
  4262. (operational) ? " not" : "");
  4263. break;
  4264. case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
  4265. sprintf(message, "Uncertified optics - Replace with "
  4266. "Avago-certified optics to enable link "
  4267. "operation - Link is%s operational",
  4268. (operational) ? " not" : "");
  4269. break;
  4270. default:
  4271. /* firmware is reporting a status we don't know about */
  4272. sprintf(message, "Unknown event status x%02x", status);
  4273. break;
  4274. }
  4275. phba->sli4_hba.lnk_info.optic_state = status;
  4276. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4277. "3176 Port Name %c %s\n", port_name, message);
  4278. break;
  4279. case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
  4280. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4281. "3192 Remote DPort Test Initiated - "
  4282. "Event Data1:x%08x Event Data2: x%08x\n",
  4283. acqe_sli->event_data1, acqe_sli->event_data2);
  4284. break;
  4285. default:
  4286. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4287. "3193 Async SLI event - Event Data1:x%08x Event Data2:"
  4288. "x%08x SLI Event Type:%d\n",
  4289. acqe_sli->event_data1, acqe_sli->event_data2,
  4290. evt_type);
  4291. break;
  4292. }
  4293. }
  4294. /**
  4295. * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
  4296. * @vport: pointer to vport data structure.
  4297. *
  4298. * This routine is to perform Clear Virtual Link (CVL) on a vport in
  4299. * response to a CVL event.
  4300. *
  4301. * Return the pointer to the ndlp with the vport if successful, otherwise
  4302. * return NULL.
  4303. **/
  4304. static struct lpfc_nodelist *
  4305. lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
  4306. {
  4307. struct lpfc_nodelist *ndlp;
  4308. struct Scsi_Host *shost;
  4309. struct lpfc_hba *phba;
  4310. if (!vport)
  4311. return NULL;
  4312. phba = vport->phba;
  4313. if (!phba)
  4314. return NULL;
  4315. ndlp = lpfc_findnode_did(vport, Fabric_DID);
  4316. if (!ndlp) {
  4317. /* Cannot find existing Fabric ndlp, so allocate a new one */
  4318. ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
  4319. if (!ndlp)
  4320. return NULL;
  4321. lpfc_nlp_init(vport, ndlp, Fabric_DID);
  4322. /* Set the node type */
  4323. ndlp->nlp_type |= NLP_FABRIC;
  4324. /* Put ndlp onto node list */
  4325. lpfc_enqueue_node(vport, ndlp);
  4326. } else if (!NLP_CHK_NODE_ACT(ndlp)) {
  4327. /* re-setup ndlp without removing from node list */
  4328. ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
  4329. if (!ndlp)
  4330. return NULL;
  4331. }
  4332. if ((phba->pport->port_state < LPFC_FLOGI) &&
  4333. (phba->pport->port_state != LPFC_VPORT_FAILED))
  4334. return NULL;
  4335. /* If virtual link is not yet instantiated ignore CVL */
  4336. if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
  4337. && (vport->port_state != LPFC_VPORT_FAILED))
  4338. return NULL;
  4339. shost = lpfc_shost_from_vport(vport);
  4340. if (!shost)
  4341. return NULL;
  4342. lpfc_linkdown_port(vport);
  4343. lpfc_cleanup_pending_mbox(vport);
  4344. spin_lock_irq(shost->host_lock);
  4345. vport->fc_flag |= FC_VPORT_CVL_RCVD;
  4346. spin_unlock_irq(shost->host_lock);
  4347. return ndlp;
  4348. }
  4349. /**
  4350. * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
  4351. * @vport: pointer to lpfc hba data structure.
  4352. *
  4353. * This routine is to perform Clear Virtual Link (CVL) on all vports in
  4354. * response to a FCF dead event.
  4355. **/
  4356. static void
  4357. lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
  4358. {
  4359. struct lpfc_vport **vports;
  4360. int i;
  4361. vports = lpfc_create_vport_work_array(phba);
  4362. if (vports)
  4363. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
  4364. lpfc_sli4_perform_vport_cvl(vports[i]);
  4365. lpfc_destroy_vport_work_array(phba, vports);
  4366. }
  4367. /**
  4368. * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  4369. * @phba: pointer to lpfc hba data structure.
  4370. * @acqe_link: pointer to the async fcoe completion queue entry.
  4371. *
  4372. * This routine is to handle the SLI4 asynchronous fcoe event.
  4373. **/
  4374. static void
  4375. lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
  4376. struct lpfc_acqe_fip *acqe_fip)
  4377. {
  4378. uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
  4379. int rc;
  4380. struct lpfc_vport *vport;
  4381. struct lpfc_nodelist *ndlp;
  4382. struct Scsi_Host *shost;
  4383. int active_vlink_present;
  4384. struct lpfc_vport **vports;
  4385. int i;
  4386. phba->fc_eventTag = acqe_fip->event_tag;
  4387. phba->fcoe_eventtag = acqe_fip->event_tag;
  4388. switch (event_type) {
  4389. case LPFC_FIP_EVENT_TYPE_NEW_FCF:
  4390. case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
  4391. if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
  4392. lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
  4393. LOG_DISCOVERY,
  4394. "2546 New FCF event, evt_tag:x%x, "
  4395. "index:x%x\n",
  4396. acqe_fip->event_tag,
  4397. acqe_fip->index);
  4398. else
  4399. lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
  4400. LOG_DISCOVERY,
  4401. "2788 FCF param modified event, "
  4402. "evt_tag:x%x, index:x%x\n",
  4403. acqe_fip->event_tag,
  4404. acqe_fip->index);
  4405. if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
  4406. /*
  4407. * During period of FCF discovery, read the FCF
  4408. * table record indexed by the event to update
  4409. * FCF roundrobin failover eligible FCF bmask.
  4410. */
  4411. lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
  4412. LOG_DISCOVERY,
  4413. "2779 Read FCF (x%x) for updating "
  4414. "roundrobin FCF failover bmask\n",
  4415. acqe_fip->index);
  4416. rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
  4417. }
  4418. /* If the FCF discovery is in progress, do nothing. */
  4419. spin_lock_irq(&phba->hbalock);
  4420. if (phba->hba_flag & FCF_TS_INPROG) {
  4421. spin_unlock_irq(&phba->hbalock);
  4422. break;
  4423. }
  4424. /* If fast FCF failover rescan event is pending, do nothing */
  4425. if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
  4426. spin_unlock_irq(&phba->hbalock);
  4427. break;
  4428. }
  4429. /* If the FCF has been in discovered state, do nothing. */
  4430. if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
  4431. spin_unlock_irq(&phba->hbalock);
  4432. break;
  4433. }
  4434. spin_unlock_irq(&phba->hbalock);
  4435. /* Otherwise, scan the entire FCF table and re-discover SAN */
  4436. lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
  4437. "2770 Start FCF table scan per async FCF "
  4438. "event, evt_tag:x%x, index:x%x\n",
  4439. acqe_fip->event_tag, acqe_fip->index);
  4440. rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
  4441. LPFC_FCOE_FCF_GET_FIRST);
  4442. if (rc)
  4443. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
  4444. "2547 Issue FCF scan read FCF mailbox "
  4445. "command failed (x%x)\n", rc);
  4446. break;
  4447. case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
  4448. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4449. "2548 FCF Table full count 0x%x tag 0x%x\n",
  4450. bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
  4451. acqe_fip->event_tag);
  4452. break;
  4453. case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
  4454. phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
  4455. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
  4456. "2549 FCF (x%x) disconnected from network, "
  4457. "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
  4458. /*
  4459. * If we are in the middle of FCF failover process, clear
  4460. * the corresponding FCF bit in the roundrobin bitmap.
  4461. */
  4462. spin_lock_irq(&phba->hbalock);
  4463. if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
  4464. (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
  4465. spin_unlock_irq(&phba->hbalock);
  4466. /* Update FLOGI FCF failover eligible FCF bmask */
  4467. lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
  4468. break;
  4469. }
  4470. spin_unlock_irq(&phba->hbalock);
4471. /* If the event is not for the currently used FCF, do nothing */
  4472. if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
  4473. break;
  4474. /*
4475. * Otherwise, request the port to rediscover the entire FCF
4476. * table for a fast recovery in case the current FCF is no
4477. * longer valid, as we are not already in the middle of the
4478. * FCF failover process.
  4479. */
  4480. spin_lock_irq(&phba->hbalock);
  4481. /* Mark the fast failover process in progress */
  4482. phba->fcf.fcf_flag |= FCF_DEAD_DISC;
  4483. spin_unlock_irq(&phba->hbalock);
  4484. lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
  4485. "2771 Start FCF fast failover process due to "
  4486. "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
  4487. "\n", acqe_fip->event_tag, acqe_fip->index);
  4488. rc = lpfc_sli4_redisc_fcf_table(phba);
  4489. if (rc) {
  4490. lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
  4491. LOG_DISCOVERY,
  4492. "2772 Issue FCF rediscover mabilbox "
  4493. "command failed, fail through to FCF "
  4494. "dead event\n");
  4495. spin_lock_irq(&phba->hbalock);
  4496. phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
  4497. spin_unlock_irq(&phba->hbalock);
  4498. /*
  4499. * Last resort will fail over by treating this
  4500. * as a link down to FCF registration.
  4501. */
  4502. lpfc_sli4_fcf_dead_failthrough(phba);
  4503. } else {
  4504. /* Reset FCF roundrobin bmask for new discovery */
  4505. lpfc_sli4_clear_fcf_rr_bmask(phba);
  4506. /*
  4507. * Handling fast FCF failover to a DEAD FCF event is
4508. * considered equivalent to receiving CVL to all vports.
  4509. */
  4510. lpfc_sli4_perform_all_vport_cvl(phba);
  4511. }
  4512. break;
  4513. case LPFC_FIP_EVENT_TYPE_CVL:
  4514. phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
  4515. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
  4516. "2718 Clear Virtual Link Received for VPI 0x%x"
  4517. " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
  4518. vport = lpfc_find_vport_by_vpid(phba,
  4519. acqe_fip->index);
  4520. ndlp = lpfc_sli4_perform_vport_cvl(vport);
  4521. if (!ndlp)
  4522. break;
  4523. active_vlink_present = 0;
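/* Walk all vports looking for another virtual link that is still
 * active, i.e. one that has progressed past FDISC and has not
 * itself received a CVL.
 */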
  4524. vports = lpfc_create_vport_work_array(phba);
  4525. if (vports) {
  4526. for (i = 0; i <= phba->max_vports && vports[i] != NULL;
  4527. i++) {
  4528. if ((!(vports[i]->fc_flag &
  4529. FC_VPORT_CVL_RCVD)) &&
  4530. (vports[i]->port_state > LPFC_FDISC)) {
  4531. active_vlink_present = 1;
  4532. break;
  4533. }
  4534. }
  4535. lpfc_destroy_vport_work_array(phba, vports);
  4536. }
  4537. /*
  4538. * Don't re-instantiate if vport is marked for deletion.
  4539. * If we are here first then vport_delete is going to wait
  4540. * for discovery to complete.
  4541. */
  4542. if (!(vport->load_flag & FC_UNLOADING) &&
  4543. active_vlink_present) {
  4544. /*
  4545. * If there are other active VLinks present,
  4546. * re-instantiate the Vlink using FDISC.
  4547. */
  4548. mod_timer(&ndlp->nlp_delayfunc,
  4549. jiffies + msecs_to_jiffies(1000));
  4550. shost = lpfc_shost_from_vport(vport);
  4551. spin_lock_irq(shost->host_lock);
  4552. ndlp->nlp_flag |= NLP_DELAY_TMO;
  4553. spin_unlock_irq(shost->host_lock);
  4554. ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
  4555. vport->port_state = LPFC_FDISC;
  4556. } else {
  4557. /*
4558. * Otherwise, request the port to rediscover
4559. * the entire FCF table for a fast recovery
4560. * in case the current FCF is no longer
4561. * valid, if we are not already in the FCF
4562. * failover process.
  4563. */
  4564. spin_lock_irq(&phba->hbalock);
  4565. if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
  4566. spin_unlock_irq(&phba->hbalock);
  4567. break;
  4568. }
  4569. /* Mark the fast failover process in progress */
  4570. phba->fcf.fcf_flag |= FCF_ACVL_DISC;
  4571. spin_unlock_irq(&phba->hbalock);
  4572. lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
  4573. LOG_DISCOVERY,
  4574. "2773 Start FCF failover per CVL, "
  4575. "evt_tag:x%x\n", acqe_fip->event_tag);
  4576. rc = lpfc_sli4_redisc_fcf_table(phba);
  4577. if (rc) {
  4578. lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
  4579. LOG_DISCOVERY,
  4580. "2774 Issue FCF rediscover "
  4581. "mabilbox command failed, "
  4582. "through to CVL event\n");
  4583. spin_lock_irq(&phba->hbalock);
  4584. phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
  4585. spin_unlock_irq(&phba->hbalock);
  4586. /*
4587. * Last resort will be re-try on the
4588. * current registered FCF entry.
  4589. */
  4590. lpfc_retry_pport_discovery(phba);
  4591. } else
  4592. /*
  4593. * Reset FCF roundrobin bmask for new
  4594. * discovery.
  4595. */
  4596. lpfc_sli4_clear_fcf_rr_bmask(phba);
  4597. }
  4598. break;
  4599. default:
  4600. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4601. "0288 Unknown FCoE event type 0x%x event tag "
  4602. "0x%x\n", event_type, acqe_fip->event_tag);
  4603. break;
  4604. }
  4605. }
  4606. /**
  4607. * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
  4608. * @phba: pointer to lpfc hba data structure.
4609. * @acqe_dcbx: pointer to the async DCBX completion queue entry.
4610. *
4611. * This routine is to handle the SLI4 asynchronous DCBX event.
  4612. **/
  4613. static void
  4614. lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
  4615. struct lpfc_acqe_dcbx *acqe_dcbx)
  4616. {
  4617. phba->fc_eventTag = acqe_dcbx->event_tag;
  4618. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4619. "0290 The SLI4 DCBX asynchronous event is not "
  4620. "handled yet\n");
  4621. }
  4622. /**
  4623. * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
  4624. * @phba: pointer to lpfc hba data structure.
4625. * @acqe_grp5: pointer to the async grp5 completion queue entry.
4626. *
4627. * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4628. * is an asynchronous notification of a logical link speed change. The Port
  4629. * reports the logical link speed in units of 10Mbps.
  4630. **/
  4631. static void
  4632. lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
  4633. struct lpfc_acqe_grp5 *acqe_grp5)
  4634. {
  4635. uint16_t prev_ll_spd;
  4636. phba->fc_eventTag = acqe_grp5->event_tag;
  4637. phba->fcoe_eventtag = acqe_grp5->event_tag;
  4638. prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
  4639. phba->sli4_hba.link_state.logical_speed =
  4640. (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
  4641. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4642. "2789 GRP5 Async Event: Updating logical link speed "
  4643. "from %dMbps to %dMbps\n", prev_ll_spd,
  4644. phba->sli4_hba.link_state.logical_speed);
  4645. }
  4646. /**
4647. * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
  4648. * @phba: pointer to lpfc hba data structure.
  4649. *
  4650. * This routine is invoked by the worker thread to process all the pending
  4651. * SLI4 asynchronous events.
  4652. **/
  4653. void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
  4654. {
  4655. struct lpfc_cq_event *cq_event;
  4656. /* First, declare the async event has been handled */
  4657. spin_lock_irq(&phba->hbalock);
  4658. phba->hba_flag &= ~ASYNC_EVENT;
  4659. spin_unlock_irq(&phba->hbalock);
  4660. /* Now, handle all the async events */
  4661. while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
  4662. /* Get the first event from the head of the event queue */
  4663. spin_lock_irq(&phba->hbalock);
  4664. list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
  4665. cq_event, struct lpfc_cq_event, list);
  4666. spin_unlock_irq(&phba->hbalock);
  4667. /* Process the asynchronous event */
  4668. switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
  4669. case LPFC_TRAILER_CODE_LINK:
  4670. lpfc_sli4_async_link_evt(phba,
  4671. &cq_event->cqe.acqe_link);
  4672. break;
  4673. case LPFC_TRAILER_CODE_FCOE:
  4674. lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
  4675. break;
  4676. case LPFC_TRAILER_CODE_DCBX:
  4677. lpfc_sli4_async_dcbx_evt(phba,
  4678. &cq_event->cqe.acqe_dcbx);
  4679. break;
  4680. case LPFC_TRAILER_CODE_GRP5:
  4681. lpfc_sli4_async_grp5_evt(phba,
  4682. &cq_event->cqe.acqe_grp5);
  4683. break;
  4684. case LPFC_TRAILER_CODE_FC:
  4685. lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
  4686. break;
  4687. case LPFC_TRAILER_CODE_SLI:
  4688. lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
  4689. break;
  4690. default:
  4691. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4692. "1804 Invalid asynchrous event code: "
  4693. "x%x\n", bf_get(lpfc_trailer_code,
  4694. &cq_event->cqe.mcqe_cmpl));
  4695. break;
  4696. }
  4697. /* Free the completion event processed to the free pool */
  4698. lpfc_sli4_cq_event_release(phba, cq_event);
  4699. }
  4700. }
  4701. /**
  4702. * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
  4703. * @phba: pointer to lpfc hba data structure.
  4704. *
  4705. * This routine is invoked by the worker thread to process FCF table
  4706. * rediscovery pending completion event.
  4707. **/
  4708. void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
  4709. {
  4710. int rc;
  4711. spin_lock_irq(&phba->hbalock);
  4712. /* Clear FCF rediscovery timeout event */
  4713. phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
  4714. /* Clear driver fast failover FCF record flag */
  4715. phba->fcf.failover_rec.flag = 0;
  4716. /* Set state for FCF fast failover */
  4717. phba->fcf.fcf_flag |= FCF_REDISC_FOV;
  4718. spin_unlock_irq(&phba->hbalock);
  4719. /* Scan FCF table from the first entry to re-discover SAN */
  4720. lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
  4721. "2777 Start post-quiescent FCF table scan\n");
  4722. rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
  4723. if (rc)
  4724. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
  4725. "2747 Issue FCF scan read FCF mailbox "
  4726. "command failed 0x%x\n", rc);
  4727. }
  4728. /**
  4729. * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
  4730. * @phba: pointer to lpfc hba data structure.
  4731. * @dev_grp: The HBA PCI-Device group number.
  4732. *
  4733. * This routine is invoked to set up the per HBA PCI-Device group function
  4734. * API jump table entries.
  4735. *
  4736. * Return: 0 if success, otherwise -ENODEV
  4737. **/
  4738. int
  4739. lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  4740. {
  4741. int rc;
  4742. /* Set up lpfc PCI-device group */
  4743. phba->pci_dev_grp = dev_grp;
  4744. /* The LPFC_PCI_DEV_OC uses SLI4 */
  4745. if (dev_grp == LPFC_PCI_DEV_OC)
  4746. phba->sli_rev = LPFC_SLI_REV4;
  4747. /* Set up device INIT API function jump table */
  4748. rc = lpfc_init_api_table_setup(phba, dev_grp);
  4749. if (rc)
  4750. return -ENODEV;
  4751. /* Set up SCSI API function jump table */
  4752. rc = lpfc_scsi_api_table_setup(phba, dev_grp);
  4753. if (rc)
  4754. return -ENODEV;
  4755. /* Set up SLI API function jump table */
  4756. rc = lpfc_sli_api_table_setup(phba, dev_grp);
  4757. if (rc)
  4758. return -ENODEV;
  4759. /* Set up MBOX API function jump table */
  4760. rc = lpfc_mbox_api_table_setup(phba, dev_grp);
  4761. if (rc)
  4762. return -ENODEV;
  4763. return 0;
  4764. }
  4765. /**
  4766. * lpfc_log_intr_mode - Log the active interrupt mode
  4767. * @phba: pointer to lpfc hba data structure.
  4768. * @intr_mode: active interrupt mode adopted.
  4769. *
4770. * This routine is invoked to log the interrupt mode currently in
4771. * use by the device.
  4772. **/
  4773. static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
  4774. {
  4775. switch (intr_mode) {
  4776. case 0:
  4777. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4778. "0470 Enable INTx interrupt mode.\n");
  4779. break;
  4780. case 1:
  4781. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4782. "0481 Enabled MSI interrupt mode.\n");
  4783. break;
  4784. case 2:
  4785. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4786. "0480 Enabled MSI-X interrupt mode.\n");
  4787. break;
  4788. default:
  4789. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  4790. "0482 Illegal interrupt mode.\n");
  4791. break;
  4792. }
  4793. return;
  4794. }
  4795. /**
  4796. * lpfc_enable_pci_dev - Enable a generic PCI device.
  4797. * @phba: pointer to lpfc hba data structure.
  4798. *
  4799. * This routine is invoked to enable the PCI device that is common to all
  4800. * PCI devices.
  4801. *
  4802. * Return codes
  4803. * 0 - successful
  4804. * other values - error
  4805. **/
  4806. static int
  4807. lpfc_enable_pci_dev(struct lpfc_hba *phba)
  4808. {
  4809. struct pci_dev *pdev;
  4810. /* Obtain PCI device reference */
  4811. if (!phba->pcidev)
  4812. goto out_error;
  4813. else
  4814. pdev = phba->pcidev;
  4815. /* Enable PCI device */
  4816. if (pci_enable_device_mem(pdev))
  4817. goto out_error;
  4818. /* Request PCI resource for the device */
  4819. if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
  4820. goto out_disable_device;
  4821. /* Set up device as PCI master and save state for EEH */
  4822. pci_set_master(pdev);
  4823. pci_try_set_mwi(pdev);
  4824. pci_save_state(pdev);
  4825. /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
  4826. if (pci_is_pcie(pdev))
  4827. pdev->needs_freset = 1;
  4828. return 0;
  4829. out_disable_device:
  4830. pci_disable_device(pdev);
  4831. out_error:
  4832. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  4833. "1401 Failed to enable pci device\n");
  4834. return -ENODEV;
  4835. }
  4836. /**
  4837. * lpfc_disable_pci_dev - Disable a generic PCI device.
  4838. * @phba: pointer to lpfc hba data structure.
  4839. *
  4840. * This routine is invoked to disable the PCI device that is common to all
  4841. * PCI devices.
  4842. **/
  4843. static void
  4844. lpfc_disable_pci_dev(struct lpfc_hba *phba)
  4845. {
  4846. struct pci_dev *pdev;
  4847. /* Obtain PCI device reference */
  4848. if (!phba->pcidev)
  4849. return;
  4850. else
  4851. pdev = phba->pcidev;
  4852. /* Release PCI resource and disable PCI device */
  4853. pci_release_mem_regions(pdev);
  4854. pci_disable_device(pdev);
  4855. return;
  4856. }
  4857. /**
  4858. * lpfc_reset_hba - Reset a hba
  4859. * @phba: pointer to lpfc hba data structure.
  4860. *
  4861. * This routine is invoked to reset a hba device. It brings the HBA
  4862. * offline, performs a board restart, and then brings the board back
  4863. * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
  4864. * on outstanding mailbox commands.
  4865. **/
  4866. void
  4867. lpfc_reset_hba(struct lpfc_hba *phba)
  4868. {
  4869. /* If resets are disabled then set error state and return. */
  4870. if (!phba->cfg_enable_hba_reset) {
  4871. phba->link_state = LPFC_HBA_ERROR;
  4872. return;
  4873. }
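/* Quiesce the port before the restart: wait for outstanding mailbox
 * commands only when the SLI layer is still active.
 */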
  4874. if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
  4875. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  4876. else
  4877. lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
  4878. lpfc_offline(phba);
  4879. lpfc_sli_brdrestart(phba);
  4880. lpfc_online(phba);
  4881. lpfc_unblock_mgmt_io(phba);
  4882. }
  4883. /**
  4884. * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
  4885. * @phba: pointer to lpfc hba data structure.
  4886. *
4887. *
4888. * This function reads the PCI SR-IOV extended capability of the physical
4889. * function and returns the total number of virtual functions (TotalVFs)
4890. * the device supports. It returns 0 when the SR-IOV capability is not
4891. * present, i.e. the device does not support SR-IOV.
  4892. **/
  4893. uint16_t
  4894. lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
  4895. {
  4896. struct pci_dev *pdev = phba->pcidev;
  4897. uint16_t nr_virtfn;
  4898. int pos;
  4899. pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
  4900. if (pos == 0)
  4901. return 0;
  4902. pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
  4903. return nr_virtfn;
  4904. }
  4905. /**
  4906. * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
  4907. * @phba: pointer to lpfc hba data structure.
  4908. * @nr_vfn: number of virtual functions to be enabled.
  4909. *
  4910. * This function enables the PCI SR-IOV virtual functions to a physical
  4911. * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
  4912. * enable the number of virtual functions to the physical function. As
  4913. * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4914. * API call is not considered an error condition for most devices.
  4915. **/
  4916. int
  4917. lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
  4918. {
  4919. struct pci_dev *pdev = phba->pcidev;
  4920. uint16_t max_nr_vfn;
  4921. int rc;
  4922. max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
  4923. if (nr_vfn > max_nr_vfn) {
  4924. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  4925. "3057 Requested vfs (%d) greater than "
  4926. "supported vfs (%d)", nr_vfn, max_nr_vfn);
  4927. return -EINVAL;
  4928. }
  4929. rc = pci_enable_sriov(pdev, nr_vfn);
  4930. if (rc) {
  4931. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  4932. "2806 Failed to enable sriov on this device "
  4933. "with vfn number nr_vf:%d, rc:%d\n",
  4934. nr_vfn, rc);
  4935. } else
  4936. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  4937. "2807 Successful enable sriov on this device "
  4938. "with vfn number nr_vf:%d\n", nr_vfn);
  4939. return rc;
  4940. }
  4941. /**
4942. * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
  4943. * @phba: pointer to lpfc hba data structure.
  4944. *
  4945. * This routine is invoked to set up the driver internal resources before the
4946. * device specific resource setup to support the HBA device it is attached to.
  4947. *
  4948. * Return codes
  4949. * 0 - successful
  4950. * other values - error
  4951. **/
  4952. static int
  4953. lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
  4954. {
  4955. struct lpfc_sli *psli = &phba->sli;
  4956. /*
  4957. * Driver resources common to all SLI revisions
  4958. */
  4959. atomic_set(&phba->fast_event_count, 0);
  4960. spin_lock_init(&phba->hbalock);
  4961. /* Initialize ndlp management spinlock */
  4962. spin_lock_init(&phba->ndlp_lock);
  4963. INIT_LIST_HEAD(&phba->port_list);
  4964. INIT_LIST_HEAD(&phba->work_list);
  4965. init_waitqueue_head(&phba->wait_4_mlo_m_q);
  4966. /* Initialize the wait queue head for the kernel thread */
  4967. init_waitqueue_head(&phba->work_waitq);
  4968. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4969. "1403 Protocols supported %s %s %s\n",
  4970. ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
  4971. "SCSI" : " "),
  4972. ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
  4973. "NVME" : " "),
  4974. (phba->nvmet_support ? "NVMET" : " "));
  4975. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
  4976. /* Initialize the scsi buffer list used by driver for scsi IO */
  4977. spin_lock_init(&phba->scsi_buf_list_get_lock);
  4978. INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
  4979. spin_lock_init(&phba->scsi_buf_list_put_lock);
  4980. INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
  4981. }
  4982. if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
  4983. (phba->nvmet_support == 0)) {
  4984. /* Initialize the NVME buffer list used by driver for NVME IO */
  4985. spin_lock_init(&phba->nvme_buf_list_get_lock);
  4986. INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
  4987. spin_lock_init(&phba->nvme_buf_list_put_lock);
  4988. INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
  4989. }
  4990. /* Initialize the fabric iocb list */
  4991. INIT_LIST_HEAD(&phba->fabric_iocb_list);
  4992. /* Initialize list to save ELS buffers */
  4993. INIT_LIST_HEAD(&phba->elsbuf);
  4994. /* Initialize FCF connection rec list */
  4995. INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
  4996. /* Initialize OAS configuration list */
  4997. spin_lock_init(&phba->devicelock);
  4998. INIT_LIST_HEAD(&phba->luns);
  4999. /* MBOX heartbeat timer */
  5000. setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
  5001. /* Fabric block timer */
  5002. setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
  5003. (unsigned long)phba);
  5004. /* EA polling mode timer */
  5005. setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
  5006. (unsigned long)phba);
  5007. /* Heartbeat timer */
  5008. setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
  5009. return 0;
  5010. }
  5011. /**
  5012. * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
  5013. * @phba: pointer to lpfc hba data structure.
  5014. *
  5015. * This routine is invoked to set up the driver internal resources specific to
5016. * support the SLI-3 HBA device it is attached to.
  5017. *
  5018. * Return codes
  5019. * 0 - successful
  5020. * other values - error
  5021. **/
  5022. static int
  5023. lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
  5024. {
  5025. int rc;
  5026. /*
  5027. * Initialize timers used by driver
  5028. */
  5029. /* FCP polling mode timer */
  5030. setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
  5031. (unsigned long)phba);
  5032. /* Host attention work mask setup */
  5033. phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
  5034. phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
  5035. /* Get all the module params for configuring this host */
  5036. lpfc_get_cfgparam(phba);
  5037. /* Set up phase-1 common device driver resources */
  5038. rc = lpfc_setup_driver_resource_phase1(phba);
  5039. if (rc)
  5040. return -ENODEV;
  5041. if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
  5042. phba->menlo_flag |= HBA_MENLO_SUPPORT;
  5043. /* check for menlo minimum sg count */
  5044. if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
  5045. phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
  5046. }
  5047. if (!phba->sli.sli3_ring)
  5048. phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
  5049. sizeof(struct lpfc_sli_ring), GFP_KERNEL);
  5050. if (!phba->sli.sli3_ring)
  5051. return -ENOMEM;
  5052. /*
5053. * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
  5054. * used to create the sg_dma_buf_pool must be dynamically calculated.
  5055. */
5056. /* Initialize the host templates with the configured values. */
  5057. lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
  5058. lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
  5059. lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
  5060. /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
  5061. if (phba->cfg_enable_bg) {
  5062. /*
  5063. * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5064. * the FCP rsp, and a BDE for each. Since we have no control
5065. * over how many protection data segments the SCSI Layer
5066. * will hand us (i.e. there could be one for every block
5067. * in the IO), we just allocate enough BDEs to accommodate
  5068. * our max amount and we need to limit lpfc_sg_seg_cnt to
  5069. * minimize the risk of running out.
  5070. */
  5071. phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
  5072. sizeof(struct fcp_rsp) +
  5073. (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
  5074. if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
  5075. phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
  5076. /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
  5077. phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
  5078. } else {
  5079. /*
  5080. * The scsi_buf for a regular I/O will hold the FCP cmnd,
  5081. * the FCP rsp, a BDE for each, and a BDE for up to
  5082. * cfg_sg_seg_cnt data segments.
  5083. */
  5084. phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
  5085. sizeof(struct fcp_rsp) +
  5086. ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
  5087. /* Total BDEs in BPL for scsi_sg_list */
  5088. phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
  5089. }
  5090. lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
  5091. "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
  5092. phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
  5093. phba->cfg_total_seg_cnt);
  5094. phba->max_vpi = LPFC_MAX_VPI;
  5095. /* This will be set to correct value after config_port mbox */
  5096. phba->max_vports = 0;
  5097. /*
  5098. * Initialize the SLI Layer to run with lpfc HBAs.
  5099. */
  5100. lpfc_sli_setup(phba);
  5101. lpfc_sli_queue_init(phba);
  5102. /* Allocate device driver memory */
  5103. if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
  5104. return -ENOMEM;
  5105. /*
  5106. * Enable sr-iov virtual functions if supported and configured
  5107. * through the module parameter.
  5108. */
  5109. if (phba->cfg_sriov_nr_virtfn > 0) {
  5110. rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
  5111. phba->cfg_sriov_nr_virtfn);
  5112. if (rc) {
  5113. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  5114. "2808 Requested number of SR-IOV "
  5115. "virtual functions (%d) is not "
  5116. "supported\n",
  5117. phba->cfg_sriov_nr_virtfn);
  5118. phba->cfg_sriov_nr_virtfn = 0;
  5119. }
  5120. }
  5121. return 0;
  5122. }
  5123. /**
  5124. * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
  5125. * @phba: pointer to lpfc hba data structure.
  5126. *
  5127. * This routine is invoked to unset the driver internal resources set up
5128. * specific for supporting the SLI-3 HBA device it is attached to.
  5129. **/
  5130. static void
  5131. lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
  5132. {
  5133. /* Free device driver memory allocated */
  5134. lpfc_mem_free_all(phba);
  5135. return;
  5136. }
  5137. /**
  5138. * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
  5139. * @phba: pointer to lpfc hba data structure.
  5140. *
  5141. * This routine is invoked to set up the driver internal resources specific to
5142. * support the SLI-4 HBA device it is attached to.
  5143. *
  5144. * Return codes
  5145. * 0 - successful
  5146. * other values - error
  5147. **/
  5148. static int
  5149. lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
  5150. {
  5151. LPFC_MBOXQ_t *mboxq;
  5152. MAILBOX_t *mb;
  5153. int rc, i, max_buf_size;
  5154. uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
  5155. struct lpfc_mqe *mqe;
  5156. int longs;
  5157. int fof_vectors = 0;
  5158. uint64_t wwn;
  5159. phba->sli4_hba.num_online_cpu = num_online_cpus();
  5160. phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
  5161. phba->sli4_hba.curr_disp_cpu = 0;
  5162. /* Get all the module params for configuring this host */
  5163. lpfc_get_cfgparam(phba);
  5164. /* Set up phase-1 common device driver resources */
  5165. rc = lpfc_setup_driver_resource_phase1(phba);
  5166. if (rc)
  5167. return -ENODEV;
  5168. /* Before proceed, wait for POST done and device ready */
  5169. rc = lpfc_sli4_post_status_check(phba);
  5170. if (rc)
  5171. return -ENODEV;
  5172. /*
  5173. * Initialize timers used by driver
  5174. */
  5175. setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
  5176. /* FCF rediscover timer */
  5177. setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
  5178. (unsigned long)phba);
  5179. /*
  5180. * Control structure for handling external multi-buffer mailbox
  5181. * command pass-through.
  5182. */
  5183. memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
  5184. sizeof(struct lpfc_mbox_ext_buf_ctx));
  5185. INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
  5186. phba->max_vpi = LPFC_MAX_VPI;
  5187. /* This will be set to correct value after the read_config mbox */
  5188. phba->max_vports = 0;
  5189. /* Program the default value of vlan_id and fc_map */
  5190. phba->valid_vlan = 0;
  5191. phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
  5192. phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
  5193. phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
  5194. /*
  5195. * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
  5196. * we will associate a new ring, for each EQ/CQ/WQ tuple.
  5197. * The WQ create will allocate the ring.
  5198. */
  5199. /*
  5200. * It doesn't matter what family our adapter is in, we are
  5201. * limited to 2 Pages, 512 SGEs, for our SGL.
  5202. * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
  5203. */
  5204. max_buf_size = (2 * SLI4_PAGE_SIZE);
  5205. if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
  5206. phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
  5207. /*
5208. * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
  5209. * used to create the sg_dma_buf_pool must be calculated.
  5210. */
  5211. if (phba->cfg_enable_bg) {
  5212. /*
  5213. * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5214. * the FCP rsp, and a SGE. Since we have no control
5215. * over how many protection segments the SCSI Layer
5216. * will hand us (i.e. there could be one for every block
5217. * in the IO), just allocate enough SGEs to accommodate
  5218. * our max amount and we need to limit lpfc_sg_seg_cnt
  5219. * to minimize the risk of running out.
  5220. */
  5221. phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
  5222. sizeof(struct fcp_rsp) + max_buf_size;
  5223. /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
  5224. phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
  5225. if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
  5226. phba->cfg_sg_seg_cnt =
  5227. LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
  5228. } else {
  5229. /*
  5230. * The scsi_buf for a regular I/O holds the FCP cmnd,
  5231. * the FCP rsp, a SGE for each, and a SGE for up to
  5232. * cfg_sg_seg_cnt data segments.
  5233. */
  5234. phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
  5235. sizeof(struct fcp_rsp) +
  5236. ((phba->cfg_sg_seg_cnt + 2) *
  5237. sizeof(struct sli4_sge));
  5238. /* Total SGEs for scsi_sg_list */
  5239. phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
  5240. /*
  5241. * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
  5242. * need to post 1 page for the SGL.
  5243. */
  5244. }
  5245. /* Initialize the host templates with the updated values. */
  5246. lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
  5247. lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
  5248. lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
  5249. if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
  5250. phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
  5251. else
  5252. phba->cfg_sg_dma_buf_size =
  5253. SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
  5254. lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
  5255. "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
  5256. phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
  5257. phba->cfg_total_seg_cnt);
  5258. /* Initialize buffer queue management fields */
  5259. INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
  5260. phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
  5261. phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
  5262. /*
  5263. * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
  5264. */
  5265. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
  5266. /* Initialize the Abort scsi buffer list used by driver */
  5267. spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
  5268. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
  5269. }
  5270. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  5271. /* Initialize the Abort nvme buffer list used by driver */
  5272. spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
  5273. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
  5274. /* Fast-path XRI aborted CQ Event work queue list */
  5275. INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
  5276. }
  5277. /* This abort list used by worker thread */
  5278. spin_lock_init(&phba->sli4_hba.sgl_list_lock);
  5279. spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
  5280. /*
  5281. * Initialize driver internal slow-path work queues
  5282. */
5283. /* Driver internal slow-path CQ Event pool */
  5284. INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
  5285. /* Response IOCB work queue list */
  5286. INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
  5287. /* Asynchronous event CQ Event work queue list */
  5288. INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
  5289. /* Fast-path XRI aborted CQ Event work queue list */
  5290. INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
  5291. /* Slow-path XRI aborted CQ Event work queue list */
  5292. INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
  5293. /* Receive queue CQ Event work queue list */
  5294. INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
  5295. /* Initialize extent block lists. */
  5296. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
  5297. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
  5298. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
  5299. INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
  5300. /* initialize optic_state to 0xFF */
  5301. phba->sli4_hba.lnk_info.optic_state = 0xff;
  5302. /* Allocate device driver memory */
  5303. rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
  5304. if (rc)
  5305. return -ENOMEM;
  5306. /* IF Type 2 ports get initialized now. */
  5307. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  5308. LPFC_SLI_INTF_IF_TYPE_2) {
  5309. rc = lpfc_pci_function_reset(phba);
  5310. if (unlikely(rc)) {
  5311. rc = -ENODEV;
  5312. goto out_free_mem;
  5313. }
  5314. phba->temp_sensor_support = 1;
  5315. }
  5316. /* Create the bootstrap mailbox command */
  5317. rc = lpfc_create_bootstrap_mbox(phba);
  5318. if (unlikely(rc))
  5319. goto out_free_mem;
  5320. /* Set up the host's endian order with the device. */
  5321. rc = lpfc_setup_endian_order(phba);
  5322. if (unlikely(rc))
  5323. goto out_free_bsmbx;
  5324. /* Set up the hba's configuration parameters. */
  5325. rc = lpfc_sli4_read_config(phba);
  5326. if (unlikely(rc))
  5327. goto out_free_bsmbx;
  5328. rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
  5329. if (unlikely(rc))
  5330. goto out_free_bsmbx;
  5331. /* IF Type 0 ports get initialized now. */
  5332. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  5333. LPFC_SLI_INTF_IF_TYPE_0) {
  5334. rc = lpfc_pci_function_reset(phba);
  5335. if (unlikely(rc))
  5336. goto out_free_bsmbx;
  5337. }
  5338. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
  5339. GFP_KERNEL);
  5340. if (!mboxq) {
  5341. rc = -ENOMEM;
  5342. goto out_free_bsmbx;
  5343. }
  5344. /* Check for NVMET being configured */
  5345. phba->nvmet_support = 0;
  5346. if (lpfc_enable_nvmet_cnt) {
  5347. /* First get WWN of HBA instance */
  5348. lpfc_read_nv(phba, mboxq);
  5349. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5350. if (rc != MBX_SUCCESS) {
  5351. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  5352. "6016 Mailbox failed , mbxCmd x%x "
  5353. "READ_NV, mbxStatus x%x\n",
  5354. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  5355. bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  5356. rc = -EIO;
  5357. goto out_free_bsmbx;
  5358. }
  5359. mb = &mboxq->u.mb;
  5360. memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
  5361. sizeof(uint64_t));
  5362. wwn = cpu_to_be64(wwn);
  5363. phba->sli4_hba.wwnn.u.name = wwn;
  5364. memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
  5365. sizeof(uint64_t));
  5366. /* wwn is WWPN of HBA instance */
  5367. wwn = cpu_to_be64(wwn);
  5368. phba->sli4_hba.wwpn.u.name = wwn;
  5369. /* Check to see if it matches any module parameter */
  5370. for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
  5371. if (wwn == lpfc_enable_nvmet[i]) {
  5372. #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
  5373. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5374. "6017 NVME Target %016llx\n",
  5375. wwn);
  5376. phba->nvmet_support = 1; /* a match */
  5377. #else
  5378. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5379. "6021 Can't enable NVME Target."
  5380. " NVME_TARGET_FC infrastructure"
  5381. " is not in kernel\n");
  5382. #endif
  5383. }
  5384. }
  5385. }
  5386. lpfc_nvme_mod_param_dep(phba);
  5387. /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
  5388. lpfc_supported_pages(mboxq);
  5389. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5390. if (!rc) {
  5391. mqe = &mboxq->u.mqe;
  5392. memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
  5393. LPFC_MAX_SUPPORTED_PAGES);
  5394. for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
  5395. switch (pn_page[i]) {
  5396. case LPFC_SLI4_PARAMETERS:
  5397. phba->sli4_hba.pc_sli4_params.supported = 1;
  5398. break;
  5399. default:
  5400. break;
  5401. }
  5402. }
  5403. /* Read the port's SLI4 Parameters capabilities if supported. */
  5404. if (phba->sli4_hba.pc_sli4_params.supported)
  5405. rc = lpfc_pc_sli4_params_get(phba, mboxq);
  5406. if (rc) {
  5407. mempool_free(mboxq, phba->mbox_mem_pool);
  5408. rc = -EIO;
  5409. goto out_free_bsmbx;
  5410. }
  5411. }
  5412. /*
  5413. * Get sli4 parameters that override parameters from Port capabilities.
  5414. * If this call fails, it isn't critical unless the SLI4 parameters come
  5415. * back in conflict.
  5416. */
  5417. rc = lpfc_get_sli4_parameters(phba, mboxq);
  5418. if (rc) {
  5419. if (phba->sli4_hba.extents_in_use &&
  5420. phba->sli4_hba.rpi_hdrs_in_use) {
  5421. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5422. "2999 Unsupported SLI4 Parameters "
  5423. "Extents and RPI headers enabled.\n");
  5424. }
  5425. mempool_free(mboxq, phba->mbox_mem_pool);
  5426. goto out_free_bsmbx;
  5427. }
  5428. mempool_free(mboxq, phba->mbox_mem_pool);
  5429. /* Verify OAS is supported */
  5430. lpfc_sli4_oas_verify(phba);
  5431. if (phba->cfg_fof)
  5432. fof_vectors = 1;
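/* When the FOF (OAS) queue is configured, account for one extra EQ
 * handle in the per-EQ handle array allocated below.
 */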
  5433. /* Verify all the SLI4 queues */
  5434. rc = lpfc_sli4_queue_verify(phba);
  5435. if (rc)
  5436. goto out_free_bsmbx;
  5437. /* Create driver internal CQE event pool */
  5438. rc = lpfc_sli4_cq_event_pool_create(phba);
  5439. if (rc)
  5440. goto out_free_bsmbx;
  5441. /* Initialize sgl lists per host */
  5442. lpfc_init_sgl_list(phba);
  5443. /* Allocate and initialize active sgl array */
  5444. rc = lpfc_init_active_sgl_array(phba);
  5445. if (rc) {
  5446. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5447. "1430 Failed to initialize sgl list.\n");
  5448. goto out_destroy_cq_event_pool;
  5449. }
  5450. rc = lpfc_sli4_init_rpi_hdrs(phba);
  5451. if (rc) {
  5452. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5453. "1432 Failed to initialize rpi headers.\n");
  5454. goto out_free_active_sgl;
  5455. }
  5456. /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
  5457. longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
  5458. phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
  5459. GFP_KERNEL);
  5460. if (!phba->fcf.fcf_rr_bmask) {
  5461. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5462. "2759 Failed allocate memory for FCF round "
  5463. "robin failover bmask\n");
  5464. rc = -ENOMEM;
  5465. goto out_remove_rpi_hdrs;
  5466. }
  5467. phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
  5468. sizeof(struct lpfc_hba_eq_hdl),
  5469. GFP_KERNEL);
  5470. if (!phba->sli4_hba.hba_eq_hdl) {
  5471. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5472. "2572 Failed allocate memory for "
  5473. "fast-path per-EQ handle array\n");
  5474. rc = -ENOMEM;
  5475. goto out_free_fcf_rr_bmask;
  5476. }
  5477. phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
  5478. sizeof(struct lpfc_vector_map_info),
  5479. GFP_KERNEL);
  5480. if (!phba->sli4_hba.cpu_map) {
  5481. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5482. "3327 Failed allocate memory for msi-x "
  5483. "interrupt vector mapping\n");
  5484. rc = -ENOMEM;
  5485. goto out_free_hba_eq_hdl;
  5486. }
  5487. if (lpfc_used_cpu == NULL) {
  5488. lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
  5489. GFP_KERNEL);
  5490. if (!lpfc_used_cpu) {
  5491. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5492. "3335 Failed allocate memory for msi-x "
  5493. "interrupt vector mapping\n");
  5494. kfree(phba->sli4_hba.cpu_map);
  5495. rc = -ENOMEM;
  5496. goto out_free_hba_eq_hdl;
  5497. }
  5498. for (i = 0; i < lpfc_present_cpu; i++)
  5499. lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
  5500. }
  5501. /*
  5502. * Enable sr-iov virtual functions if supported and configured
  5503. * through the module parameter.
  5504. */
  5505. if (phba->cfg_sriov_nr_virtfn > 0) {
  5506. rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
  5507. phba->cfg_sriov_nr_virtfn);
  5508. if (rc) {
  5509. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  5510. "3020 Requested number of SR-IOV "
  5511. "virtual functions (%d) is not "
  5512. "supported\n",
  5513. phba->cfg_sriov_nr_virtfn);
  5514. phba->cfg_sriov_nr_virtfn = 0;
  5515. }
  5516. }
  5517. return 0;
  5518. out_free_hba_eq_hdl:
  5519. kfree(phba->sli4_hba.hba_eq_hdl);
  5520. out_free_fcf_rr_bmask:
  5521. kfree(phba->fcf.fcf_rr_bmask);
  5522. out_remove_rpi_hdrs:
  5523. lpfc_sli4_remove_rpi_hdrs(phba);
  5524. out_free_active_sgl:
  5525. lpfc_free_active_sgl(phba);
  5526. out_destroy_cq_event_pool:
  5527. lpfc_sli4_cq_event_pool_destroy(phba);
  5528. out_free_bsmbx:
  5529. lpfc_destroy_bootstrap_mbox(phba);
  5530. out_free_mem:
  5531. lpfc_mem_free(phba);
  5532. return rc;
  5533. }
  5534. /**
  5535. * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
  5536. * @phba: pointer to lpfc hba data structure.
  5537. *
  5538. * This routine is invoked to unset the driver internal resources set up
5539. * specific for supporting the SLI-4 HBA device it is attached to.
  5540. **/
  5541. static void
  5542. lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
  5543. {
  5544. struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
  5545. /* Free memory allocated for msi-x interrupt vector to CPU mapping */
  5546. kfree(phba->sli4_hba.cpu_map);
  5547. phba->sli4_hba.num_present_cpu = 0;
  5548. phba->sli4_hba.num_online_cpu = 0;
  5549. phba->sli4_hba.curr_disp_cpu = 0;
  5550. /* Free memory allocated for fast-path work queue handles */
  5551. kfree(phba->sli4_hba.hba_eq_hdl);
  5552. /* Free the allocated rpi headers. */
  5553. lpfc_sli4_remove_rpi_hdrs(phba);
  5554. lpfc_sli4_remove_rpis(phba);
  5555. /* Free eligible FCF index bmask */
  5556. kfree(phba->fcf.fcf_rr_bmask);
  5557. /* Free the ELS sgl list */
  5558. lpfc_free_active_sgl(phba);
  5559. lpfc_free_els_sgl_list(phba);
  5560. lpfc_free_nvmet_sgl_list(phba);
  5561. /* Free the completion queue EQ event pool */
  5562. lpfc_sli4_cq_event_release_all(phba);
  5563. lpfc_sli4_cq_event_pool_destroy(phba);
  5564. /* Release resource identifiers. */
  5565. lpfc_sli4_dealloc_resource_identifiers(phba);
  5566. /* Free the bsmbx region. */
  5567. lpfc_destroy_bootstrap_mbox(phba);
  5568. /* Free the SLI Layer memory with SLI4 HBAs */
  5569. lpfc_mem_free_all(phba);
  5570. /* Free the current connect table */
  5571. list_for_each_entry_safe(conn_entry, next_conn_entry,
  5572. &phba->fcf_conn_rec_list, list) {
  5573. list_del_init(&conn_entry->list);
  5574. kfree(conn_entry);
  5575. }
  5576. return;
  5577. }
  5578. /**
  5579. * lpfc_init_api_table_setup - Set up init api function jump table
  5580. * @phba: The hba struct for which this call is being executed.
  5581. * @dev_grp: The HBA PCI-Device group number.
  5582. *
  5583. * This routine sets up the device INIT interface API function jump table
  5584. * in @phba struct.
  5585. *
  5586. * Returns: 0 - success, -ENODEV - failure.
  5587. **/
  5588. int
  5589. lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  5590. {
  5591. phba->lpfc_hba_init_link = lpfc_hba_init_link;
  5592. phba->lpfc_hba_down_link = lpfc_hba_down_link;
  5593. phba->lpfc_selective_reset = lpfc_selective_reset;
  5594. switch (dev_grp) {
  5595. case LPFC_PCI_DEV_LP:
  5596. phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
  5597. phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
  5598. phba->lpfc_stop_port = lpfc_stop_port_s3;
  5599. break;
  5600. case LPFC_PCI_DEV_OC:
  5601. phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
  5602. phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
  5603. phba->lpfc_stop_port = lpfc_stop_port_s4;
  5604. break;
  5605. default:
  5606. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  5607. "1431 Invalid HBA PCI-device group: 0x%x\n",
  5608. dev_grp);
  5609. return -ENODEV;
  5610. break;
  5611. }
  5612. return 0;
  5613. }
  5614. /**
  5615. * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
  5616. * @phba: pointer to lpfc hba data structure.
  5617. *
  5618. * This routine is invoked to set up the driver internal resources after the
5619. * device specific resource setup to support the HBA device it is attached to.
  5620. *
  5621. * Return codes
  5622. * 0 - successful
  5623. * other values - error
  5624. **/
  5625. static int
  5626. lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
  5627. {
  5628. int error;
  5629. /* Startup the kernel thread for this host adapter. */
  5630. phba->worker_thread = kthread_run(lpfc_do_work, phba,
  5631. "lpfc_worker_%d", phba->brd_no);
  5632. if (IS_ERR(phba->worker_thread)) {
  5633. error = PTR_ERR(phba->worker_thread);
  5634. return error;
  5635. }
  5636. return 0;
  5637. }
  5638. /**
  5639. * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
  5640. * @phba: pointer to lpfc hba data structure.
  5641. *
  5642. * This routine is invoked to unset the driver internal resources set up after
  5643. * the device specific resource setup for supporting the HBA device it
5644. * is attached to.
  5645. **/
  5646. static void
  5647. lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  5648. {
  5649. /* Stop kernel worker thread */
  5650. kthread_stop(phba->worker_thread);
  5651. }
  5652. /**
  5653. * lpfc_free_iocb_list - Free iocb list.
  5654. * @phba: pointer to lpfc hba data structure.
  5655. *
  5656. * This routine is invoked to free the driver's IOCB list and memory.
  5657. **/
  5658. static void
  5659. lpfc_free_iocb_list(struct lpfc_hba *phba)
  5660. {
  5661. struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
  5662. spin_lock_irq(&phba->hbalock);
  5663. list_for_each_entry_safe(iocbq_entry, iocbq_next,
  5664. &phba->lpfc_iocb_list, list) {
  5665. list_del(&iocbq_entry->list);
  5666. kfree(iocbq_entry);
  5667. phba->total_iocbq_bufs--;
  5668. }
  5669. spin_unlock_irq(&phba->hbalock);
  5670. return;
  5671. }
  5672. /**
  5673. * lpfc_init_iocb_list - Allocate and initialize iocb list.
5674. * @phba: pointer to lpfc hba data structure.
5675. * @iocb_count: number of IOCB entries to allocate.
5676. * This routine is invoked to allocate and initialize the driver's IOCB
5677. * list and set up the IOCB tag array accordingly.
  5678. *
  5679. * Return codes
  5680. * 0 - successful
  5681. * other values - error
  5682. **/
  5683. static int
  5684. lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
  5685. {
  5686. struct lpfc_iocbq *iocbq_entry = NULL;
  5687. uint16_t iotag;
  5688. int i;
  5689. /* Initialize and populate the iocb list per host. */
  5690. INIT_LIST_HEAD(&phba->lpfc_iocb_list);
  5691. for (i = 0; i < iocb_count; i++) {
  5692. iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
  5693. if (iocbq_entry == NULL) {
  5694. printk(KERN_ERR "%s: only allocated %d iocbs of "
  5695. "expected %d count. Unloading driver.\n",
5696. __func__, i, iocb_count);
  5697. goto out_free_iocbq;
  5698. }
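/* Assign a unique iotag so this iocb can be looked up at completion
 * time; a return of 0 indicates the tag could not be allocated.
 */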
  5699. iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
  5700. if (iotag == 0) {
  5701. kfree(iocbq_entry);
  5702. printk(KERN_ERR "%s: failed to allocate IOTAG. "
  5703. "Unloading driver.\n", __func__);
  5704. goto out_free_iocbq;
  5705. }
  5706. iocbq_entry->sli4_lxritag = NO_XRI;
  5707. iocbq_entry->sli4_xritag = NO_XRI;
  5708. spin_lock_irq(&phba->hbalock);
  5709. list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
  5710. phba->total_iocbq_bufs++;
  5711. spin_unlock_irq(&phba->hbalock);
  5712. }
  5713. return 0;
  5714. out_free_iocbq:
  5715. lpfc_free_iocb_list(phba);
  5716. return -ENOMEM;
  5717. }
  5718. /**
  5719. * lpfc_free_sgl_list - Free a given sgl list.
  5720. * @phba: pointer to lpfc hba data structure.
  5721. * @sglq_list: pointer to the head of sgl list.
  5722. *
5723. * This routine is invoked to free a given sgl list and memory.
  5724. **/
  5725. void
  5726. lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
  5727. {
  5728. struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
  5729. list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
  5730. list_del(&sglq_entry->list);
  5731. lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
  5732. kfree(sglq_entry);
  5733. }
  5734. }
  5735. /**
  5736. * lpfc_free_els_sgl_list - Free els sgl list.
  5737. * @phba: pointer to lpfc hba data structure.
  5738. *
  5739. * This routine is invoked to free the driver's els sgl list and memory.
  5740. **/
  5741. static void
  5742. lpfc_free_els_sgl_list(struct lpfc_hba *phba)
  5743. {
  5744. LIST_HEAD(sglq_list);
  5745. /* Retrieve all els sgls from driver list */
  5746. spin_lock_irq(&phba->hbalock);
  5747. spin_lock(&phba->sli4_hba.sgl_list_lock);
  5748. list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
  5749. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  5750. spin_unlock_irq(&phba->hbalock);
  5751. /* Now free the sgl list */
  5752. lpfc_free_sgl_list(phba, &sglq_list);
  5753. }
  5754. /**
  5755. * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
  5756. * @phba: pointer to lpfc hba data structure.
  5757. *
  5758. * This routine is invoked to free the driver's nvmet sgl list and memory.
  5759. **/
  5760. static void
  5761. lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
  5762. {
  5763. struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
  5764. LIST_HEAD(sglq_list);
  5765. /* Retrieve all nvmet sgls from driver list */
  5766. spin_lock_irq(&phba->hbalock);
  5767. spin_lock(&phba->sli4_hba.sgl_list_lock);
  5768. list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
  5769. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  5770. spin_unlock_irq(&phba->hbalock);
  5771. /* Now free the sgl list */
  5772. list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
  5773. list_del(&sglq_entry->list);
  5774. lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
  5775. kfree(sglq_entry);
  5776. }
  5777. }
  5778. /**
  5779. * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
  5780. * @phba: pointer to lpfc hba data structure.
  5781. *
  5782. * This routine is invoked to allocate the driver's active sgl memory.
  5783. * This array will hold the sglq_entry's for active IOs.
  5784. **/
  5785. static int
  5786. lpfc_init_active_sgl_array(struct lpfc_hba *phba)
  5787. {
  5788. int size;
  5789. size = sizeof(struct lpfc_sglq *);
  5790. size *= phba->sli4_hba.max_cfg_param.max_xri;
  5791. phba->sli4_hba.lpfc_sglq_active_list =
  5792. kzalloc(size, GFP_KERNEL);
  5793. if (!phba->sli4_hba.lpfc_sglq_active_list)
  5794. return -ENOMEM;
  5795. return 0;
  5796. }
  5797. /**
  5798. * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
  5799. * @phba: pointer to lpfc hba data structure.
  5800. *
  5801. * This routine is invoked to walk through the array of active sglq entries
  5802. * and free all of the resources.
  5803. * This is just a place holder for now.
  5804. **/
  5805. static void
  5806. lpfc_free_active_sgl(struct lpfc_hba *phba)
  5807. {
  5808. kfree(phba->sli4_hba.lpfc_sglq_active_list);
  5809. }
  5810. /**
  5811. * lpfc_init_sgl_list - Allocate and initialize sgl list.
  5812. * @phba: pointer to lpfc hba data structure.
  5813. *
5814. * This routine is invoked to allocate and initialize the driver's sgl
  5815. * list and set up the sgl xritag tag array accordingly.
  5816. *
  5817. **/
  5818. static void
  5819. lpfc_init_sgl_list(struct lpfc_hba *phba)
  5820. {
  5821. /* Initialize and populate the sglq list per host/VF. */
  5822. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
  5823. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
  5824. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
  5825. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
  5826. /* els xri-sgl book keeping */
  5827. phba->sli4_hba.els_xri_cnt = 0;
  5828. /* scsi xri-buffer book keeping */
  5829. phba->sli4_hba.scsi_xri_cnt = 0;
  5830. /* nvme xri-buffer book keeping */
  5831. phba->sli4_hba.nvme_xri_cnt = 0;
  5832. }
  5833. /**
  5834. * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
  5835. * @phba: pointer to lpfc hba data structure.
  5836. *
  5837. * This routine is invoked to post rpi header templates to the
  5838. * port for those SLI4 ports that do not support extents. This routine
  5839. * posts a PAGE_SIZE memory region to the port to hold up to
5840. * 64 rpi context headers. This is an initialization routine
  5841. * and should be called only when interrupts are disabled.
  5842. *
  5843. * Return codes
  5844. * 0 - successful
  5845. * -ERROR - otherwise.
  5846. **/
  5847. int
  5848. lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
  5849. {
  5850. int rc = 0;
  5851. struct lpfc_rpi_hdr *rpi_hdr;
  5852. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
  5853. if (!phba->sli4_hba.rpi_hdrs_in_use)
  5854. return rc;
  5855. if (phba->sli4_hba.extents_in_use)
  5856. return -EIO;
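/* Allocate and queue the first RPI header region for the port. */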
  5857. rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
  5858. if (!rpi_hdr) {
  5859. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
  5860. "0391 Error during rpi post operation\n");
  5861. lpfc_sli4_remove_rpis(phba);
  5862. rc = -ENODEV;
  5863. }
  5864. return rc;
  5865. }
  5866. /**
  5867. * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
  5868. * @phba: pointer to lpfc hba data structure.
  5869. *
  5870. * This routine is invoked to allocate a single 4KB memory region to
  5871. * support rpis and stores them in the phba. This single region
  5872. * provides support for up to 64 rpis. The region is used globally
  5873. * by the device.
  5874. *
  5875. * Returns:
  5876. * A valid rpi hdr on success.
  5877. * A NULL pointer on any failure.
  5878. **/
  5879. struct lpfc_rpi_hdr *
  5880. lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
  5881. {
  5882. uint16_t rpi_limit, curr_rpi_range;
  5883. struct lpfc_dmabuf *dmabuf;
  5884. struct lpfc_rpi_hdr *rpi_hdr;
  5885. uint32_t rpi_count;
  5886. /*
  5887. * If the SLI4 port supports extents, posting the rpi header isn't
  5888. * required. Set the expected maximum count and let the actual value
  5889. * get set when extents are fully allocated.
  5890. */
  5891. if (!phba->sli4_hba.rpi_hdrs_in_use)
  5892. return NULL;
  5893. if (phba->sli4_hba.extents_in_use)
  5894. return NULL;
  5895. /* The limit on the logical index is just the max_rpi count. */
  5896. rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
  5897. phba->sli4_hba.max_cfg_param.max_rpi - 1;
  5898. spin_lock_irq(&phba->hbalock);
  5899. /*
  5900. * Establish the starting RPI in this header block. The starting
  5901. * rpi is normalized to a zero base because the physical rpi is
  5902. * port based.
  5903. */
  5904. curr_rpi_range = phba->sli4_hba.next_rpi;
  5905. spin_unlock_irq(&phba->hbalock);
  5906. /*
  5907. * The port has a limited number of rpis. The increment here
  5908. * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
  5909. * and to allow the full max_rpi range per port.
  5910. */
  5911. if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
  5912. rpi_count = rpi_limit - curr_rpi_range;
  5913. else
  5914. rpi_count = LPFC_RPI_HDR_COUNT;
  5915. if (!rpi_count)
  5916. return NULL;
  5917. /*
  5918. * First allocate the protocol header region for the port. The
  5919. * port expects a 4KB DMA-mapped memory region that is 4K aligned.
  5920. */
  5921. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  5922. if (!dmabuf)
  5923. return NULL;
  5924. dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
  5925. LPFC_HDR_TEMPLATE_SIZE,
  5926. &dmabuf->phys, GFP_KERNEL);
  5927. if (!dmabuf->virt) {
  5928. rpi_hdr = NULL;
  5929. goto err_free_dmabuf;
  5930. }
  5931. if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
  5932. rpi_hdr = NULL;
  5933. goto err_free_coherent;
  5934. }
  5935. /* Save the rpi header data for cleanup later. */
  5936. rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
  5937. if (!rpi_hdr)
  5938. goto err_free_coherent;
  5939. rpi_hdr->dmabuf = dmabuf;
  5940. rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
  5941. rpi_hdr->page_count = 1;
  5942. spin_lock_irq(&phba->hbalock);
  5943. /* The rpi_hdr stores the logical index only. */
  5944. rpi_hdr->start_rpi = curr_rpi_range;
  5945. list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
  5946. /*
   5947. * The next_rpi stores the next logical modulo-64 rpi value used
  5948. * to post physical rpis in subsequent rpi postings.
  5949. */
  5950. phba->sli4_hba.next_rpi += rpi_count;
  5951. spin_unlock_irq(&phba->hbalock);
  5952. return rpi_hdr;
  5953. err_free_coherent:
  5954. dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
  5955. dmabuf->virt, dmabuf->phys);
  5956. err_free_dmabuf:
  5957. kfree(dmabuf);
  5958. return NULL;
  5959. }
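/*
 * Illustrative note (not part of the original driver source): assuming
 * LPFC_HDR_TEMPLATE_SIZE is 4KB and LPFC_RPI_HDR_COUNT is 64, each
 * successful call above posts one 4KB template region covering 64 rpi
 * context headers (64 bytes each), so logical rpi ranges advance like:
 *
 *   first call:  start_rpi = 0,  next_rpi becomes 64
 *   second call: start_rpi = 64, next_rpi becomes 128
 *
 * until the clamp against rpi_limit trims the final block.
 */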
  5960. /**
  5961. * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
  5962. * @phba: pointer to lpfc hba data structure.
  5963. *
  5964. * This routine is invoked to remove all memory resources allocated
  5965. * to support rpis for SLI4 ports not supporting extents. This routine
  5966. * presumes the caller has released all rpis consumed by fabric or port
  5967. * logins and is prepared to have the header pages removed.
  5968. **/
  5969. void
  5970. lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
  5971. {
  5972. struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
  5973. if (!phba->sli4_hba.rpi_hdrs_in_use)
  5974. goto exit;
  5975. list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
  5976. &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
  5977. list_del(&rpi_hdr->list);
  5978. dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
  5979. rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
  5980. kfree(rpi_hdr->dmabuf);
  5981. kfree(rpi_hdr);
  5982. }
  5983. exit:
  5984. /* There are no rpis available to the port now. */
  5985. phba->sli4_hba.next_rpi = 0;
  5986. }
  5987. /**
  5988. * lpfc_hba_alloc - Allocate driver hba data structure for a device.
  5989. * @pdev: pointer to pci device data structure.
  5990. *
  5991. * This routine is invoked to allocate the driver hba data structure for an
  5992. * HBA device. If the allocation is successful, the phba reference to the
  5993. * PCI device data structure is set.
  5994. *
  5995. * Return codes
  5996. * pointer to @phba - successful
  5997. * NULL - error
  5998. **/
  5999. static struct lpfc_hba *
  6000. lpfc_hba_alloc(struct pci_dev *pdev)
  6001. {
  6002. struct lpfc_hba *phba;
  6003. /* Allocate memory for HBA structure */
  6004. phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
  6005. if (!phba) {
  6006. dev_err(&pdev->dev, "failed to allocate hba struct\n");
  6007. return NULL;
  6008. }
  6009. /* Set reference to PCI device in HBA structure */
  6010. phba->pcidev = pdev;
  6011. /* Assign an unused board number */
  6012. phba->brd_no = lpfc_get_instance();
  6013. if (phba->brd_no < 0) {
  6014. kfree(phba);
  6015. return NULL;
  6016. }
  6017. phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
  6018. spin_lock_init(&phba->ct_ev_lock);
  6019. INIT_LIST_HEAD(&phba->ct_ev_waiters);
  6020. return phba;
  6021. }
  6022. /**
  6023. * lpfc_hba_free - Free driver hba data structure with a device.
  6024. * @phba: pointer to lpfc hba data structure.
  6025. *
  6026. * This routine is invoked to free the driver hba data structure with an
  6027. * HBA device.
  6028. **/
  6029. static void
  6030. lpfc_hba_free(struct lpfc_hba *phba)
  6031. {
  6032. /* Release the driver assigned board number */
  6033. idr_remove(&lpfc_hba_index, phba->brd_no);
  6034. /* Free memory allocated with sli3 rings */
  6035. kfree(phba->sli.sli3_ring);
  6036. phba->sli.sli3_ring = NULL;
  6037. kfree(phba);
  6038. return;
  6039. }
  6040. /**
  6041. * lpfc_create_shost - Create hba physical port with associated scsi host.
  6042. * @phba: pointer to lpfc hba data structure.
  6043. *
  6044. * This routine is invoked to create HBA physical port and associate a SCSI
  6045. * host with it.
  6046. *
  6047. * Return codes
  6048. * 0 - successful
  6049. * other values - error
  6050. **/
  6051. static int
  6052. lpfc_create_shost(struct lpfc_hba *phba)
  6053. {
  6054. struct lpfc_vport *vport;
  6055. struct Scsi_Host *shost;
  6056. /* Initialize HBA FC structure */
  6057. phba->fc_edtov = FF_DEF_EDTOV;
  6058. phba->fc_ratov = FF_DEF_RATOV;
  6059. phba->fc_altov = FF_DEF_ALTOV;
  6060. phba->fc_arbtov = FF_DEF_ARBTOV;
  6061. atomic_set(&phba->sdev_cnt, 0);
  6062. vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
  6063. if (!vport)
  6064. return -ENODEV;
  6065. shost = lpfc_shost_from_vport(vport);
  6066. phba->pport = vport;
  6067. if (phba->nvmet_support) {
  6068. /* Only 1 vport (pport) will support NVME target */
  6069. if (phba->txrdy_payload_pool == NULL) {
  6070. phba->txrdy_payload_pool = pci_pool_create(
  6071. "txrdy_pool", phba->pcidev,
  6072. TXRDY_PAYLOAD_LEN, 16, 0);
  6073. if (phba->txrdy_payload_pool) {
  6074. phba->targetport = NULL;
  6075. phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
  6076. lpfc_printf_log(phba, KERN_INFO,
  6077. LOG_INIT | LOG_NVME_DISC,
  6078. "6076 NVME Target Found\n");
  6079. }
  6080. }
  6081. }
  6082. lpfc_debugfs_initialize(vport);
  6083. /* Put reference to SCSI host to driver's device private data */
  6084. pci_set_drvdata(phba->pcidev, shost);
  6085. /*
  6086. * At this point we are fully registered with PSA. In addition,
  6087. * any initial discovery should be completed.
  6088. */
  6089. vport->load_flag |= FC_ALLOW_FDMI;
  6090. if (phba->cfg_enable_SmartSAN ||
  6091. (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
  6092. /* Setup appropriate attribute masks */
  6093. vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
  6094. if (phba->cfg_enable_SmartSAN)
  6095. vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
  6096. else
  6097. vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
  6098. }
  6099. return 0;
  6100. }
  6101. /**
  6102. * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
  6103. * @phba: pointer to lpfc hba data structure.
  6104. *
  6105. * This routine is invoked to destroy HBA physical port and the associated
  6106. * SCSI host.
  6107. **/
  6108. static void
  6109. lpfc_destroy_shost(struct lpfc_hba *phba)
  6110. {
  6111. struct lpfc_vport *vport = phba->pport;
  6112. /* Destroy physical port that associated with the SCSI host */
  6113. destroy_port(vport);
  6114. return;
  6115. }
  6116. /**
  6117. * lpfc_setup_bg - Setup Block guard structures and debug areas.
  6118. * @phba: pointer to lpfc hba data structure.
  6119. * @shost: the shost to be used to detect Block guard settings.
  6120. *
  6121. * This routine sets up the local Block guard protocol settings for @shost.
  6122. * This routine also allocates memory for debugging bg buffers.
  6123. **/
  6124. static void
  6125. lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
  6126. {
  6127. uint32_t old_mask;
  6128. uint32_t old_guard;
  6129. int pagecnt = 10;
  6130. if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
  6131. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  6132. "1478 Registering BlockGuard with the "
  6133. "SCSI layer\n");
  6134. old_mask = phba->cfg_prot_mask;
  6135. old_guard = phba->cfg_prot_guard;
  6136. /* Only allow supported values */
  6137. phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
  6138. SHOST_DIX_TYPE0_PROTECTION |
  6139. SHOST_DIX_TYPE1_PROTECTION);
  6140. phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
  6141. SHOST_DIX_GUARD_CRC);
  6142. /* DIF Type 1 protection for profiles AST1/C1 is end to end */
  6143. if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
  6144. phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
  6145. if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
  6146. if ((old_mask != phba->cfg_prot_mask) ||
  6147. (old_guard != phba->cfg_prot_guard))
  6148. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6149. "1475 Registering BlockGuard with the "
  6150. "SCSI layer: mask %d guard %d\n",
  6151. phba->cfg_prot_mask,
  6152. phba->cfg_prot_guard);
  6153. scsi_host_set_prot(shost, phba->cfg_prot_mask);
  6154. scsi_host_set_guard(shost, phba->cfg_prot_guard);
  6155. } else
  6156. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6157. "1479 Not Registering BlockGuard with the SCSI "
  6158. "layer, Bad protection parameters: %d %d\n",
  6159. old_mask, old_guard);
  6160. }
  6161. if (!_dump_buf_data) {
  6162. while (pagecnt) {
  6163. spin_lock_init(&_dump_buf_lock);
  6164. _dump_buf_data =
  6165. (char *) __get_free_pages(GFP_KERNEL, pagecnt);
  6166. if (_dump_buf_data) {
  6167. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6168. "9043 BLKGRD: allocated %d pages for "
  6169. "_dump_buf_data at 0x%p\n",
  6170. (1 << pagecnt), _dump_buf_data);
  6171. _dump_buf_data_order = pagecnt;
  6172. memset(_dump_buf_data, 0,
  6173. ((1 << PAGE_SHIFT) << pagecnt));
  6174. break;
  6175. } else
  6176. --pagecnt;
  6177. }
  6178. if (!_dump_buf_data_order)
  6179. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6180. "9044 BLKGRD: ERROR unable to allocate "
  6181. "memory for hexdump\n");
  6182. } else
  6183. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6184. "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
  6185. "\n", _dump_buf_data);
  6186. if (!_dump_buf_dif) {
  6187. while (pagecnt) {
  6188. _dump_buf_dif =
  6189. (char *) __get_free_pages(GFP_KERNEL, pagecnt);
  6190. if (_dump_buf_dif) {
  6191. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6192. "9046 BLKGRD: allocated %d pages for "
  6193. "_dump_buf_dif at 0x%p\n",
  6194. (1 << pagecnt), _dump_buf_dif);
  6195. _dump_buf_dif_order = pagecnt;
  6196. memset(_dump_buf_dif, 0,
  6197. ((1 << PAGE_SHIFT) << pagecnt));
  6198. break;
  6199. } else
  6200. --pagecnt;
  6201. }
  6202. if (!_dump_buf_dif_order)
  6203. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6204. "9047 BLKGRD: ERROR unable to allocate "
  6205. "memory for hexdump\n");
  6206. } else
  6207. lpfc_printf_log(phba, KERN_ERR, LOG_BG,
  6208. "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
  6209. _dump_buf_dif);
  6210. }
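/*
 * Illustrative note (not part of the original source): pagecnt above is
 * passed to __get_free_pages() as an allocation order, not a page count, so
 * the first attempt asks for 2^10 = 1024 contiguous pages (4MB with 4KB
 * pages) and each retry halves the request (order 10 -> 9 -> 8 ...) until an
 * allocation succeeds. That is why the log messages report (1 << pagecnt)
 * pages rather than pagecnt.
 */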
  6211. /**
  6212. * lpfc_post_init_setup - Perform necessary device post initialization setup.
  6213. * @phba: pointer to lpfc hba data structure.
  6214. *
  6215. * This routine is invoked to perform all the necessary post initialization
  6216. * setup for the device.
  6217. **/
  6218. static void
  6219. lpfc_post_init_setup(struct lpfc_hba *phba)
  6220. {
  6221. struct Scsi_Host *shost;
  6222. struct lpfc_adapter_event_header adapter_event;
  6223. /* Get the default values for Model Name and Description */
  6224. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  6225. /*
  6226. * hba setup may have changed the hba_queue_depth so we need to
  6227. * adjust the value of can_queue.
  6228. */
  6229. shost = pci_get_drvdata(phba->pcidev);
  6230. shost->can_queue = phba->cfg_hba_queue_depth - 10;
  6231. if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
  6232. lpfc_setup_bg(phba, shost);
  6233. lpfc_host_attrib_init(shost);
  6234. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  6235. spin_lock_irq(shost->host_lock);
  6236. lpfc_poll_start_timer(phba);
  6237. spin_unlock_irq(shost->host_lock);
  6238. }
  6239. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  6240. "0428 Perform SCSI scan\n");
  6241. /* Send board arrival event to upper layer */
  6242. adapter_event.event_type = FC_REG_ADAPTER_EVENT;
  6243. adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
  6244. fc_host_post_vendor_event(shost, fc_get_event_number(),
  6245. sizeof(adapter_event),
  6246. (char *) &adapter_event,
  6247. LPFC_NL_VENDOR_ID);
  6248. return;
  6249. }
  6250. /**
  6251. * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
  6252. * @phba: pointer to lpfc hba data structure.
  6253. *
  6254. * This routine is invoked to set up the PCI device memory space for device
  6255. * with SLI-3 interface spec.
  6256. *
  6257. * Return codes
  6258. * 0 - successful
  6259. * other values - error
  6260. **/
  6261. static int
  6262. lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
  6263. {
  6264. struct pci_dev *pdev;
  6265. unsigned long bar0map_len, bar2map_len;
  6266. int i, hbq_count;
  6267. void *ptr;
  6268. int error = -ENODEV;
  6269. /* Obtain PCI device reference */
  6270. if (!phba->pcidev)
  6271. return error;
  6272. else
  6273. pdev = phba->pcidev;
  6274. /* Set the device DMA mask size */
  6275. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
  6276. || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
  6277. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
  6278. || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
  6279. return error;
  6280. }
  6281. }
  6282. /* Get the bus address of Bar0 and Bar2 and the number of bytes
  6283. * required by each mapping.
  6284. */
  6285. phba->pci_bar0_map = pci_resource_start(pdev, 0);
  6286. bar0map_len = pci_resource_len(pdev, 0);
  6287. phba->pci_bar2_map = pci_resource_start(pdev, 2);
  6288. bar2map_len = pci_resource_len(pdev, 2);
  6289. /* Map HBA SLIM to a kernel virtual address. */
  6290. phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
  6291. if (!phba->slim_memmap_p) {
  6292. dev_printk(KERN_ERR, &pdev->dev,
  6293. "ioremap failed for SLIM memory.\n");
  6294. goto out;
  6295. }
  6296. /* Map HBA Control Registers to a kernel virtual address. */
  6297. phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
  6298. if (!phba->ctrl_regs_memmap_p) {
  6299. dev_printk(KERN_ERR, &pdev->dev,
  6300. "ioremap failed for HBA control registers.\n");
  6301. goto out_iounmap_slim;
  6302. }
  6303. /* Allocate memory for SLI-2 structures */
  6304. phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  6305. &phba->slim2p.phys, GFP_KERNEL);
  6306. if (!phba->slim2p.virt)
  6307. goto out_iounmap;
  6308. phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
  6309. phba->mbox_ext = (phba->slim2p.virt +
  6310. offsetof(struct lpfc_sli2_slim, mbx_ext_words));
  6311. phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
  6312. phba->IOCBs = (phba->slim2p.virt +
  6313. offsetof(struct lpfc_sli2_slim, IOCBs));
  6314. phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
  6315. lpfc_sli_hbq_size(),
  6316. &phba->hbqslimp.phys,
  6317. GFP_KERNEL);
  6318. if (!phba->hbqslimp.virt)
  6319. goto out_free_slim;
  6320. hbq_count = lpfc_sli_hbq_count();
  6321. ptr = phba->hbqslimp.virt;
  6322. for (i = 0; i < hbq_count; ++i) {
  6323. phba->hbqs[i].hbq_virt = ptr;
  6324. INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
  6325. ptr += (lpfc_hbq_defs[i]->entry_count *
  6326. sizeof(struct lpfc_hbq_entry));
  6327. }
  6328. phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
  6329. phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
  6330. memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
  6331. phba->MBslimaddr = phba->slim_memmap_p;
  6332. phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
  6333. phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
  6334. phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
  6335. phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
  6336. return 0;
  6337. out_free_slim:
  6338. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  6339. phba->slim2p.virt, phba->slim2p.phys);
  6340. out_iounmap:
  6341. iounmap(phba->ctrl_regs_memmap_p);
  6342. out_iounmap_slim:
  6343. iounmap(phba->slim_memmap_p);
  6344. out:
  6345. return error;
  6346. }
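/*
 * Illustrative summary (not part of the original source): for SLI-3 parts
 * the mapping above uses two PCI BARs - BAR0 exposes the SLIM (shared
 * memory) area and BAR2 the host control registers, from which the HA, CA,
 * HS and HC register addresses are derived at fixed offsets. The two
 * coherent DMA buffers carved out here back the SLI-2 shadow area in slim2p
 * (mailbox, mailbox extension, PCB and IOCBs) and the host buffer queues in
 * hbqslimp, with per-HBQ pointers sized from lpfc_hbq_defs[].
 */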
  6347. /**
  6348. * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
  6349. * @phba: pointer to lpfc hba data structure.
  6350. *
  6351. * This routine is invoked to unset the PCI device memory space for device
  6352. * with SLI-3 interface spec.
  6353. **/
  6354. static void
  6355. lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
  6356. {
  6357. struct pci_dev *pdev;
  6358. /* Obtain PCI device reference */
  6359. if (!phba->pcidev)
  6360. return;
  6361. else
  6362. pdev = phba->pcidev;
  6363. /* Free coherent DMA memory allocated */
  6364. dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
  6365. phba->hbqslimp.virt, phba->hbqslimp.phys);
  6366. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  6367. phba->slim2p.virt, phba->slim2p.phys);
  6368. /* I/O memory unmap */
  6369. iounmap(phba->ctrl_regs_memmap_p);
  6370. iounmap(phba->slim_memmap_p);
  6371. return;
  6372. }
  6373. /**
  6374. * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
  6375. * @phba: pointer to lpfc hba data structure.
  6376. *
  6377. * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
  6378. * done and check status.
  6379. *
  6380. * Return 0 if successful, otherwise -ENODEV.
  6381. **/
  6382. int
  6383. lpfc_sli4_post_status_check(struct lpfc_hba *phba)
  6384. {
  6385. struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
  6386. struct lpfc_register reg_data;
  6387. int i, port_error = 0;
  6388. uint32_t if_type;
  6389. memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
  6390. memset(&reg_data, 0, sizeof(reg_data));
  6391. if (!phba->sli4_hba.PSMPHRregaddr)
  6392. return -ENODEV;
  6393. /* Wait up to 30 seconds for the SLI Port POST done and ready */
  6394. for (i = 0; i < 3000; i++) {
  6395. if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  6396. &portsmphr_reg.word0) ||
  6397. (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
  6398. /* Port has a fatal POST error, break out */
  6399. port_error = -ENODEV;
  6400. break;
  6401. }
  6402. if (LPFC_POST_STAGE_PORT_READY ==
  6403. bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
  6404. break;
  6405. msleep(10);
  6406. }
  6407. /*
  6408. * If there was a port error during POST, then don't proceed with
  6409. * other register reads as the data may not be valid. Just exit.
  6410. */
  6411. if (port_error) {
  6412. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6413. "1408 Port Failed POST - portsmphr=0x%x, "
  6414. "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
  6415. "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
  6416. portsmphr_reg.word0,
  6417. bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
  6418. bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
  6419. bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
  6420. bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
  6421. bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
  6422. bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
  6423. bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
  6424. bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
  6425. } else {
  6426. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  6427. "2534 Device Info: SLIFamily=0x%x, "
  6428. "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
  6429. "SLIHint_2=0x%x, FT=0x%x\n",
  6430. bf_get(lpfc_sli_intf_sli_family,
  6431. &phba->sli4_hba.sli_intf),
  6432. bf_get(lpfc_sli_intf_slirev,
  6433. &phba->sli4_hba.sli_intf),
  6434. bf_get(lpfc_sli_intf_if_type,
  6435. &phba->sli4_hba.sli_intf),
  6436. bf_get(lpfc_sli_intf_sli_hint1,
  6437. &phba->sli4_hba.sli_intf),
  6438. bf_get(lpfc_sli_intf_sli_hint2,
  6439. &phba->sli4_hba.sli_intf),
  6440. bf_get(lpfc_sli_intf_func_type,
  6441. &phba->sli4_hba.sli_intf));
  6442. /*
  6443. * Check for other Port errors during the initialization
  6444. * process. Fail the load if the port did not come up
  6445. * correctly.
  6446. */
  6447. if_type = bf_get(lpfc_sli_intf_if_type,
  6448. &phba->sli4_hba.sli_intf);
  6449. switch (if_type) {
  6450. case LPFC_SLI_INTF_IF_TYPE_0:
  6451. phba->sli4_hba.ue_mask_lo =
  6452. readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
  6453. phba->sli4_hba.ue_mask_hi =
  6454. readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
  6455. uerrlo_reg.word0 =
  6456. readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
  6457. uerrhi_reg.word0 =
  6458. readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
  6459. if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
  6460. (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
  6461. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6462. "1422 Unrecoverable Error "
  6463. "Detected during POST "
  6464. "uerr_lo_reg=0x%x, "
  6465. "uerr_hi_reg=0x%x, "
  6466. "ue_mask_lo_reg=0x%x, "
  6467. "ue_mask_hi_reg=0x%x\n",
  6468. uerrlo_reg.word0,
  6469. uerrhi_reg.word0,
  6470. phba->sli4_hba.ue_mask_lo,
  6471. phba->sli4_hba.ue_mask_hi);
  6472. port_error = -ENODEV;
  6473. }
  6474. break;
  6475. case LPFC_SLI_INTF_IF_TYPE_2:
  6476. /* Final checks. The port status should be clean. */
  6477. if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
  6478. &reg_data.word0) ||
  6479. (bf_get(lpfc_sliport_status_err, &reg_data) &&
  6480. !bf_get(lpfc_sliport_status_rn, &reg_data))) {
  6481. phba->work_status[0] =
  6482. readl(phba->sli4_hba.u.if_type2.
  6483. ERR1regaddr);
  6484. phba->work_status[1] =
  6485. readl(phba->sli4_hba.u.if_type2.
  6486. ERR2regaddr);
  6487. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6488. "2888 Unrecoverable port error "
  6489. "following POST: port status reg "
  6490. "0x%x, port_smphr reg 0x%x, "
  6491. "error 1=0x%x, error 2=0x%x\n",
  6492. reg_data.word0,
  6493. portsmphr_reg.word0,
  6494. phba->work_status[0],
  6495. phba->work_status[1]);
  6496. port_error = -ENODEV;
  6497. }
  6498. break;
  6499. case LPFC_SLI_INTF_IF_TYPE_1:
  6500. default:
  6501. break;
  6502. }
  6503. }
  6504. return port_error;
  6505. }
  6506. /**
  6507. * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
  6508. * @phba: pointer to lpfc hba data structure.
  6509. * @if_type: The SLI4 interface type getting configured.
  6510. *
  6511. * This routine is invoked to set up SLI4 BAR0 PCI config space register
  6512. * memory map.
  6513. **/
  6514. static void
  6515. lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
  6516. {
  6517. switch (if_type) {
  6518. case LPFC_SLI_INTF_IF_TYPE_0:
  6519. phba->sli4_hba.u.if_type0.UERRLOregaddr =
  6520. phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
  6521. phba->sli4_hba.u.if_type0.UERRHIregaddr =
  6522. phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
  6523. phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
  6524. phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
  6525. phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
  6526. phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
  6527. phba->sli4_hba.SLIINTFregaddr =
  6528. phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
  6529. break;
  6530. case LPFC_SLI_INTF_IF_TYPE_2:
  6531. phba->sli4_hba.u.if_type2.ERR1regaddr =
  6532. phba->sli4_hba.conf_regs_memmap_p +
  6533. LPFC_CTL_PORT_ER1_OFFSET;
  6534. phba->sli4_hba.u.if_type2.ERR2regaddr =
  6535. phba->sli4_hba.conf_regs_memmap_p +
  6536. LPFC_CTL_PORT_ER2_OFFSET;
  6537. phba->sli4_hba.u.if_type2.CTRLregaddr =
  6538. phba->sli4_hba.conf_regs_memmap_p +
  6539. LPFC_CTL_PORT_CTL_OFFSET;
  6540. phba->sli4_hba.u.if_type2.STATUSregaddr =
  6541. phba->sli4_hba.conf_regs_memmap_p +
  6542. LPFC_CTL_PORT_STA_OFFSET;
  6543. phba->sli4_hba.SLIINTFregaddr =
  6544. phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
  6545. phba->sli4_hba.PSMPHRregaddr =
  6546. phba->sli4_hba.conf_regs_memmap_p +
  6547. LPFC_CTL_PORT_SEM_OFFSET;
  6548. phba->sli4_hba.RQDBregaddr =
  6549. phba->sli4_hba.conf_regs_memmap_p +
  6550. LPFC_ULP0_RQ_DOORBELL;
  6551. phba->sli4_hba.WQDBregaddr =
  6552. phba->sli4_hba.conf_regs_memmap_p +
  6553. LPFC_ULP0_WQ_DOORBELL;
  6554. phba->sli4_hba.EQCQDBregaddr =
  6555. phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
  6556. phba->sli4_hba.MQDBregaddr =
  6557. phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
  6558. phba->sli4_hba.BMBXregaddr =
  6559. phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
  6560. break;
  6561. case LPFC_SLI_INTF_IF_TYPE_1:
  6562. default:
  6563. dev_printk(KERN_ERR, &phba->pcidev->dev,
  6564. "FATAL - unsupported SLI4 interface type - %d\n",
  6565. if_type);
  6566. break;
  6567. }
  6568. }
  6569. /**
  6570. * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
  6571. * @phba: pointer to lpfc hba data structure.
  6572. *
  6573. * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
  6574. * memory map.
  6575. **/
  6576. static void
  6577. lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
  6578. {
  6579. phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
  6580. LPFC_SLIPORT_IF0_SMPHR;
  6581. phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
  6582. LPFC_HST_ISR0;
  6583. phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
  6584. LPFC_HST_IMR0;
  6585. phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
  6586. LPFC_HST_ISCR0;
  6587. }
  6588. /**
  6589. * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
  6590. * @phba: pointer to lpfc hba data structure.
  6591. * @vf: virtual function number
  6592. *
  6593. * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
   6594. * based on the given virtual function number, @vf.
  6595. *
  6596. * Return 0 if successful, otherwise -ENODEV.
  6597. **/
  6598. static int
  6599. lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
  6600. {
  6601. if (vf > LPFC_VIR_FUNC_MAX)
  6602. return -ENODEV;
  6603. phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
  6604. vf * LPFC_VFR_PAGE_SIZE +
  6605. LPFC_ULP0_RQ_DOORBELL);
  6606. phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
  6607. vf * LPFC_VFR_PAGE_SIZE +
  6608. LPFC_ULP0_WQ_DOORBELL);
  6609. phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
  6610. vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
  6611. phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
  6612. vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
  6613. phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
  6614. vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
  6615. return 0;
  6616. }
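/*
 * Illustrative sketch (assumption, not from the original source): each
 * virtual function owns one LPFC_VFR_PAGE_SIZE page of doorbells in BAR2,
 * so a doorbell register address is computed as
 *
 *   drbl_regs_memmap_p + (vf * LPFC_VFR_PAGE_SIZE) + <doorbell offset>
 *
 * where LPFC_ULP0_RQ_DOORBELL, LPFC_ULP0_WQ_DOORBELL, LPFC_EQCQ_DOORBELL,
 * LPFC_MQ_DOORBELL and LPFC_BMBX are the per-page offsets used above.
 */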
  6617. /**
  6618. * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
  6619. * @phba: pointer to lpfc hba data structure.
  6620. *
  6621. * This routine is invoked to create the bootstrap mailbox
  6622. * region consistent with the SLI-4 interface spec. This
  6623. * routine allocates all memory necessary to communicate
  6624. * mailbox commands to the port and sets up all alignment
  6625. * needs. No locks are expected to be held when calling
  6626. * this routine.
  6627. *
  6628. * Return codes
  6629. * 0 - successful
   6630. * -ENOMEM - could not allocate memory.
  6631. **/
  6632. static int
  6633. lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
  6634. {
  6635. uint32_t bmbx_size;
  6636. struct lpfc_dmabuf *dmabuf;
  6637. struct dma_address *dma_address;
  6638. uint32_t pa_addr;
  6639. uint64_t phys_addr;
  6640. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  6641. if (!dmabuf)
  6642. return -ENOMEM;
  6643. /*
  6644. * The bootstrap mailbox region is comprised of 2 parts
  6645. * plus an alignment restriction of 16 bytes.
  6646. */
  6647. bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
  6648. dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
  6649. &dmabuf->phys, GFP_KERNEL);
  6650. if (!dmabuf->virt) {
  6651. kfree(dmabuf);
  6652. return -ENOMEM;
  6653. }
  6654. /*
  6655. * Initialize the bootstrap mailbox pointers now so that the register
  6656. * operations are simple later. The mailbox dma address is required
  6657. * to be 16-byte aligned. Also align the virtual memory as each
   6658. * mailbox is copied into the bmbx mailbox region before issuing the
  6659. * command to the port.
  6660. */
  6661. phba->sli4_hba.bmbx.dmabuf = dmabuf;
  6662. phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
  6663. phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
  6664. LPFC_ALIGN_16_BYTE);
  6665. phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
  6666. LPFC_ALIGN_16_BYTE);
  6667. /*
  6668. * Set the high and low physical addresses now. The SLI4 alignment
  6669. * requirement is 16 bytes and the mailbox is posted to the port
  6670. * as two 30-bit addresses. The other data is a bit marking whether
  6671. * the 30-bit address is the high or low address.
  6672. * Upcast bmbx aphys to 64bits so shift instruction compiles
  6673. * clean on 32 bit machines.
  6674. */
  6675. dma_address = &phba->sli4_hba.bmbx.dma_address;
  6676. phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
  6677. pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
  6678. dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
  6679. LPFC_BMBX_BIT1_ADDR_HI);
  6680. pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
  6681. dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
  6682. LPFC_BMBX_BIT1_ADDR_LO);
  6683. return 0;
  6684. }
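/*
 * Illustrative note (not part of the original source): the aligned bootstrap
 * mailbox address is 16-byte aligned, so only bits 63:4 are significant.
 * The address split above places:
 *
 *   bits 63:34 in addr_hi  ((aphys >> 34) & 0x3fffffff)
 *   bits 33:4  in addr_lo  ((aphys >>  4) & 0x3fffffff)
 *
 * Each 30-bit half is shifted left by two and tagged with
 * LPFC_BMBX_BIT1_ADDR_HI or LPFC_BMBX_BIT1_ADDR_LO so the port knows which
 * half of the address a given doorbell write carries.
 */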
  6685. /**
  6686. * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
  6687. * @phba: pointer to lpfc hba data structure.
  6688. *
  6689. * This routine is invoked to teardown the bootstrap mailbox
  6690. * region and release all host resources. This routine requires
   6691. * the caller to ensure all mailbox commands have been recovered, no
   6692. * additional mailbox commands are sent, and interrupts are disabled
  6693. * before calling this routine.
  6694. *
  6695. **/
  6696. static void
  6697. lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
  6698. {
  6699. dma_free_coherent(&phba->pcidev->dev,
  6700. phba->sli4_hba.bmbx.bmbx_size,
  6701. phba->sli4_hba.bmbx.dmabuf->virt,
  6702. phba->sli4_hba.bmbx.dmabuf->phys);
  6703. kfree(phba->sli4_hba.bmbx.dmabuf);
  6704. memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
  6705. }
  6706. /**
  6707. * lpfc_sli4_read_config - Get the config parameters.
  6708. * @phba: pointer to lpfc hba data structure.
  6709. *
  6710. * This routine is invoked to read the configuration parameters from the HBA.
  6711. * The configuration parameters are used to set the base and maximum values
   6712. * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
  6713. * allocation for the port.
  6714. *
  6715. * Return codes
  6716. * 0 - successful
  6717. * -ENOMEM - No available memory
  6718. * -EIO - The mailbox failed to complete successfully.
  6719. **/
  6720. int
  6721. lpfc_sli4_read_config(struct lpfc_hba *phba)
  6722. {
  6723. LPFC_MBOXQ_t *pmb;
  6724. struct lpfc_mbx_read_config *rd_config;
  6725. union lpfc_sli4_cfg_shdr *shdr;
  6726. uint32_t shdr_status, shdr_add_status;
  6727. struct lpfc_mbx_get_func_cfg *get_func_cfg;
  6728. struct lpfc_rsrc_desc_fcfcoe *desc;
  6729. char *pdesc_0;
  6730. uint16_t forced_link_speed;
  6731. uint32_t if_type;
  6732. int length, i, rc = 0, rc2;
  6733. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  6734. if (!pmb) {
  6735. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  6736. "2011 Unable to allocate memory for issuing "
  6737. "SLI_CONFIG_SPECIAL mailbox command\n");
  6738. return -ENOMEM;
  6739. }
  6740. lpfc_read_config(phba, pmb);
  6741. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  6742. if (rc != MBX_SUCCESS) {
  6743. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  6744. "2012 Mailbox failed , mbxCmd x%x "
  6745. "READ_CONFIG, mbxStatus x%x\n",
  6746. bf_get(lpfc_mqe_command, &pmb->u.mqe),
  6747. bf_get(lpfc_mqe_status, &pmb->u.mqe));
  6748. rc = -EIO;
  6749. } else {
  6750. rd_config = &pmb->u.mqe.un.rd_config;
  6751. if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
  6752. phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
  6753. phba->sli4_hba.lnk_info.lnk_tp =
  6754. bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
  6755. phba->sli4_hba.lnk_info.lnk_no =
  6756. bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
  6757. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  6758. "3081 lnk_type:%d, lnk_numb:%d\n",
  6759. phba->sli4_hba.lnk_info.lnk_tp,
  6760. phba->sli4_hba.lnk_info.lnk_no);
  6761. } else
  6762. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  6763. "3082 Mailbox (x%x) returned ldv:x0\n",
  6764. bf_get(lpfc_mqe_command, &pmb->u.mqe));
  6765. phba->sli4_hba.extents_in_use =
  6766. bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
  6767. phba->sli4_hba.max_cfg_param.max_xri =
  6768. bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
  6769. phba->sli4_hba.max_cfg_param.xri_base =
  6770. bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
  6771. phba->sli4_hba.max_cfg_param.max_vpi =
  6772. bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
  6773. phba->sli4_hba.max_cfg_param.vpi_base =
  6774. bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
  6775. phba->sli4_hba.max_cfg_param.max_rpi =
  6776. bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
  6777. phba->sli4_hba.max_cfg_param.rpi_base =
  6778. bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
  6779. phba->sli4_hba.max_cfg_param.max_vfi =
  6780. bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
  6781. phba->sli4_hba.max_cfg_param.vfi_base =
  6782. bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
  6783. phba->sli4_hba.max_cfg_param.max_fcfi =
  6784. bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
  6785. phba->sli4_hba.max_cfg_param.max_eq =
  6786. bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
  6787. phba->sli4_hba.max_cfg_param.max_rq =
  6788. bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
  6789. phba->sli4_hba.max_cfg_param.max_wq =
  6790. bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
  6791. phba->sli4_hba.max_cfg_param.max_cq =
  6792. bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
  6793. phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
  6794. phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
  6795. phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
  6796. phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
  6797. phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
  6798. (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
  6799. phba->max_vports = phba->max_vpi;
  6800. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  6801. "2003 cfg params Extents? %d "
  6802. "XRI(B:%d M:%d), "
  6803. "VPI(B:%d M:%d) "
  6804. "VFI(B:%d M:%d) "
  6805. "RPI(B:%d M:%d) "
  6806. "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
  6807. phba->sli4_hba.extents_in_use,
  6808. phba->sli4_hba.max_cfg_param.xri_base,
  6809. phba->sli4_hba.max_cfg_param.max_xri,
  6810. phba->sli4_hba.max_cfg_param.vpi_base,
  6811. phba->sli4_hba.max_cfg_param.max_vpi,
  6812. phba->sli4_hba.max_cfg_param.vfi_base,
  6813. phba->sli4_hba.max_cfg_param.max_vfi,
  6814. phba->sli4_hba.max_cfg_param.rpi_base,
  6815. phba->sli4_hba.max_cfg_param.max_rpi,
  6816. phba->sli4_hba.max_cfg_param.max_fcfi,
  6817. phba->sli4_hba.max_cfg_param.max_eq,
  6818. phba->sli4_hba.max_cfg_param.max_cq,
  6819. phba->sli4_hba.max_cfg_param.max_wq,
  6820. phba->sli4_hba.max_cfg_param.max_rq);
  6821. }
  6822. if (rc)
  6823. goto read_cfg_out;
  6824. /* Update link speed if forced link speed is supported */
  6825. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  6826. if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
  6827. forced_link_speed =
  6828. bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
  6829. if (forced_link_speed) {
  6830. phba->hba_flag |= HBA_FORCED_LINK_SPEED;
  6831. switch (forced_link_speed) {
  6832. case LINK_SPEED_1G:
  6833. phba->cfg_link_speed =
  6834. LPFC_USER_LINK_SPEED_1G;
  6835. break;
  6836. case LINK_SPEED_2G:
  6837. phba->cfg_link_speed =
  6838. LPFC_USER_LINK_SPEED_2G;
  6839. break;
  6840. case LINK_SPEED_4G:
  6841. phba->cfg_link_speed =
  6842. LPFC_USER_LINK_SPEED_4G;
  6843. break;
  6844. case LINK_SPEED_8G:
  6845. phba->cfg_link_speed =
  6846. LPFC_USER_LINK_SPEED_8G;
  6847. break;
  6848. case LINK_SPEED_10G:
  6849. phba->cfg_link_speed =
  6850. LPFC_USER_LINK_SPEED_10G;
  6851. break;
  6852. case LINK_SPEED_16G:
  6853. phba->cfg_link_speed =
  6854. LPFC_USER_LINK_SPEED_16G;
  6855. break;
  6856. case LINK_SPEED_32G:
  6857. phba->cfg_link_speed =
  6858. LPFC_USER_LINK_SPEED_32G;
  6859. break;
  6860. case 0xffff:
  6861. phba->cfg_link_speed =
  6862. LPFC_USER_LINK_SPEED_AUTO;
  6863. break;
  6864. default:
  6865. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  6866. "0047 Unrecognized link "
  6867. "speed : %d\n",
  6868. forced_link_speed);
  6869. phba->cfg_link_speed =
  6870. LPFC_USER_LINK_SPEED_AUTO;
  6871. }
  6872. }
  6873. }
  6874. /* Reset the DFT_HBA_Q_DEPTH to the max xri */
  6875. length = phba->sli4_hba.max_cfg_param.max_xri -
  6876. lpfc_sli4_get_els_iocb_cnt(phba);
  6877. if (phba->cfg_hba_queue_depth > length) {
  6878. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  6879. "3361 HBA queue depth changed from %d to %d\n",
  6880. phba->cfg_hba_queue_depth, length);
  6881. phba->cfg_hba_queue_depth = length;
  6882. }
  6883. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
  6884. LPFC_SLI_INTF_IF_TYPE_2)
  6885. goto read_cfg_out;
  6886. /* get the pf# and vf# for SLI4 if_type 2 port */
  6887. length = (sizeof(struct lpfc_mbx_get_func_cfg) -
  6888. sizeof(struct lpfc_sli4_cfg_mhdr));
  6889. lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
  6890. LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
  6891. length, LPFC_SLI4_MBX_EMBED);
  6892. rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  6893. shdr = (union lpfc_sli4_cfg_shdr *)
  6894. &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
  6895. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  6896. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  6897. if (rc2 || shdr_status || shdr_add_status) {
  6898. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  6899. "3026 Mailbox failed , mbxCmd x%x "
  6900. "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
  6901. bf_get(lpfc_mqe_command, &pmb->u.mqe),
  6902. bf_get(lpfc_mqe_status, &pmb->u.mqe));
  6903. goto read_cfg_out;
  6904. }
   6905. /* search for fc_fcoe resource descriptor */
  6906. get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
  6907. pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
  6908. desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
  6909. length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
  6910. if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
  6911. length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
  6912. else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
  6913. goto read_cfg_out;
  6914. for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
  6915. desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
  6916. if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
  6917. bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
  6918. phba->sli4_hba.iov.pf_number =
  6919. bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
  6920. phba->sli4_hba.iov.vf_number =
  6921. bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
  6922. break;
  6923. }
  6924. }
  6925. if (i < LPFC_RSRC_DESC_MAX_NUM)
  6926. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  6927. "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
  6928. "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
  6929. phba->sli4_hba.iov.vf_number);
  6930. else
  6931. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  6932. "3028 GET_FUNCTION_CONFIG: failed to find "
  6933. "Resrouce Descriptor:x%x\n",
  6934. LPFC_RSRC_DESC_TYPE_FCFCOE);
  6935. read_cfg_out:
  6936. mempool_free(pmb, phba->mbox_mem_pool);
  6937. return rc;
  6938. }
  6939. /**
  6940. * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
  6941. * @phba: pointer to lpfc hba data structure.
  6942. *
  6943. * This routine is invoked to setup the port-side endian order when
  6944. * the port if_type is 0. This routine has no function for other
  6945. * if_types.
  6946. *
  6947. * Return codes
  6948. * 0 - successful
  6949. * -ENOMEM - No available memory
  6950. * -EIO - The mailbox failed to complete successfully.
  6951. **/
  6952. static int
  6953. lpfc_setup_endian_order(struct lpfc_hba *phba)
  6954. {
  6955. LPFC_MBOXQ_t *mboxq;
  6956. uint32_t if_type, rc = 0;
  6957. uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
  6958. HOST_ENDIAN_HIGH_WORD1};
  6959. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  6960. switch (if_type) {
  6961. case LPFC_SLI_INTF_IF_TYPE_0:
  6962. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
  6963. GFP_KERNEL);
  6964. if (!mboxq) {
  6965. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6966. "0492 Unable to allocate memory for "
  6967. "issuing SLI_CONFIG_SPECIAL mailbox "
  6968. "command\n");
  6969. return -ENOMEM;
  6970. }
  6971. /*
  6972. * The SLI4_CONFIG_SPECIAL mailbox command requires the first
  6973. * two words to contain special data values and no other data.
  6974. */
  6975. memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
  6976. memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
  6977. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  6978. if (rc != MBX_SUCCESS) {
  6979. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  6980. "0493 SLI_CONFIG_SPECIAL mailbox "
  6981. "failed with status x%x\n",
  6982. rc);
  6983. rc = -EIO;
  6984. }
  6985. mempool_free(mboxq, phba->mbox_mem_pool);
  6986. break;
  6987. case LPFC_SLI_INTF_IF_TYPE_2:
  6988. case LPFC_SLI_INTF_IF_TYPE_1:
  6989. default:
  6990. break;
  6991. }
  6992. return rc;
  6993. }
  6994. /**
  6995. * lpfc_sli4_queue_verify - Verify and update EQ counts
  6996. * @phba: pointer to lpfc hba data structure.
  6997. *
  6998. * This routine is invoked to check the user settable queue counts for EQs.
  6999. * After this routine is called the counts will be set to valid values that
  7000. * adhere to the constraints of the system's interrupt vectors and the port's
  7001. * queue resources.
  7002. *
  7003. * Return codes
  7004. * 0 - successful
  7005. * -ENOMEM - No available memory
  7006. **/
  7007. static int
  7008. lpfc_sli4_queue_verify(struct lpfc_hba *phba)
  7009. {
  7010. int io_channel;
  7011. int fof_vectors = phba->cfg_fof ? 1 : 0;
  7012. /*
  7013. * Sanity check for configured queue parameters against the run-time
  7014. * device parameters
  7015. */
  7016. /* Sanity check on HBA EQ parameters */
  7017. io_channel = phba->io_channel_irqs;
  7018. if (phba->sli4_hba.num_online_cpu < io_channel) {
  7019. lpfc_printf_log(phba,
  7020. KERN_ERR, LOG_INIT,
  7021. "3188 Reducing IO channels to match number of "
  7022. "online CPUs: from %d to %d\n",
  7023. io_channel, phba->sli4_hba.num_online_cpu);
  7024. io_channel = phba->sli4_hba.num_online_cpu;
  7025. }
  7026. if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
  7027. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7028. "2575 Reducing IO channels to match number of "
  7029. "available EQs: from %d to %d\n",
  7030. io_channel,
  7031. phba->sli4_hba.max_cfg_param.max_eq);
  7032. io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
  7033. }
  7034. /* The actual number of FCP / NVME event queues adopted */
  7035. if (io_channel != phba->io_channel_irqs)
  7036. phba->io_channel_irqs = io_channel;
  7037. if (phba->cfg_fcp_io_channel > io_channel)
  7038. phba->cfg_fcp_io_channel = io_channel;
  7039. if (phba->cfg_nvme_io_channel > io_channel)
  7040. phba->cfg_nvme_io_channel = io_channel;
  7041. if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
  7042. phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
  7043. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7044. "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
  7045. phba->io_channel_irqs, phba->cfg_fcp_io_channel,
  7046. phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
  7047. /* Get EQ depth from module parameter, fake the default for now */
  7048. phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
  7049. phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
  7050. /* Get CQ depth from module parameter, fake the default for now */
  7051. phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
  7052. phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
  7053. return 0;
  7054. }
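/*
 * Illustrative example (hypothetical values, not from the original source):
 * with io_channel_irqs = 16, num_online_cpu = 8, max_eq = 12 and cfg_fof set
 * (fof_vectors = 1), the first check trims io_channel to 8 and the second
 * check passes (8 + 1 <= 12), so the driver ends up with 8 IO channels and
 * cfg_fcp_io_channel / cfg_nvme_io_channel are each capped at 8.
 */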
  7055. static int
  7056. lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
  7057. {
  7058. struct lpfc_queue *qdesc;
  7059. int cnt;
  7060. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  7061. phba->sli4_hba.cq_ecount);
  7062. if (!qdesc) {
  7063. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7064. "0508 Failed allocate fast-path NVME CQ (%d)\n",
  7065. wqidx);
  7066. return 1;
  7067. }
  7068. phba->sli4_hba.nvme_cq[wqidx] = qdesc;
  7069. cnt = LPFC_NVME_WQSIZE;
  7070. qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
  7071. if (!qdesc) {
  7072. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7073. "0509 Failed allocate fast-path NVME WQ (%d)\n",
  7074. wqidx);
  7075. return 1;
  7076. }
  7077. phba->sli4_hba.nvme_wq[wqidx] = qdesc;
  7078. list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
  7079. return 0;
  7080. }
  7081. static int
  7082. lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
  7083. {
  7084. struct lpfc_queue *qdesc;
  7085. uint32_t wqesize;
  7086. /* Create Fast Path FCP CQs */
  7087. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  7088. phba->sli4_hba.cq_ecount);
  7089. if (!qdesc) {
  7090. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7091. "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
  7092. return 1;
  7093. }
  7094. phba->sli4_hba.fcp_cq[wqidx] = qdesc;
  7095. /* Create Fast Path FCP WQs */
  7096. wqesize = (phba->fcp_embed_io) ?
  7097. LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
  7098. qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
  7099. if (!qdesc) {
  7100. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7101. "0503 Failed allocate fast-path FCP WQ (%d)\n",
  7102. wqidx);
  7103. return 1;
  7104. }
  7105. phba->sli4_hba.fcp_wq[wqidx] = qdesc;
  7106. list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
  7107. return 0;
  7108. }
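/*
 * Illustrative note (not part of the original source): the two helpers above
 * differ mainly in work-queue entry size - NVME WQs are always created with
 * 128-byte entries (LPFC_WQE128_SIZE and LPFC_NVME_WQSIZE entries), while
 * FCP WQs use 128-byte entries only when fcp_embed_io is enabled and
 * otherwise fall back to the default sli4_hba.wq_esize.
 */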
  7109. /**
  7110. * lpfc_sli4_queue_create - Create all the SLI4 queues
  7111. * @phba: pointer to lpfc hba data structure.
  7112. *
  7113. * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
  7114. * operation. For each SLI4 queue type, the parameters such as queue entry
  7115. * count (queue depth) shall be taken from the module parameter. For now,
  7116. * we just use some constant number as place holder.
  7117. *
  7118. * Return codes
  7119. * 0 - successful
   7120. * -ENOMEM - No available memory
  7121. * -EIO - The mailbox failed to complete successfully.
  7122. **/
  7123. int
  7124. lpfc_sli4_queue_create(struct lpfc_hba *phba)
  7125. {
  7126. struct lpfc_queue *qdesc;
  7127. int idx, io_channel, max;
  7128. /*
  7129. * Create HBA Record arrays.
  7130. * Both NVME and FCP will share that same vectors / EQs
  7131. */
  7132. io_channel = phba->io_channel_irqs;
  7133. if (!io_channel)
  7134. return -ERANGE;
  7135. phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
  7136. phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
  7137. phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
  7138. phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
  7139. phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
  7140. phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
  7141. phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
  7142. phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
  7143. phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
  7144. phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
  7145. phba->sli4_hba.hba_eq = kcalloc(io_channel,
  7146. sizeof(struct lpfc_queue *),
  7147. GFP_KERNEL);
  7148. if (!phba->sli4_hba.hba_eq) {
  7149. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7150. "2576 Failed allocate memory for "
  7151. "fast-path EQ record array\n");
  7152. goto out_error;
  7153. }
  7154. if (phba->cfg_fcp_io_channel) {
  7155. phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
  7156. sizeof(struct lpfc_queue *),
  7157. GFP_KERNEL);
  7158. if (!phba->sli4_hba.fcp_cq) {
  7159. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7160. "2577 Failed allocate memory for "
  7161. "fast-path CQ record array\n");
  7162. goto out_error;
  7163. }
  7164. phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
  7165. sizeof(struct lpfc_queue *),
  7166. GFP_KERNEL);
  7167. if (!phba->sli4_hba.fcp_wq) {
  7168. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7169. "2578 Failed allocate memory for "
  7170. "fast-path FCP WQ record array\n");
  7171. goto out_error;
  7172. }
  7173. /*
  7174. * Since the first EQ can have multiple CQs associated with it,
  7175. * this array is used to quickly see if we have a FCP fast-path
  7176. * CQ match.
  7177. */
  7178. phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
  7179. sizeof(uint16_t),
  7180. GFP_KERNEL);
  7181. if (!phba->sli4_hba.fcp_cq_map) {
  7182. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7183. "2545 Failed allocate memory for "
  7184. "fast-path CQ map\n");
  7185. goto out_error;
  7186. }
  7187. }
  7188. if (phba->cfg_nvme_io_channel) {
  7189. phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
  7190. sizeof(struct lpfc_queue *),
  7191. GFP_KERNEL);
  7192. if (!phba->sli4_hba.nvme_cq) {
  7193. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7194. "6077 Failed allocate memory for "
  7195. "fast-path CQ record array\n");
  7196. goto out_error;
  7197. }
  7198. phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
  7199. sizeof(struct lpfc_queue *),
  7200. GFP_KERNEL);
  7201. if (!phba->sli4_hba.nvme_wq) {
  7202. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7203. "2581 Failed allocate memory for "
  7204. "fast-path NVME WQ record array\n");
  7205. goto out_error;
  7206. }
  7207. /*
  7208. * Since the first EQ can have multiple CQs associated with it,
  7209. * this array is used to quickly see if we have a NVME fast-path
  7210. * CQ match.
  7211. */
  7212. phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
  7213. sizeof(uint16_t),
  7214. GFP_KERNEL);
  7215. if (!phba->sli4_hba.nvme_cq_map) {
  7216. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7217. "6078 Failed allocate memory for "
  7218. "fast-path CQ map\n");
  7219. goto out_error;
  7220. }
  7221. if (phba->nvmet_support) {
  7222. phba->sli4_hba.nvmet_cqset = kcalloc(
  7223. phba->cfg_nvmet_mrq,
  7224. sizeof(struct lpfc_queue *),
  7225. GFP_KERNEL);
  7226. if (!phba->sli4_hba.nvmet_cqset) {
  7227. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7228. "3121 Fail allocate memory for "
  7229. "fast-path CQ set array\n");
  7230. goto out_error;
  7231. }
  7232. phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
  7233. phba->cfg_nvmet_mrq,
  7234. sizeof(struct lpfc_queue *),
  7235. GFP_KERNEL);
  7236. if (!phba->sli4_hba.nvmet_mrq_hdr) {
  7237. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7238. "3122 Fail allocate memory for "
  7239. "fast-path RQ set hdr array\n");
  7240. goto out_error;
  7241. }
  7242. phba->sli4_hba.nvmet_mrq_data = kcalloc(
  7243. phba->cfg_nvmet_mrq,
  7244. sizeof(struct lpfc_queue *),
  7245. GFP_KERNEL);
  7246. if (!phba->sli4_hba.nvmet_mrq_data) {
  7247. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7248. "3124 Fail allocate memory for "
  7249. "fast-path RQ set data array\n");
  7250. goto out_error;
  7251. }
  7252. }
  7253. }
  7254. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
  7255. /* Create HBA Event Queues (EQs) */
  7256. for (idx = 0; idx < io_channel; idx++) {
  7257. /* Create EQs */
  7258. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
  7259. phba->sli4_hba.eq_ecount);
  7260. if (!qdesc) {
  7261. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7262. "0497 Failed allocate EQ (%d)\n", idx);
  7263. goto out_error;
  7264. }
  7265. phba->sli4_hba.hba_eq[idx] = qdesc;
  7266. }
  7267. /* FCP and NVME io channels are not required to be balanced */
  7268. for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
  7269. if (lpfc_alloc_fcp_wq_cq(phba, idx))
  7270. goto out_error;
  7271. for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
  7272. if (lpfc_alloc_nvme_wq_cq(phba, idx))
  7273. goto out_error;
  7274. /* allocate MRQ CQs */
  7275. max = phba->cfg_nvme_io_channel;
  7276. if (max < phba->cfg_nvmet_mrq)
  7277. max = phba->cfg_nvmet_mrq;
  7278. for (idx = 0; idx < max; idx++)
  7279. if (lpfc_alloc_nvme_wq_cq(phba, idx))
  7280. goto out_error;
  7281. if (phba->nvmet_support) {
  7282. for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
  7283. qdesc = lpfc_sli4_queue_alloc(phba,
  7284. phba->sli4_hba.cq_esize,
  7285. phba->sli4_hba.cq_ecount);
  7286. if (!qdesc) {
  7287. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7288. "3142 Failed allocate NVME "
  7289. "CQ Set (%d)\n", idx);
  7290. goto out_error;
  7291. }
  7292. phba->sli4_hba.nvmet_cqset[idx] = qdesc;
  7293. }
  7294. }
  7295. /*
  7296. * Create Slow Path Completion Queues (CQs)
  7297. */
  7298. /* Create slow-path Mailbox Command Complete Queue */
  7299. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  7300. phba->sli4_hba.cq_ecount);
  7301. if (!qdesc) {
  7302. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7303. "0500 Failed allocate slow-path mailbox CQ\n");
  7304. goto out_error;
  7305. }
  7306. phba->sli4_hba.mbx_cq = qdesc;
  7307. /* Create slow-path ELS Complete Queue */
  7308. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  7309. phba->sli4_hba.cq_ecount);
  7310. if (!qdesc) {
  7311. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7312. "0501 Failed allocate slow-path ELS CQ\n");
  7313. goto out_error;
  7314. }
  7315. phba->sli4_hba.els_cq = qdesc;
  7316. /*
  7317. * Create Slow Path Work Queues (WQs)
  7318. */
  7319. /* Create Mailbox Command Queue */
  7320. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
  7321. phba->sli4_hba.mq_ecount);
  7322. if (!qdesc) {
  7323. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7324. "0505 Failed allocate slow-path MQ\n");
  7325. goto out_error;
  7326. }
  7327. phba->sli4_hba.mbx_wq = qdesc;
  7328. /*
  7329. * Create ELS Work Queues
  7330. */
  7331. /* Create slow-path ELS Work Queue */
  7332. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
  7333. phba->sli4_hba.wq_ecount);
  7334. if (!qdesc) {
  7335. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7336. "0504 Failed allocate slow-path ELS WQ\n");
  7337. goto out_error;
  7338. }
  7339. phba->sli4_hba.els_wq = qdesc;
  7340. list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
  7341. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  7342. /* Create NVME LS Complete Queue */
  7343. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  7344. phba->sli4_hba.cq_ecount);
  7345. if (!qdesc) {
  7346. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7347. "6079 Failed allocate NVME LS CQ\n");
  7348. goto out_error;
  7349. }
  7350. phba->sli4_hba.nvmels_cq = qdesc;
  7351. /* Create NVME LS Work Queue */
  7352. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
  7353. phba->sli4_hba.wq_ecount);
  7354. if (!qdesc) {
  7355. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7356. "6080 Failed allocate NVME LS WQ\n");
  7357. goto out_error;
  7358. }
  7359. phba->sli4_hba.nvmels_wq = qdesc;
  7360. list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
  7361. }
  7362. /*
  7363. * Create Receive Queue (RQ)
  7364. */
  7365. /* Create Receive Queue for header */
  7366. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
  7367. phba->sli4_hba.rq_ecount);
  7368. if (!qdesc) {
  7369. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7370. "0506 Failed allocate receive HRQ\n");
  7371. goto out_error;
  7372. }
  7373. phba->sli4_hba.hdr_rq = qdesc;
  7374. /* Create Receive Queue for data */
  7375. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
  7376. phba->sli4_hba.rq_ecount);
  7377. if (!qdesc) {
  7378. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7379. "0507 Failed allocate receive DRQ\n");
  7380. goto out_error;
  7381. }
  7382. phba->sli4_hba.dat_rq = qdesc;
  7383. if (phba->nvmet_support) {
  7384. for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
  7385. /* Create NVMET Receive Queue for header */
  7386. qdesc = lpfc_sli4_queue_alloc(phba,
  7387. phba->sli4_hba.rq_esize,
  7388. phba->sli4_hba.rq_ecount);
  7389. if (!qdesc) {
  7390. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7391. "3146 Failed allocate "
  7392. "receive HRQ\n");
  7393. goto out_error;
  7394. }
  7395. phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
  7396. /* Only needed for header of RQ pair */
  7397. qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
  7398. GFP_KERNEL);
  7399. if (qdesc->rqbp == NULL) {
  7400. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7401. "6131 Failed allocate "
  7402. "Header RQBP\n");
  7403. goto out_error;
  7404. }
  7405. /* Create NVMET Receive Queue for data */
  7406. qdesc = lpfc_sli4_queue_alloc(phba,
  7407. phba->sli4_hba.rq_esize,
  7408. phba->sli4_hba.rq_ecount);
  7409. if (!qdesc) {
  7410. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7411. "3156 Failed allocate "
  7412. "receive DRQ\n");
  7413. goto out_error;
  7414. }
  7415. phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
  7416. }
  7417. }
  7418. /* Create the Queues needed for Flash Optimized Fabric operations */
  7419. if (phba->cfg_fof)
  7420. lpfc_fof_queue_create(phba);
  7421. return 0;
  7422. out_error:
  7423. lpfc_sli4_queue_destroy(phba);
  7424. return -ENOMEM;
  7425. }
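/*
 * Small helpers used by lpfc_sli4_queue_destroy() below: free a single
 * queue, an array of queues (and the array itself), or a CQ mapping array,
 * and reset the caller's pointer to NULL so a repeated destroy is harmless.
 */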
  7426. static inline void
  7427. __lpfc_sli4_release_queue(struct lpfc_queue **qp)
  7428. {
  7429. if (*qp != NULL) {
  7430. lpfc_sli4_queue_free(*qp);
  7431. *qp = NULL;
  7432. }
  7433. }
  7434. static inline void
  7435. lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
  7436. {
  7437. int idx;
  7438. if (*qs == NULL)
  7439. return;
  7440. for (idx = 0; idx < max; idx++)
  7441. __lpfc_sli4_release_queue(&(*qs)[idx]);
  7442. kfree(*qs);
  7443. *qs = NULL;
  7444. }
  7445. static inline void
  7446. lpfc_sli4_release_queue_map(uint16_t **qmap)
  7447. {
  7448. if (*qmap != NULL) {
  7449. kfree(*qmap);
  7450. *qmap = NULL;
  7451. }
  7452. }
  7453. /**
  7454. * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
  7455. * @phba: pointer to lpfc hba data structure.
  7456. *
  7457. * This routine is invoked to release all the SLI4 queues allocated for the
  7458. * FCoE HBA operation. Queues that were never allocated are simply skipped;
  7459. * the routine does not return a value.
  7464. **/
  7465. void
  7466. lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
  7467. {
  7468. if (phba->cfg_fof)
  7469. lpfc_fof_queue_destroy(phba);
  7470. /* Release HBA eqs */
  7471. lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
  7472. /* Release FCP cqs */
  7473. lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
  7474. phba->cfg_fcp_io_channel);
  7475. /* Release FCP wqs */
  7476. lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
  7477. phba->cfg_fcp_io_channel);
  7478. /* Release FCP CQ mapping array */
  7479. lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
  7480. /* Release NVME cqs */
  7481. lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
  7482. phba->cfg_nvme_io_channel);
  7483. /* Release NVME wqs */
  7484. lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
  7485. phba->cfg_nvme_io_channel);
  7486. /* Release NVME CQ mapping array */
  7487. lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
  7488. lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
  7489. phba->cfg_nvmet_mrq);
  7490. lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
  7491. phba->cfg_nvmet_mrq);
  7492. lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
  7493. phba->cfg_nvmet_mrq);
  7494. /* Release mailbox command work queue */
  7495. __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
  7496. /* Release ELS work queue */
  7497. __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
  7498. /* Release NVME LS work queue */
  7499. __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
  7500. /* Release unsolicited receive queue */
  7501. __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
  7502. __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
  7503. /* Release ELS complete queue */
  7504. __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
  7505. /* Release NVME LS complete queue */
  7506. __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
  7507. /* Release mailbox command complete queue */
  7508. __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
  7509. /* Everything on this list has been freed */
  7510. INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
  7511. }
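/**
 * lpfc_post_rq_buffer - Post buffers to a header/data receive queue pair
 * @phba: pointer to lpfc hba data structure.
 * @hrq: pointer to the header receive queue.
 * @drq: pointer to the data receive queue.
 * @count: number of buffers to post.
 *
 * Allocates up to @count buffers with the queue's rqb_alloc_buffer method,
 * posts each header/data address pair to the RQ pair, and tracks buffers the
 * hardware accepted on the rqb_buffer_list. Buffers that could not be posted
 * are freed back through rqb_free_buffer.
 *
 * Return: always 1.
 **/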
  7512. int
  7513. lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  7514. struct lpfc_queue *drq, int count)
  7515. {
  7516. int rc, i;
  7517. struct lpfc_rqe hrqe;
  7518. struct lpfc_rqe drqe;
  7519. struct lpfc_rqb *rqbp;
  7520. struct rqb_dmabuf *rqb_buffer;
  7521. LIST_HEAD(rqb_buf_list);
  7522. rqbp = hrq->rqbp;
  7523. for (i = 0; i < count; i++) {
  7524. rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
  7525. if (!rqb_buffer)
  7526. break;
  7527. rqb_buffer->hrq = hrq;
  7528. rqb_buffer->drq = drq;
  7529. list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
  7530. }
  7531. while (!list_empty(&rqb_buf_list)) {
  7532. list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
  7533. hbuf.list);
  7534. hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
  7535. hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
  7536. drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
  7537. drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
  7538. rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
  7539. if (rc < 0) {
  7540. (rqbp->rqb_free_buffer)(phba, rqb_buffer);
  7541. } else {
  7542. list_add_tail(&rqb_buffer->hbuf.list,
  7543. &rqbp->rqb_buffer_list);
  7544. rqbp->buffer_count++;
  7545. }
  7546. }
  7547. return 1;
  7548. }
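/**
 * lpfc_free_rq_buffer - Free all buffers currently posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: pointer to the (header) receive queue whose buffers are freed.
 *
 * Walks the queue's rqb_buffer_list, returning each buffer through the
 * queue's rqb_free_buffer method and decrementing buffer_count.
 *
 * Return: always 1.
 **/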
  7549. int
  7550. lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
  7551. {
  7552. struct lpfc_rqb *rqbp;
  7553. struct lpfc_dmabuf *h_buf;
  7554. struct rqb_dmabuf *rqb_buffer;
  7555. rqbp = rq->rqbp;
  7556. while (!list_empty(&rqbp->rqb_buffer_list)) {
  7557. list_remove_head(&rqbp->rqb_buffer_list, h_buf,
  7558. struct lpfc_dmabuf, list);
  7559. rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
  7560. (rqbp->rqb_free_buffer)(phba, rqb_buffer);
  7561. rqbp->buffer_count--;
  7562. }
  7563. return 1;
  7564. }
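/**
 * lpfc_create_wq_cq - Create a WQ/CQ (or MQ/CQ) pair on a given EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new completion queue.
 * @cq: completion queue to create.
 * @wq: work queue (or mailbox queue when @qtype is LPFC_MBOX) to create.
 * @cq_map: optional array slot that receives the new CQ's queue id.
 * @qidx: queue index, used for logging and @cq_map.
 * @qtype: LPFC_MBOX, LPFC_ELS, LPFC_FCP, LPFC_NVME or LPFC_NVME_LS.
 *
 * Creates the CQ on @eq first; for LPFC_MBOX it then creates the MQ on that
 * CQ, otherwise it creates the WQ and binds it to its SLI ring for fast-path
 * completion handling.
 *
 * Return: 0 on success, -ENOMEM if a queue was never allocated, or the error
 * code returned by the underlying queue-create mailbox command.
 **/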
  7565. static int
  7566. lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
  7567. struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
  7568. int qidx, uint32_t qtype)
  7569. {
  7570. struct lpfc_sli_ring *pring;
  7571. int rc;
  7572. if (!eq || !cq || !wq) {
  7573. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7574. "6085 Fast-path %s (%d) not allocated\n",
  7575. ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
  7576. return -ENOMEM;
  7577. }
  7578. /* create the CQ first */
  7579. rc = lpfc_cq_create(phba, cq, eq,
  7580. (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
  7581. if (rc) {
  7582. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7583. "6086 Failed setup of CQ (%d), rc = 0x%x\n",
  7584. qidx, (uint32_t)rc);
  7585. return rc;
  7586. }
  7587. if (qtype != LPFC_MBOX) {
  7588. /* Setup cq_map for fast lookup */
  7589. if (cq_map)
  7590. *cq_map = cq->queue_id;
  7591. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7592. "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
  7593. qidx, cq->queue_id, qidx, eq->queue_id);
  7594. /* create the wq */
  7595. rc = lpfc_wq_create(phba, wq, cq, qtype);
  7596. if (rc) {
  7597. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7598. "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
  7599. qidx, (uint32_t)rc);
  7600. /* no need to tear down cq - caller will do so */
  7601. return rc;
  7602. }
  7603. /* Bind this CQ/WQ to the NVME ring */
  7604. pring = wq->pring;
  7605. pring->sli.sli4.wqp = (void *)wq;
  7606. cq->pring = pring;
  7607. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7608. "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
  7609. qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
  7610. } else {
  7611. rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
  7612. if (rc) {
  7613. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7614. "0539 Failed setup of slow-path MQ: "
  7615. "rc = 0x%x\n", rc);
  7616. /* no need to tear down cq - caller will do so */
  7617. return rc;
  7618. }
  7619. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7620. "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
  7621. phba->sli4_hba.mbx_wq->queue_id,
  7622. phba->sli4_hba.mbx_cq->queue_id);
  7623. }
  7624. return 0;
  7625. }
  7626. /**
  7627. * lpfc_sli4_queue_setup - Set up all the SLI4 queues
  7628. * @phba: pointer to lpfc hba data structure.
  7629. *
  7630. * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
  7631. * operation.
  7632. *
  7633. * Return codes
  7634. * 0 - successful
  7635. * -ENOMEM - No available memory
  7636. * -EIO - The mailbox failed to complete successfully.
  7637. **/
  7638. int
  7639. lpfc_sli4_queue_setup(struct lpfc_hba *phba)
  7640. {
  7641. uint32_t shdr_status, shdr_add_status;
  7642. union lpfc_sli4_cfg_shdr *shdr;
  7643. LPFC_MBOXQ_t *mboxq;
  7644. int qidx;
  7645. uint32_t length, io_channel;
  7646. int rc = -ENOMEM;
  7647. /* Check for dual-ULP support */
  7648. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7649. if (!mboxq) {
  7650. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7651. "3249 Unable to allocate memory for "
  7652. "QUERY_FW_CFG mailbox command\n");
  7653. return -ENOMEM;
  7654. }
  7655. length = (sizeof(struct lpfc_mbx_query_fw_config) -
  7656. sizeof(struct lpfc_sli4_cfg_mhdr));
  7657. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  7658. LPFC_MBOX_OPCODE_QUERY_FW_CFG,
  7659. length, LPFC_SLI4_MBX_EMBED);
  7660. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7661. shdr = (union lpfc_sli4_cfg_shdr *)
  7662. &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
  7663. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  7664. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  7665. if (shdr_status || shdr_add_status || rc) {
  7666. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7667. "3250 QUERY_FW_CFG mailbox failed with status "
  7668. "x%x add_status x%x, mbx status x%x\n",
  7669. shdr_status, shdr_add_status, rc);
  7670. if (rc != MBX_TIMEOUT)
  7671. mempool_free(mboxq, phba->mbox_mem_pool);
  7672. rc = -ENXIO;
  7673. goto out_error;
  7674. }
  7675. phba->sli4_hba.fw_func_mode =
  7676. mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
  7677. phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
  7678. phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
  7679. phba->sli4_hba.physical_port =
  7680. mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
  7681. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7682. "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
  7683. "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
  7684. phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
  7685. if (rc != MBX_TIMEOUT)
  7686. mempool_free(mboxq, phba->mbox_mem_pool);
  7687. /*
  7688. * Set up HBA Event Queues (EQs)
  7689. */
  7690. io_channel = phba->io_channel_irqs;
  7691. /* Set up HBA event queue */
  7692. if (io_channel && !phba->sli4_hba.hba_eq) {
  7693. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7694. "3147 Fast-path EQs not allocated\n");
  7695. rc = -ENOMEM;
  7696. goto out_error;
  7697. }
  7698. for (qidx = 0; qidx < io_channel; qidx++) {
  7699. if (!phba->sli4_hba.hba_eq[qidx]) {
  7700. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7701. "0522 Fast-path EQ (%d) not "
  7702. "allocated\n", qidx);
  7703. rc = -ENOMEM;
  7704. goto out_destroy;
  7705. }
  7706. rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
  7707. phba->cfg_fcp_imax);
  7708. if (rc) {
  7709. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7710. "0523 Failed setup of fast-path EQ "
  7711. "(%d), rc = 0x%x\n", qidx,
  7712. (uint32_t)rc);
  7713. goto out_destroy;
  7714. }
  7715. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7716. "2584 HBA EQ setup: queue[%d]-id=%d\n",
  7717. qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
  7718. }
  7719. if (phba->cfg_nvme_io_channel) {
  7720. if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
  7721. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7722. "6084 Fast-path NVME %s array not allocated\n",
  7723. (phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
  7724. rc = -ENOMEM;
  7725. goto out_destroy;
  7726. }
  7727. for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
  7728. rc = lpfc_create_wq_cq(phba,
  7729. phba->sli4_hba.hba_eq[
  7730. qidx % io_channel],
  7731. phba->sli4_hba.nvme_cq[qidx],
  7732. phba->sli4_hba.nvme_wq[qidx],
  7733. &phba->sli4_hba.nvme_cq_map[qidx],
  7734. qidx, LPFC_NVME);
  7735. if (rc) {
  7736. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7737. "6123 Failed to setup fastpath "
  7738. "NVME WQ/CQ (%d), rc = 0x%x\n",
  7739. qidx, (uint32_t)rc);
  7740. goto out_destroy;
  7741. }
  7742. }
  7743. }
  7744. if (phba->cfg_fcp_io_channel) {
  7745. /* Set up fast-path FCP Response Complete Queue */
  7746. if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
  7747. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7748. "3148 Fast-path FCP %s array not allocated\n",
  7749. phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
  7750. rc = -ENOMEM;
  7751. goto out_destroy;
  7752. }
  7753. for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
  7754. rc = lpfc_create_wq_cq(phba,
  7755. phba->sli4_hba.hba_eq[
  7756. qidx % io_channel],
  7757. phba->sli4_hba.fcp_cq[qidx],
  7758. phba->sli4_hba.fcp_wq[qidx],
  7759. &phba->sli4_hba.fcp_cq_map[qidx],
  7760. qidx, LPFC_FCP);
  7761. if (rc) {
  7762. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7763. "0535 Failed to setup fastpath "
  7764. "FCP WQ/CQ (%d), rc = 0x%x\n",
  7765. qidx, (uint32_t)rc);
  7766. goto out_destroy;
  7767. }
  7768. }
  7769. }
  7770. /*
  7771. * Set up Slow Path Complete Queues (CQs)
  7772. */
  7773. /* Set up slow-path MBOX CQ/MQ */
  7774. if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
  7775. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7776. "0528 %s not allocated\n",
  7777. phba->sli4_hba.mbx_cq ?
  7778. "Mailbox WQ" : "Mailbox CQ");
  7779. rc = -ENOMEM;
  7780. goto out_destroy;
  7781. }
  7782. rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
  7783. phba->sli4_hba.mbx_cq,
  7784. phba->sli4_hba.mbx_wq,
  7785. NULL, 0, LPFC_MBOX);
  7786. if (rc) {
  7787. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7788. "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
  7789. (uint32_t)rc);
  7790. goto out_destroy;
  7791. }
  7792. if (phba->nvmet_support) {
  7793. if (!phba->sli4_hba.nvmet_cqset) {
  7794. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7795. "3165 Fast-path NVME CQ Set "
  7796. "array not allocated\n");
  7797. rc = -ENOMEM;
  7798. goto out_destroy;
  7799. }
  7800. if (phba->cfg_nvmet_mrq > 1) {
  7801. rc = lpfc_cq_create_set(phba,
  7802. phba->sli4_hba.nvmet_cqset,
  7803. phba->sli4_hba.hba_eq,
  7804. LPFC_WCQ, LPFC_NVMET);
  7805. if (rc) {
  7806. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7807. "3164 Failed setup of NVME CQ "
  7808. "Set, rc = 0x%x\n",
  7809. (uint32_t)rc);
  7810. goto out_destroy;
  7811. }
  7812. } else {
  7813. /* Set up NVMET Receive Complete Queue */
  7814. rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
  7815. phba->sli4_hba.hba_eq[0],
  7816. LPFC_WCQ, LPFC_NVMET);
  7817. if (rc) {
  7818. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7819. "6089 Failed setup NVMET CQ: "
  7820. "rc = 0x%x\n", (uint32_t)rc);
  7821. goto out_destroy;
  7822. }
  7823. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7824. "6090 NVMET CQ setup: cq-id=%d, "
  7825. "parent eq-id=%d\n",
  7826. phba->sli4_hba.nvmet_cqset[0]->queue_id,
  7827. phba->sli4_hba.hba_eq[0]->queue_id);
  7828. }
  7829. }
  7830. /* Set up slow-path ELS WQ/CQ */
  7831. if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
  7832. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7833. "0530 ELS %s not allocated\n",
  7834. phba->sli4_hba.els_cq ? "WQ" : "CQ");
  7835. rc = -ENOMEM;
  7836. goto out_destroy;
  7837. }
  7838. rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
  7839. phba->sli4_hba.els_cq,
  7840. phba->sli4_hba.els_wq,
  7841. NULL, 0, LPFC_ELS);
  7842. if (rc) {
  7843. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7844. "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
  7845. (uint32_t)rc);
  7846. goto out_destroy;
  7847. }
  7848. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7849. "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
  7850. phba->sli4_hba.els_wq->queue_id,
  7851. phba->sli4_hba.els_cq->queue_id);
  7852. if (phba->cfg_nvme_io_channel) {
  7853. /* Set up NVME LS Complete Queue */
  7854. if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
  7855. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7856. "6091 LS %s not allocated\n",
  7857. phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
  7858. rc = -ENOMEM;
  7859. goto out_destroy;
  7860. }
  7861. rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
  7862. phba->sli4_hba.nvmels_cq,
  7863. phba->sli4_hba.nvmels_wq,
  7864. NULL, 0, LPFC_NVME_LS);
  7865. if (rc) {
  7866. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7867. "0529 Failed setup of NVVME LS WQ/CQ: "
  7868. "rc = 0x%x\n", (uint32_t)rc);
  7869. goto out_destroy;
  7870. }
  7871. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7872. "6096 ELS WQ setup: wq-id=%d, "
  7873. "parent cq-id=%d\n",
  7874. phba->sli4_hba.nvmels_wq->queue_id,
  7875. phba->sli4_hba.nvmels_cq->queue_id);
  7876. }
  7877. /*
  7878. * Create NVMET Receive Queue (RQ)
  7879. */
  7880. if (phba->nvmet_support) {
  7881. if ((!phba->sli4_hba.nvmet_cqset) ||
  7882. (!phba->sli4_hba.nvmet_mrq_hdr) ||
  7883. (!phba->sli4_hba.nvmet_mrq_data)) {
  7884. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7885. "6130 MRQ CQ Queues not "
  7886. "allocated\n");
  7887. rc = -ENOMEM;
  7888. goto out_destroy;
  7889. }
  7890. if (phba->cfg_nvmet_mrq > 1) {
  7891. rc = lpfc_mrq_create(phba,
  7892. phba->sli4_hba.nvmet_mrq_hdr,
  7893. phba->sli4_hba.nvmet_mrq_data,
  7894. phba->sli4_hba.nvmet_cqset,
  7895. LPFC_NVMET);
  7896. if (rc) {
  7897. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7898. "6098 Failed setup of NVMET "
  7899. "MRQ: rc = 0x%x\n",
  7900. (uint32_t)rc);
  7901. goto out_destroy;
  7902. }
  7903. } else {
  7904. rc = lpfc_rq_create(phba,
  7905. phba->sli4_hba.nvmet_mrq_hdr[0],
  7906. phba->sli4_hba.nvmet_mrq_data[0],
  7907. phba->sli4_hba.nvmet_cqset[0],
  7908. LPFC_NVMET);
  7909. if (rc) {
  7910. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7911. "6057 Failed setup of NVMET "
  7912. "Receive Queue: rc = 0x%x\n",
  7913. (uint32_t)rc);
  7914. goto out_destroy;
  7915. }
  7916. lpfc_printf_log(
  7917. phba, KERN_INFO, LOG_INIT,
  7918. "6099 NVMET RQ setup: hdr-rq-id=%d, "
  7919. "dat-rq-id=%d parent cq-id=%d\n",
  7920. phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
  7921. phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
  7922. phba->sli4_hba.nvmet_cqset[0]->queue_id);
  7923. }
  7924. }
  7925. if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
  7926. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7927. "0540 Receive Queue not allocated\n");
  7928. rc = -ENOMEM;
  7929. goto out_destroy;
  7930. }
  7931. lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
  7932. lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
  7933. rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
  7934. phba->sli4_hba.els_cq, LPFC_USOL);
  7935. if (rc) {
  7936. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7937. "0541 Failed setup of Receive Queue: "
  7938. "rc = 0x%x\n", (uint32_t)rc);
  7939. goto out_destroy;
  7940. }
  7941. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7942. "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
  7943. "parent cq-id=%d\n",
  7944. phba->sli4_hba.hdr_rq->queue_id,
  7945. phba->sli4_hba.dat_rq->queue_id,
  7946. phba->sli4_hba.els_cq->queue_id);
  7947. if (phba->cfg_fof) {
  7948. rc = lpfc_fof_queue_setup(phba);
  7949. if (rc) {
  7950. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7951. "0549 Failed setup of FOF Queues: "
  7952. "rc = 0x%x\n", rc);
  7953. goto out_destroy;
  7954. }
  7955. }
  7956. for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
  7957. lpfc_modify_hba_eq_delay(phba, qidx);
  7958. return 0;
  7959. out_destroy:
  7960. lpfc_sli4_queue_unset(phba);
  7961. out_error:
  7962. return rc;
  7963. }
  7964. /**
  7965. * lpfc_sli4_queue_unset - Unset all the SLI4 queues
  7966. * @phba: pointer to lpfc hba data structure.
  7967. *
  7968. * This routine is invoked to unset (destroy in firmware) all the SLI4 queues
  7969. * that were set up for the FCoE HBA operation. Queues that were never set up
  7970. * are simply skipped; the routine does not return a value.
  7975. **/
  7976. void
  7977. lpfc_sli4_queue_unset(struct lpfc_hba *phba)
  7978. {
  7979. int qidx;
  7980. /* Unset the queues created for Flash Optimized Fabric operations */
  7981. if (phba->cfg_fof)
  7982. lpfc_fof_queue_destroy(phba);
  7983. /* Unset mailbox command work queue */
  7984. if (phba->sli4_hba.mbx_wq)
  7985. lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
  7986. /* Unset NVME LS work queue */
  7987. if (phba->sli4_hba.nvmels_wq)
  7988. lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
  7989. /* Unset ELS work queue */
  7990. if (phba->sli4_hba.els_wq)
  7991. lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
  7992. /* Unset unsolicited receive queue */
  7993. if (phba->sli4_hba.hdr_rq)
  7994. lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
  7995. phba->sli4_hba.dat_rq);
  7996. /* Unset FCP work queue */
  7997. if (phba->sli4_hba.fcp_wq)
  7998. for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
  7999. lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
  8000. /* Unset NVME work queue */
  8001. if (phba->sli4_hba.nvme_wq) {
  8002. for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
  8003. lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
  8004. }
  8005. /* Unset mailbox command complete queue */
  8006. if (phba->sli4_hba.mbx_cq)
  8007. lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
  8008. /* Unset ELS complete queue */
  8009. if (phba->sli4_hba.els_cq)
  8010. lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
  8011. /* Unset NVME LS complete queue */
  8012. if (phba->sli4_hba.nvmels_cq)
  8013. lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
  8014. /* Unset NVME response complete queue */
  8015. if (phba->sli4_hba.nvme_cq)
  8016. for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
  8017. lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
  8018. /* Unset NVMET MRQ queue */
  8019. if (phba->sli4_hba.nvmet_mrq_hdr) {
  8020. for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
  8021. lpfc_rq_destroy(phba,
  8022. phba->sli4_hba.nvmet_mrq_hdr[qidx],
  8023. phba->sli4_hba.nvmet_mrq_data[qidx]);
  8024. }
  8025. /* Unset NVMET CQ Set complete queue */
  8026. if (phba->sli4_hba.nvmet_cqset) {
  8027. for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
  8028. lpfc_cq_destroy(phba,
  8029. phba->sli4_hba.nvmet_cqset[qidx]);
  8030. }
  8031. /* Unset FCP response complete queue */
  8032. if (phba->sli4_hba.fcp_cq)
  8033. for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
  8034. lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
  8035. /* Unset fast-path event queue */
  8036. if (phba->sli4_hba.hba_eq)
  8037. for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
  8038. lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
  8039. }
  8040. /**
  8041. * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
  8042. * @phba: pointer to lpfc hba data structure.
  8043. *
  8044. * This routine is invoked to allocate and set up a pool of completion queue
  8045. * events. The body of the completion queue event is a completion queue entry
  8046. * (CQE). For now, this pool is used for the interrupt service routine to queue
  8047. * the following HBA completion queue events for the worker thread to process:
  8048. * - Mailbox asynchronous events
  8049. * - Receive queue completion unsolicited events
  8050. * Later, this can be used for all the slow-path events.
  8051. *
  8052. * Return codes
  8053. * 0 - successful
  8054. * -ENOMEM - No available memory
  8055. **/
  8056. static int
  8057. lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
  8058. {
  8059. struct lpfc_cq_event *cq_event;
  8060. int i;
  8061. for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
  8062. cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
  8063. if (!cq_event)
  8064. goto out_pool_create_fail;
  8065. list_add_tail(&cq_event->list,
  8066. &phba->sli4_hba.sp_cqe_event_pool);
  8067. }
  8068. return 0;
  8069. out_pool_create_fail:
  8070. lpfc_sli4_cq_event_pool_destroy(phba);
  8071. return -ENOMEM;
  8072. }
  8073. /**
  8074. * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
  8075. * @phba: pointer to lpfc hba data structure.
  8076. *
  8077. * This routine is invoked to free the pool of completion queue events at
  8078. * driver unload time. Note that it is the responsibility of the driver
  8079. * cleanup routine to free all the outstanding completion-queue events
  8080. * allocated from this pool back into the pool before invoking this routine
  8081. * to destroy the pool.
  8082. **/
  8083. static void
  8084. lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
  8085. {
  8086. struct lpfc_cq_event *cq_event, *next_cq_event;
  8087. list_for_each_entry_safe(cq_event, next_cq_event,
  8088. &phba->sli4_hba.sp_cqe_event_pool, list) {
  8089. list_del(&cq_event->list);
  8090. kfree(cq_event);
  8091. }
  8092. }
  8093. /**
  8094. * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
  8095. * @phba: pointer to lpfc hba data structure.
  8096. *
  8097. * This routine is the lock free version of the API invoked to allocate a
  8098. * completion-queue event from the free pool.
  8099. *
  8100. * Return: Pointer to the newly allocated completion-queue event if successful
  8101. * NULL otherwise.
  8102. **/
  8103. struct lpfc_cq_event *
  8104. __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
  8105. {
  8106. struct lpfc_cq_event *cq_event = NULL;
  8107. list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
  8108. struct lpfc_cq_event, list);
  8109. return cq_event;
  8110. }
  8111. /**
  8112. * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
  8113. * @phba: pointer to lpfc hba data structure.
  8114. *
  8115. * This routine is the lock version of the API invoked to allocate a
  8116. * completion-queue event from the free pool.
  8117. *
  8118. * Return: Pointer to the newly allocated completion-queue event if successful
  8119. * NULL otherwise.
  8120. **/
  8121. struct lpfc_cq_event *
  8122. lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
  8123. {
  8124. struct lpfc_cq_event *cq_event;
  8125. unsigned long iflags;
  8126. spin_lock_irqsave(&phba->hbalock, iflags);
  8127. cq_event = __lpfc_sli4_cq_event_alloc(phba);
  8128. spin_unlock_irqrestore(&phba->hbalock, iflags);
  8129. return cq_event;
  8130. }
  8131. /**
  8132. * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
  8133. * @phba: pointer to lpfc hba data structure.
  8134. * @cq_event: pointer to the completion queue event to be freed.
  8135. *
  8136. * This routine is the lock free version of the API invoked to release a
  8137. * completion-queue event back into the free pool.
  8138. **/
  8139. void
  8140. __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
  8141. struct lpfc_cq_event *cq_event)
  8142. {
  8143. list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
  8144. }
  8145. /**
  8146. * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
  8147. * @phba: pointer to lpfc hba data structure.
  8148. * @cq_event: pointer to the completion queue event to be freed.
  8149. *
  8150. * This routine is the lock version of the API invoked to release a
  8151. * completion-queue event back into the free pool.
  8152. **/
  8153. void
  8154. lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
  8155. struct lpfc_cq_event *cq_event)
  8156. {
  8157. unsigned long iflags;
  8158. spin_lock_irqsave(&phba->hbalock, iflags);
  8159. __lpfc_sli4_cq_event_release(phba, cq_event);
  8160. spin_unlock_irqrestore(&phba->hbalock, iflags);
  8161. }
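/*
 * Illustrative sketch (not part of the driver): how a slow-path handler
 * could borrow an event from the pool above and hand it back once the
 * worker thread has consumed it. The helper name below is hypothetical.
 */
static inline void lpfc_example_cq_event_round_trip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* Take a pre-allocated event (locked variant, usable from ISR context) */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;	/* pool exhausted - the CQE would have to be dropped */

	/* ... copy the CQE payload and queue the event for the worker ... */

	/* When processing is done, the event must return to the pool */
	lpfc_sli4_cq_event_release(phba, cq_event);
}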
  8162. /**
  8163. * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
  8164. * @phba: pointer to lpfc hba data structure.
  8165. *
  8166. * This routine releases all the pending completion-queue events back into
  8167. * the free pool for device reset.
  8168. **/
  8169. static void
  8170. lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
  8171. {
  8172. LIST_HEAD(cqelist);
  8173. struct lpfc_cq_event *cqe;
  8174. unsigned long iflags;
  8175. /* Retrieve all the pending WCQEs from pending WCQE lists */
  8176. spin_lock_irqsave(&phba->hbalock, iflags);
  8177. /* Pending FCP XRI abort events */
  8178. list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
  8179. &cqelist);
  8180. /* Pending ELS XRI abort events */
  8181. list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
  8182. &cqelist);
  8183. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  8184. /* Pending NVME XRI abort events */
  8185. list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
  8186. &cqelist);
  8187. }
  8188. /* Pending async events */
  8189. list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
  8190. &cqelist);
  8191. spin_unlock_irqrestore(&phba->hbalock, iflags);
  8192. while (!list_empty(&cqelist)) {
  8193. list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
  8194. lpfc_sli4_cq_event_release(phba, cqe);
  8195. }
  8196. }
  8197. /**
  8198. * lpfc_pci_function_reset - Reset pci function.
  8199. * @phba: pointer to lpfc hba data structure.
  8200. *
  8201. * This routine is invoked to request a PCI function reset. It destroys all
  8202. * resources assigned to the PCI function that originates this request.
  8203. *
  8204. * Return codes
  8205. * 0 - successful
  8206. * -ENOMEM - No available memory
  8207. * -EIO - The mailbox failed to complete successfully.
  8208. **/
  8209. int
  8210. lpfc_pci_function_reset(struct lpfc_hba *phba)
  8211. {
  8212. LPFC_MBOXQ_t *mboxq;
  8213. uint32_t rc = 0, if_type;
  8214. uint32_t shdr_status, shdr_add_status;
  8215. uint32_t rdy_chk;
  8216. uint32_t port_reset = 0;
  8217. union lpfc_sli4_cfg_shdr *shdr;
  8218. struct lpfc_register reg_data;
  8219. uint16_t devid;
  8220. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  8221. switch (if_type) {
  8222. case LPFC_SLI_INTF_IF_TYPE_0:
  8223. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
  8224. GFP_KERNEL);
  8225. if (!mboxq) {
  8226. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8227. "0494 Unable to allocate memory for "
  8228. "issuing SLI_FUNCTION_RESET mailbox "
  8229. "command\n");
  8230. return -ENOMEM;
  8231. }
  8232. /* Setup PCI function reset mailbox-ioctl command */
  8233. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  8234. LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
  8235. LPFC_SLI4_MBX_EMBED);
  8236. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  8237. shdr = (union lpfc_sli4_cfg_shdr *)
  8238. &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
  8239. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  8240. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
  8241. &shdr->response);
  8242. if (rc != MBX_TIMEOUT)
  8243. mempool_free(mboxq, phba->mbox_mem_pool);
  8244. if (shdr_status || shdr_add_status || rc) {
  8245. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8246. "0495 SLI_FUNCTION_RESET mailbox "
  8247. "failed with status x%x add_status x%x,"
  8248. " mbx status x%x\n",
  8249. shdr_status, shdr_add_status, rc);
  8250. rc = -ENXIO;
  8251. }
  8252. break;
  8253. case LPFC_SLI_INTF_IF_TYPE_2:
  8254. wait:
  8255. /*
  8256. * Poll the Port Status Register and wait for RDY for
  8257. * up to 30 seconds. If the port doesn't respond, treat
  8258. * it as an error.
  8259. */
  8260. for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
  8261. if (lpfc_readl(phba->sli4_hba.u.if_type2.
  8262. STATUSregaddr, &reg_data.word0)) {
  8263. rc = -ENODEV;
  8264. goto out;
  8265. }
  8266. if (bf_get(lpfc_sliport_status_rdy, &reg_data))
  8267. break;
  8268. msleep(20);
  8269. }
  8270. if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
  8271. phba->work_status[0] = readl(
  8272. phba->sli4_hba.u.if_type2.ERR1regaddr);
  8273. phba->work_status[1] = readl(
  8274. phba->sli4_hba.u.if_type2.ERR2regaddr);
  8275. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8276. "2890 Port not ready, port status reg "
  8277. "0x%x error 1=0x%x, error 2=0x%x\n",
  8278. reg_data.word0,
  8279. phba->work_status[0],
  8280. phba->work_status[1]);
  8281. rc = -ENODEV;
  8282. goto out;
  8283. }
  8284. if (!port_reset) {
  8285. /*
  8286. * Reset the port now
  8287. */
  8288. reg_data.word0 = 0;
  8289. bf_set(lpfc_sliport_ctrl_end, &reg_data,
  8290. LPFC_SLIPORT_LITTLE_ENDIAN);
  8291. bf_set(lpfc_sliport_ctrl_ip, &reg_data,
  8292. LPFC_SLIPORT_INIT_PORT);
  8293. writel(reg_data.word0, phba->sli4_hba.u.if_type2.
  8294. CTRLregaddr);
  8295. /* flush */
  8296. pci_read_config_word(phba->pcidev,
  8297. PCI_DEVICE_ID, &devid);
  8298. port_reset = 1;
  8299. msleep(20);
  8300. goto wait;
  8301. } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
  8302. rc = -ENODEV;
  8303. goto out;
  8304. }
  8305. break;
  8306. case LPFC_SLI_INTF_IF_TYPE_1:
  8307. default:
  8308. break;
  8309. }
  8310. out:
  8311. /* Catch the not-ready port failure after a port reset. */
  8312. if (rc) {
  8313. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8314. "3317 HBA not functional: IP Reset Failed "
  8315. "try: echo fw_reset > board_mode\n");
  8316. rc = -ENODEV;
  8317. }
  8318. return rc;
  8319. }
  8320. /**
  8321. * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
  8322. * @phba: pointer to lpfc hba data structure.
  8323. *
  8324. * This routine is invoked to set up the PCI device memory space for device
  8325. * with SLI-4 interface spec.
  8326. *
  8327. * Return codes
  8328. * 0 - successful
  8329. * other values - error
  8330. **/
  8331. static int
  8332. lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
  8333. {
  8334. struct pci_dev *pdev;
  8335. unsigned long bar0map_len, bar1map_len, bar2map_len;
  8336. int error = -ENODEV;
  8337. uint32_t if_type;
  8338. /* Obtain PCI device reference */
  8339. if (!phba->pcidev)
  8340. return error;
  8341. else
  8342. pdev = phba->pcidev;
  8343. /* Set the device DMA mask size */
  8344. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
  8345. || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
  8346. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
  8347. || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
  8348. return error;
  8349. }
  8350. }
  8351. /*
  8352. * The BARs and register set definitions and offset locations are
  8353. * dependent on the if_type.
  8354. */
  8355. if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
  8356. &phba->sli4_hba.sli_intf.word0)) {
  8357. return error;
  8358. }
  8359. /* There is no SLI3 failback for SLI4 devices. */
  8360. if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
  8361. LPFC_SLI_INTF_VALID) {
  8362. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8363. "2894 SLI_INTF reg contents invalid "
  8364. "sli_intf reg 0x%x\n",
  8365. phba->sli4_hba.sli_intf.word0);
  8366. return error;
  8367. }
  8368. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  8369. /*
  8370. * Get the bus address of SLI4 device Bar regions and the
  8371. * number of bytes required by each mapping. The mapping of the
  8372. * particular PCI BARs regions is dependent on the type of
  8373. * SLI4 device.
  8374. */
  8375. if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
  8376. phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
  8377. bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
  8378. /*
  8379. * Map SLI4 PCI Config Space Register base to a kernel virtual
  8380. * addr
  8381. */
  8382. phba->sli4_hba.conf_regs_memmap_p =
  8383. ioremap(phba->pci_bar0_map, bar0map_len);
  8384. if (!phba->sli4_hba.conf_regs_memmap_p) {
  8385. dev_printk(KERN_ERR, &pdev->dev,
  8386. "ioremap failed for SLI4 PCI config "
  8387. "registers.\n");
  8388. goto out;
  8389. }
  8390. phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
  8391. /* Set up BAR0 PCI config space register memory map */
  8392. lpfc_sli4_bar0_register_memmap(phba, if_type);
  8393. } else {
  8394. phba->pci_bar0_map = pci_resource_start(pdev, 1);
  8395. bar0map_len = pci_resource_len(pdev, 1);
  8396. if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
  8397. dev_printk(KERN_ERR, &pdev->dev,
  8398. "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
  8399. goto out;
  8400. }
  8401. phba->sli4_hba.conf_regs_memmap_p =
  8402. ioremap(phba->pci_bar0_map, bar0map_len);
  8403. if (!phba->sli4_hba.conf_regs_memmap_p) {
  8404. dev_printk(KERN_ERR, &pdev->dev,
  8405. "ioremap failed for SLI4 PCI config "
  8406. "registers.\n");
  8407. goto out;
  8408. }
  8409. lpfc_sli4_bar0_register_memmap(phba, if_type);
  8410. }
  8411. if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
  8412. (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
  8413. /*
  8414. * Map SLI4 if type 0 HBA Control Register base to a kernel
  8415. * virtual address and setup the registers.
  8416. */
  8417. phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
  8418. bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
  8419. phba->sli4_hba.ctrl_regs_memmap_p =
  8420. ioremap(phba->pci_bar1_map, bar1map_len);
  8421. if (!phba->sli4_hba.ctrl_regs_memmap_p) {
  8422. dev_printk(KERN_ERR, &pdev->dev,
  8423. "ioremap failed for SLI4 HBA control registers.\n");
  8424. goto out_iounmap_conf;
  8425. }
  8426. phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
  8427. lpfc_sli4_bar1_register_memmap(phba);
  8428. }
  8429. if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
  8430. (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
  8431. /*
  8432. * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
  8433. * virtual address and setup the registers.
  8434. */
  8435. phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
  8436. bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
  8437. phba->sli4_hba.drbl_regs_memmap_p =
  8438. ioremap(phba->pci_bar2_map, bar2map_len);
  8439. if (!phba->sli4_hba.drbl_regs_memmap_p) {
  8440. dev_printk(KERN_ERR, &pdev->dev,
  8441. "ioremap failed for SLI4 HBA doorbell registers.\n");
  8442. goto out_iounmap_ctrl;
  8443. }
  8444. phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
  8445. error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
  8446. if (error)
  8447. goto out_iounmap_all;
  8448. }
  8449. return 0;
  8450. out_iounmap_all:
  8451. iounmap(phba->sli4_hba.drbl_regs_memmap_p);
  8452. out_iounmap_ctrl:
  8453. iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
  8454. out_iounmap_conf:
  8455. iounmap(phba->sli4_hba.conf_regs_memmap_p);
  8456. out:
  8457. return error;
  8458. }
  8459. /**
  8460. * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
  8461. * @phba: pointer to lpfc hba data structure.
  8462. *
  8463. * This routine is invoked to unset the PCI device memory space for device
  8464. * with SLI-4 interface spec.
  8465. **/
  8466. static void
  8467. lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
  8468. {
  8469. uint32_t if_type;
  8470. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  8471. switch (if_type) {
  8472. case LPFC_SLI_INTF_IF_TYPE_0:
  8473. iounmap(phba->sli4_hba.drbl_regs_memmap_p);
  8474. iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
  8475. iounmap(phba->sli4_hba.conf_regs_memmap_p);
  8476. break;
  8477. case LPFC_SLI_INTF_IF_TYPE_2:
  8478. iounmap(phba->sli4_hba.conf_regs_memmap_p);
  8479. break;
  8480. case LPFC_SLI_INTF_IF_TYPE_1:
  8481. default:
  8482. dev_printk(KERN_ERR, &phba->pcidev->dev,
  8483. "FATAL - unsupported SLI4 interface type - %d\n",
  8484. if_type);
  8485. break;
  8486. }
  8487. }
  8488. /**
  8489. * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
  8490. * @phba: pointer to lpfc hba data structure.
  8491. *
  8492. * This routine is invoked to enable the MSI-X interrupt vectors to device
  8493. * with SLI-3 interface specs.
  8494. *
  8495. * Return codes
  8496. * 0 - successful
  8497. * other values - error
  8498. **/
  8499. static int
  8500. lpfc_sli_enable_msix(struct lpfc_hba *phba)
  8501. {
  8502. int rc;
  8503. LPFC_MBOXQ_t *pmb;
  8504. /* Set up MSI-X multi-message vectors */
  8505. rc = pci_alloc_irq_vectors(phba->pcidev,
  8506. LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
  8507. if (rc < 0) {
  8508. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8509. "0420 PCI enable MSI-X failed (%d)\n", rc);
  8510. goto vec_fail_out;
  8511. }
  8512. /*
  8513. * Assign MSI-X vectors to interrupt handlers
  8514. */
  8515. /* vector-0 is associated to slow-path handler */
  8516. rc = request_irq(pci_irq_vector(phba->pcidev, 0),
  8517. &lpfc_sli_sp_intr_handler, 0,
  8518. LPFC_SP_DRIVER_HANDLER_NAME, phba);
  8519. if (rc) {
  8520. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  8521. "0421 MSI-X slow-path request_irq failed "
  8522. "(%d)\n", rc);
  8523. goto msi_fail_out;
  8524. }
  8525. /* vector-1 is associated to fast-path handler */
  8526. rc = request_irq(pci_irq_vector(phba->pcidev, 1),
  8527. &lpfc_sli_fp_intr_handler, 0,
  8528. LPFC_FP_DRIVER_HANDLER_NAME, phba);
  8529. if (rc) {
  8530. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  8531. "0429 MSI-X fast-path request_irq failed "
  8532. "(%d)\n", rc);
  8533. goto irq_fail_out;
  8534. }
  8535. /*
  8536. * Configure HBA MSI-X attention conditions to messages
  8537. */
  8538. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  8539. if (!pmb) {
  8540. rc = -ENOMEM;
  8541. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8542. "0474 Unable to allocate memory for issuing "
  8543. "MBOX_CONFIG_MSI command\n");
  8544. goto mem_fail_out;
  8545. }
  8546. rc = lpfc_config_msi(phba, pmb);
  8547. if (rc)
  8548. goto mbx_fail_out;
  8549. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  8550. if (rc != MBX_SUCCESS) {
  8551. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
  8552. "0351 Config MSI mailbox command failed, "
  8553. "mbxCmd x%x, mbxStatus x%x\n",
  8554. pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
  8555. goto mbx_fail_out;
  8556. }
  8557. /* Free memory allocated for mailbox command */
  8558. mempool_free(pmb, phba->mbox_mem_pool);
  8559. return rc;
  8560. mbx_fail_out:
  8561. /* Free memory allocated for mailbox command */
  8562. mempool_free(pmb, phba->mbox_mem_pool);
  8563. mem_fail_out:
  8564. /* free the irq already requested */
  8565. free_irq(pci_irq_vector(phba->pcidev, 1), phba);
  8566. irq_fail_out:
  8567. /* free the irq already requested */
  8568. free_irq(pci_irq_vector(phba->pcidev, 0), phba);
  8569. msi_fail_out:
  8570. /* Unconfigure MSI-X capability structure */
  8571. pci_free_irq_vectors(phba->pcidev);
  8572. vec_fail_out:
  8573. return rc;
  8574. }
  8575. /**
  8576. * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
  8577. * @phba: pointer to lpfc hba data structure.
  8578. *
  8579. * This routine is invoked to enable the MSI interrupt mode to device with
  8580. * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
  8581. * enable the MSI vector. The device driver is responsible for calling
  8582. * request_irq() to register the MSI vector with an interrupt handler, which
  8583. * is done in this function.
  8584. *
  8585. * Return codes
  8586. * 0 - successful
  8587. * other values - error
  8588. */
  8589. static int
  8590. lpfc_sli_enable_msi(struct lpfc_hba *phba)
  8591. {
  8592. int rc;
  8593. rc = pci_enable_msi(phba->pcidev);
  8594. if (!rc)
  8595. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8596. "0462 PCI enable MSI mode success.\n");
  8597. else {
  8598. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8599. "0471 PCI enable MSI mode failed (%d)\n", rc);
  8600. return rc;
  8601. }
  8602. rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
  8603. 0, LPFC_DRIVER_NAME, phba);
  8604. if (rc) {
  8605. pci_disable_msi(phba->pcidev);
  8606. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  8607. "0478 MSI request_irq failed (%d)\n", rc);
  8608. }
  8609. return rc;
  8610. }
  8611. /**
  8612. * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
  8613. * @phba: pointer to lpfc hba data structure.
  8614. *
  8615. * This routine is invoked to enable device interrupt and associate driver's
  8616. * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
  8617. * spec. Depends on the interrupt mode configured to the driver, the driver
  8618. * will try to fallback from the configured interrupt mode to an interrupt
  8619. * mode which is supported by the platform, kernel, and device in the order
  8620. * of:
  8621. * MSI-X -> MSI -> IRQ.
  8622. *
  8623. * Return codes
  8624. * 0 - successful
  8625. * other values - error
  8626. **/
  8627. static uint32_t
  8628. lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
  8629. {
  8630. uint32_t intr_mode = LPFC_INTR_ERROR;
  8631. int retval;
  8632. if (cfg_mode == 2) {
  8633. /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
  8634. retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
  8635. if (!retval) {
  8636. /* Now, try to enable MSI-X interrupt mode */
  8637. retval = lpfc_sli_enable_msix(phba);
  8638. if (!retval) {
  8639. /* Indicate initialization to MSI-X mode */
  8640. phba->intr_type = MSIX;
  8641. intr_mode = 2;
  8642. }
  8643. }
  8644. }
  8645. /* Fallback to MSI if MSI-X initialization failed */
  8646. if (cfg_mode >= 1 && phba->intr_type == NONE) {
  8647. retval = lpfc_sli_enable_msi(phba);
  8648. if (!retval) {
  8649. /* Indicate initialization to MSI mode */
  8650. phba->intr_type = MSI;
  8651. intr_mode = 1;
  8652. }
  8653. }
  8654. /* Fallback to INTx if both MSI-X/MSI initialization failed */
  8655. if (phba->intr_type == NONE) {
  8656. retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
  8657. IRQF_SHARED, LPFC_DRIVER_NAME, phba);
  8658. if (!retval) {
  8659. /* Indicate initialization to INTx mode */
  8660. phba->intr_type = INTx;
  8661. intr_mode = 0;
  8662. }
  8663. }
  8664. return intr_mode;
  8665. }
  8666. /**
  8667. * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
  8668. * @phba: pointer to lpfc hba data structure.
  8669. *
  8670. * This routine is invoked to disable device interrupt and disassociate the
  8671. * driver's interrupt handler(s) from interrupt vector(s) to device with
  8672. * SLI-3 interface spec. Depending on the interrupt mode, the driver will
  8673. * release the interrupt vector(s) for the message signaled interrupt.
  8674. **/
  8675. static void
  8676. lpfc_sli_disable_intr(struct lpfc_hba *phba)
  8677. {
  8678. int nr_irqs, i;
  8679. if (phba->intr_type == MSIX)
  8680. nr_irqs = LPFC_MSIX_VECTORS;
  8681. else
  8682. nr_irqs = 1;
  8683. for (i = 0; i < nr_irqs; i++)
  8684. free_irq(pci_irq_vector(phba->pcidev, i), phba);
  8685. pci_free_irq_vectors(phba->pcidev);
  8686. /* Reset interrupt management states */
  8687. phba->intr_type = NONE;
  8688. phba->sli.slistat.sli_intr = 0;
  8689. }
  8690. /**
  8691. * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
  8692. * @phba: pointer to lpfc hba data structure.
  8693. * @vectors: number of msix vectors allocated.
  8694. *
  8695. * The routine will figure out the CPU affinity assignment for every
  8696. * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
  8697. * with a pointer to the CPU mask that defines ALL the CPUs this vector
  8698. * can be associated with. If the vector can be uniquely associated with
  8699. * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
  8700. * In addition, the CPU to IO channel mapping will be calculated
  8701. * and the phba->sli4_hba.cpu_map array will reflect this.
  8702. */
  8703. static void
  8704. lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
  8705. {
  8706. struct lpfc_vector_map_info *cpup;
  8707. int index = 0;
  8708. int vec = 0;
  8709. int cpu;
  8710. #ifdef CONFIG_X86
  8711. struct cpuinfo_x86 *cpuinfo;
  8712. #endif
  8713. /* Init cpu_map array */
  8714. memset(phba->sli4_hba.cpu_map, 0xff,
  8715. (sizeof(struct lpfc_vector_map_info) *
  8716. phba->sli4_hba.num_present_cpu));
  8717. /* Update CPU map with physical id and core id of each CPU */
  8718. cpup = phba->sli4_hba.cpu_map;
  8719. for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
  8720. #ifdef CONFIG_X86
  8721. cpuinfo = &cpu_data(cpu);
  8722. cpup->phys_id = cpuinfo->phys_proc_id;
  8723. cpup->core_id = cpuinfo->cpu_core_id;
  8724. #else
  8725. /* No distinction between CPUs for other platforms */
  8726. cpup->phys_id = 0;
  8727. cpup->core_id = 0;
  8728. #endif
  8729. cpup->channel_id = index; /* For now round robin */
  8730. cpup->irq = pci_irq_vector(phba->pcidev, vec);
  8731. vec++;
  8732. if (vec >= vectors)
  8733. vec = 0;
  8734. index++;
  8735. if (index >= phba->cfg_fcp_io_channel)
  8736. index = 0;
  8737. cpup++;
  8738. }
  8739. }
  8740. /**
  8741. * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  8742. * @phba: pointer to lpfc hba data structure.
  8743. *
  8744. * This routine is invoked to enable the MSI-X interrupt vectors to device
  8745. * with SLI-4 interface spec.
  8746. *
  8747. * Return codes
  8748. * 0 - successful
  8749. * other values - error
  8750. **/
  8751. static int
  8752. lpfc_sli4_enable_msix(struct lpfc_hba *phba)
  8753. {
  8754. int vectors, rc, index;
  8755. /* Set up MSI-X multi-message vectors */
  8756. vectors = phba->io_channel_irqs;
  8757. if (phba->cfg_fof)
  8758. vectors++;
  8759. rc = pci_alloc_irq_vectors(phba->pcidev,
  8760. (phba->nvmet_support) ? 1 : 2,
  8761. vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
  8762. if (rc < 0) {
  8763. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8764. "0484 PCI enable MSI-X failed (%d)\n", rc);
  8765. goto vec_fail_out;
  8766. }
  8767. vectors = rc;
  8768. /* Assign MSI-X vectors to interrupt handlers */
  8769. for (index = 0; index < vectors; index++) {
  8770. memset(&phba->sli4_hba.handler_name[index], 0, 16);
  8771. snprintf((char *)&phba->sli4_hba.handler_name[index],
  8772. LPFC_SLI4_HANDLER_NAME_SZ,
  8773. LPFC_DRIVER_HANDLER_NAME"%d", index);
  8774. phba->sli4_hba.hba_eq_hdl[index].idx = index;
  8775. phba->sli4_hba.hba_eq_hdl[index].phba = phba;
  8776. atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
  8777. if (phba->cfg_fof && (index == (vectors - 1)))
  8778. rc = request_irq(pci_irq_vector(phba->pcidev, index),
  8779. &lpfc_sli4_fof_intr_handler, 0,
  8780. (char *)&phba->sli4_hba.handler_name[index],
  8781. &phba->sli4_hba.hba_eq_hdl[index]);
  8782. else
  8783. rc = request_irq(pci_irq_vector(phba->pcidev, index),
  8784. &lpfc_sli4_hba_intr_handler, 0,
  8785. (char *)&phba->sli4_hba.handler_name[index],
  8786. &phba->sli4_hba.hba_eq_hdl[index]);
  8787. if (rc) {
  8788. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  8789. "0486 MSI-X fast-path (%d) "
  8790. "request_irq failed (%d)\n", index, rc);
  8791. goto cfg_fail_out;
  8792. }
  8793. }
  8794. if (phba->cfg_fof)
  8795. vectors--;
  8796. if (vectors != phba->io_channel_irqs) {
  8797. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8798. "3238 Reducing IO channels to match number of "
  8799. "MSI-X vectors, requested %d got %d\n",
  8800. phba->io_channel_irqs, vectors);
  8801. if (phba->cfg_fcp_io_channel > vectors)
  8802. phba->cfg_fcp_io_channel = vectors;
  8803. if (phba->cfg_nvme_io_channel > vectors)
  8804. phba->cfg_nvme_io_channel = vectors;
  8805. if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
  8806. phba->io_channel_irqs = phba->cfg_fcp_io_channel;
  8807. else
  8808. phba->io_channel_irqs = phba->cfg_nvme_io_channel;
  8809. }
  8810. lpfc_cpu_affinity_check(phba, vectors);
  8811. return rc;
  8812. cfg_fail_out:
  8813. /* free the irq already requested */
  8814. for (--index; index >= 0; index--)
  8815. free_irq(pci_irq_vector(phba->pcidev, index),
  8816. &phba->sli4_hba.hba_eq_hdl[index]);
  8817. /* Unconfigure MSI-X capability structure */
  8818. pci_free_irq_vectors(phba->pcidev);
  8819. vec_fail_out:
  8820. return rc;
  8821. }
  8822. /**
  8823. * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
  8824. * @phba: pointer to lpfc hba data structure.
  8825. *
  8826. * This routine is invoked to enable the MSI interrupt mode to device with
  8827. * SLI-4 interface spec. The kernel function pci_enable_msi() is called
  8828. * to enable the MSI vector. The device driver is responsible for calling
  8829. * request_irq() to register the MSI vector with an interrupt handler,
  8830. * which is done in this function.
  8831. *
  8832. * Return codes
  8833. * 0 - successful
  8834. * other values - error
  8835. **/
  8836. static int
  8837. lpfc_sli4_enable_msi(struct lpfc_hba *phba)
  8838. {
  8839. int rc, index;
  8840. rc = pci_enable_msi(phba->pcidev);
  8841. if (!rc)
  8842. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8843. "0487 PCI enable MSI mode success.\n");
  8844. else {
  8845. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8846. "0488 PCI enable MSI mode failed (%d)\n", rc);
  8847. return rc;
  8848. }
  8849. rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
  8850. 0, LPFC_DRIVER_NAME, phba);
  8851. if (rc) {
  8852. pci_disable_msi(phba->pcidev);
  8853. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  8854. "0490 MSI request_irq failed (%d)\n", rc);
  8855. return rc;
  8856. }
  8857. for (index = 0; index < phba->io_channel_irqs; index++) {
  8858. phba->sli4_hba.hba_eq_hdl[index].idx = index;
  8859. phba->sli4_hba.hba_eq_hdl[index].phba = phba;
  8860. }
  8861. if (phba->cfg_fof) {
  8862. phba->sli4_hba.hba_eq_hdl[index].idx = index;
  8863. phba->sli4_hba.hba_eq_hdl[index].phba = phba;
  8864. }
  8865. return 0;
  8866. }
  8867. /**
  8868. * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
  8869. * @phba: pointer to lpfc hba data structure.
  8870. *
8871. * This routine is invoked to enable the device interrupt and associate the
8872. * driver's interrupt handler(s) with the interrupt vector(s) of a device with
8873. * the SLI-4 interface spec. Depending on the interrupt mode configured for
8874. * the driver, it will try to fall back from the configured interrupt mode to
8875. * an interrupt mode which is supported by the platform, kernel, and device,
8876. * in the order of:
8877. * MSI-X -> MSI -> IRQ.
  8878. *
  8879. * Return codes
  8880. * 0 - successful
  8881. * other values - error
  8882. **/
  8883. static uint32_t
  8884. lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
  8885. {
  8886. uint32_t intr_mode = LPFC_INTR_ERROR;
  8887. int retval, idx;
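/* cfg_mode mirrors the lpfc_use_msi parameter: 2 = MSI-X, 1 = MSI, 0 = INTx */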
  8888. if (cfg_mode == 2) {
  8889. /* Preparation before conf_msi mbox cmd */
  8890. retval = 0;
  8891. if (!retval) {
  8892. /* Now, try to enable MSI-X interrupt mode */
  8893. retval = lpfc_sli4_enable_msix(phba);
  8894. if (!retval) {
  8895. /* Indicate initialization to MSI-X mode */
  8896. phba->intr_type = MSIX;
  8897. intr_mode = 2;
  8898. }
  8899. }
  8900. }
  8901. /* Fallback to MSI if MSI-X initialization failed */
  8902. if (cfg_mode >= 1 && phba->intr_type == NONE) {
  8903. retval = lpfc_sli4_enable_msi(phba);
  8904. if (!retval) {
  8905. /* Indicate initialization to MSI mode */
  8906. phba->intr_type = MSI;
  8907. intr_mode = 1;
  8908. }
  8909. }
8910. /* Fallback to INTx if both MSI-X/MSI initialization failed */
  8911. if (phba->intr_type == NONE) {
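/* The legacy INTx line may be shared with other devices, hence IRQF_SHARED */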
  8912. retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
  8913. IRQF_SHARED, LPFC_DRIVER_NAME, phba);
  8914. if (!retval) {
  8915. struct lpfc_hba_eq_hdl *eqhdl;
  8916. /* Indicate initialization to INTx mode */
  8917. phba->intr_type = INTx;
  8918. intr_mode = 0;
  8919. for (idx = 0; idx < phba->io_channel_irqs; idx++) {
  8920. eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
  8921. eqhdl->idx = idx;
  8922. eqhdl->phba = phba;
  8923. atomic_set(&eqhdl->hba_eq_in_use, 1);
  8924. }
  8925. if (phba->cfg_fof) {
  8926. eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
  8927. eqhdl->idx = idx;
  8928. eqhdl->phba = phba;
  8929. atomic_set(&eqhdl->hba_eq_in_use, 1);
  8930. }
  8931. }
  8932. }
  8933. return intr_mode;
  8934. }
  8935. /**
  8936. * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
  8937. * @phba: pointer to lpfc hba data structure.
  8938. *
8939. * This routine is invoked to disable the device interrupt and disassociate
8940. * the driver's interrupt handler(s) from the interrupt vector(s) of a device
  8941. * with SLI-4 interface spec. Depending on the interrupt mode, the driver
  8942. * will release the interrupt vector(s) for the message signaled interrupt.
  8943. **/
  8944. static void
  8945. lpfc_sli4_disable_intr(struct lpfc_hba *phba)
  8946. {
  8947. /* Disable the currently initialized interrupt mode */
  8948. if (phba->intr_type == MSIX) {
  8949. int index;
  8950. /* Free up MSI-X multi-message vectors */
  8951. for (index = 0; index < phba->io_channel_irqs; index++)
  8952. free_irq(pci_irq_vector(phba->pcidev, index),
  8953. &phba->sli4_hba.hba_eq_hdl[index]);
  8954. if (phba->cfg_fof)
  8955. free_irq(pci_irq_vector(phba->pcidev, index),
  8956. &phba->sli4_hba.hba_eq_hdl[index]);
  8957. } else {
  8958. free_irq(phba->pcidev->irq, phba);
  8959. }
  8960. pci_free_irq_vectors(phba->pcidev);
  8961. /* Reset interrupt management states */
  8962. phba->intr_type = NONE;
  8963. phba->sli.slistat.sli_intr = 0;
  8964. }
  8965. /**
  8966. * lpfc_unset_hba - Unset SLI3 hba device initialization
  8967. * @phba: pointer to lpfc hba data structure.
  8968. *
8969. * This routine is invoked to unset the HBA device initialization steps for
8970. * a device with SLI-3 interface spec.
  8971. **/
  8972. static void
  8973. lpfc_unset_hba(struct lpfc_hba *phba)
  8974. {
  8975. struct lpfc_vport *vport = phba->pport;
  8976. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  8977. spin_lock_irq(shost->host_lock);
  8978. vport->load_flag |= FC_UNLOADING;
  8979. spin_unlock_irq(shost->host_lock);
  8980. kfree(phba->vpi_bmask);
  8981. kfree(phba->vpi_ids);
  8982. lpfc_stop_hba_timers(phba);
  8983. phba->pport->work_port_events = 0;
  8984. lpfc_sli_hba_down(phba);
  8985. lpfc_sli_brdrestart(phba);
  8986. lpfc_sli_disable_intr(phba);
  8987. return;
  8988. }
  8989. /**
  8990. * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
  8991. * @phba: Pointer to HBA context object.
  8992. *
8993. * This function is called in the SLI4 code path to wait for completion
8994. * of the device's XRI exchange busy. It checks the XRI exchange busy
8995. * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
8996. * that, it checks the XRI exchange busy on outstanding FCP and ELS
8997. * I/Os every 30 seconds, logs an error message, and waits forever. Only when
8998. * all XRI exchange busy completes does the driver unload proceed with
8999. * invoking the function reset ioctl mailbox command to the CNA and the
9000. * rest of the driver unload resource release.
  9001. **/
  9002. static void
  9003. lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
  9004. {
  9005. int wait_time = 0;
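/* Abort lists for FC4 types that are not enabled are treated as already
 * drained (their completion flags are preset to 1). */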
  9006. int nvme_xri_cmpl = 1;
  9007. int fcp_xri_cmpl = 1;
  9008. int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
  9009. int nvmet_xri_cmpl =
  9010. list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
  9011. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
  9012. fcp_xri_cmpl =
  9013. list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
  9014. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
  9015. nvme_xri_cmpl =
  9016. list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
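/* Poll until every abort list drains; after the initial timeout window
 * the busy lists are logged and polling continues at a slower interval. */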
  9017. while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
  9018. !nvmet_xri_cmpl) {
  9019. if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
  9020. if (!nvme_xri_cmpl)
  9021. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9022. "6100 NVME XRI exchange busy "
  9023. "wait time: %d seconds.\n",
  9024. wait_time/1000);
  9025. if (!fcp_xri_cmpl)
  9026. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9027. "2877 FCP XRI exchange busy "
  9028. "wait time: %d seconds.\n",
  9029. wait_time/1000);
  9030. if (!els_xri_cmpl)
  9031. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9032. "2878 ELS XRI exchange busy "
  9033. "wait time: %d seconds.\n",
  9034. wait_time/1000);
  9035. msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
  9036. wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
  9037. } else {
  9038. msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
  9039. wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
  9040. }
  9041. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
  9042. nvme_xri_cmpl = list_empty(
  9043. &phba->sli4_hba.lpfc_abts_nvme_buf_list);
  9044. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
  9045. fcp_xri_cmpl = list_empty(
  9046. &phba->sli4_hba.lpfc_abts_scsi_buf_list);
  9047. els_xri_cmpl =
  9048. list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
  9049. nvmet_xri_cmpl =
  9050. list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
  9051. }
  9052. }
  9053. /**
  9054. * lpfc_sli4_hba_unset - Unset the fcoe hba
  9055. * @phba: Pointer to HBA context object.
  9056. *
  9057. * This function is called in the SLI4 code path to reset the HBA's FCoE
  9058. * function. The caller is not required to hold any lock. This routine
  9059. * issues PCI function reset mailbox command to reset the FCoE function.
  9060. * At the end of the function, it calls lpfc_hba_down_post function to
  9061. * free any pending commands.
  9062. **/
  9063. static void
  9064. lpfc_sli4_hba_unset(struct lpfc_hba *phba)
  9065. {
  9066. int wait_cnt = 0;
  9067. LPFC_MBOXQ_t *mboxq;
  9068. struct pci_dev *pdev = phba->pcidev;
  9069. lpfc_stop_hba_timers(phba);
  9070. phba->sli4_hba.intr_enable = 0;
  9071. /*
  9072. * Gracefully wait out the potential current outstanding asynchronous
  9073. * mailbox command.
  9074. */
9075. /* First, block any pending async mailbox command from being posted */
  9076. spin_lock_irq(&phba->hbalock);
  9077. phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
  9078. spin_unlock_irq(&phba->hbalock);
  9079. /* Now, trying to wait it out if we can */
  9080. while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
  9081. msleep(10);
  9082. if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
  9083. break;
  9084. }
  9085. /* Forcefully release the outstanding mailbox command if timed out */
  9086. if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
  9087. spin_lock_irq(&phba->hbalock);
  9088. mboxq = phba->sli.mbox_active;
  9089. mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
  9090. __lpfc_mbox_cmpl_put(phba, mboxq);
  9091. phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  9092. phba->sli.mbox_active = NULL;
  9093. spin_unlock_irq(&phba->hbalock);
  9094. }
  9095. /* Abort all iocbs associated with the hba */
  9096. lpfc_sli_hba_iocb_abort(phba);
  9097. /* Wait for completion of device XRI exchange busy */
  9098. lpfc_sli4_xri_exchange_busy_wait(phba);
  9099. /* Disable PCI subsystem interrupt */
  9100. lpfc_sli4_disable_intr(phba);
  9101. /* Disable SR-IOV if enabled */
  9102. if (phba->cfg_sriov_nr_virtfn)
  9103. pci_disable_sriov(pdev);
  9104. /* Stop kthread signal shall trigger work_done one more time */
  9105. kthread_stop(phba->worker_thread);
  9106. /* Reset SLI4 HBA FCoE function */
  9107. lpfc_pci_function_reset(phba);
  9108. lpfc_sli4_queue_destroy(phba);
  9109. /* Stop the SLI4 device port */
  9110. phba->pport->work_port_events = 0;
  9111. }
  9112. /**
  9113. * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
  9114. * @phba: Pointer to HBA context object.
  9115. * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
  9116. *
  9117. * This function is called in the SLI4 code path to read the port's
  9118. * sli4 capabilities.
  9119. *
9120. * This function may be called from any context that can block-wait
  9121. * for the completion. The expectation is that this routine is called
  9122. * typically from probe_one or from the online routine.
  9123. **/
  9124. int
  9125. lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  9126. {
  9127. int rc;
  9128. struct lpfc_mqe *mqe;
  9129. struct lpfc_pc_sli4_params *sli4_params;
  9130. uint32_t mbox_tmo;
  9131. rc = 0;
  9132. mqe = &mboxq->u.mqe;
  9133. /* Read the port's SLI4 Parameters port capabilities */
  9134. lpfc_pc_sli4_params(mboxq);
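/* Before interrupts are enabled (early in probe), the mailbox must be
 * issued in polled mode; otherwise wait for the completion interrupt. */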
  9135. if (!phba->sli4_hba.intr_enable)
  9136. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  9137. else {
  9138. mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
  9139. rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
  9140. }
  9141. if (unlikely(rc))
  9142. return 1;
  9143. sli4_params = &phba->sli4_hba.pc_sli4_params;
  9144. sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
  9145. sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
  9146. sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
  9147. sli4_params->featurelevel_1 = bf_get(featurelevel_1,
  9148. &mqe->un.sli4_params);
  9149. sli4_params->featurelevel_2 = bf_get(featurelevel_2,
  9150. &mqe->un.sli4_params);
  9151. sli4_params->proto_types = mqe->un.sli4_params.word3;
  9152. sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
  9153. sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
  9154. sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
  9155. sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
  9156. sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
  9157. sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
  9158. sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
  9159. sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
  9160. sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
  9161. sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
  9162. sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
  9163. sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
  9164. sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
  9165. sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
  9166. sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
  9167. sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
  9168. sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
  9169. sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
  9170. sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
  9171. sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
  9172. /* Make sure that sge_supp_len can be handled by the driver */
  9173. if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
  9174. sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
  9175. return rc;
  9176. }
  9177. /**
  9178. * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
  9179. * @phba: Pointer to HBA context object.
  9180. * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
  9181. *
  9182. * This function is called in the SLI4 code path to read the port's
  9183. * sli4 capabilities.
  9184. *
9185. * This function may be called from any context that can block-wait
  9186. * for the completion. The expectation is that this routine is called
  9187. * typically from probe_one or from the online routine.
  9188. **/
  9189. int
  9190. lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  9191. {
  9192. int rc;
  9193. struct lpfc_mqe *mqe = &mboxq->u.mqe;
  9194. struct lpfc_pc_sli4_params *sli4_params;
  9195. uint32_t mbox_tmo;
  9196. int length;
  9197. struct lpfc_sli4_parameters *mbx_sli4_parameters;
  9198. /*
  9199. * By default, the driver assumes the SLI4 port requires RPI
  9200. * header postings. The SLI4_PARAM response will correct this
  9201. * assumption.
  9202. */
  9203. phba->sli4_hba.rpi_hdrs_in_use = 1;
  9204. /* Read the port's SLI4 Config Parameters */
  9205. length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
  9206. sizeof(struct lpfc_sli4_cfg_mhdr));
  9207. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  9208. LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
  9209. length, LPFC_SLI4_MBX_EMBED);
  9210. if (!phba->sli4_hba.intr_enable)
  9211. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  9212. else {
  9213. mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
  9214. rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
  9215. }
  9216. if (unlikely(rc))
  9217. return rc;
  9218. sli4_params = &phba->sli4_hba.pc_sli4_params;
  9219. mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
  9220. sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
  9221. sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
  9222. sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
  9223. sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
  9224. mbx_sli4_parameters);
  9225. sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
  9226. mbx_sli4_parameters);
  9227. if (bf_get(cfg_phwq, mbx_sli4_parameters))
  9228. phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
  9229. else
  9230. phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
  9231. sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
  9232. sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
  9233. sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
  9234. sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
  9235. sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
  9236. sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
  9237. sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
  9238. sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
  9239. sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
  9240. mbx_sli4_parameters);
  9241. sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
  9242. sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
  9243. mbx_sli4_parameters);
  9244. phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
  9245. phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
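/* NVME is kept only when the firmware reports both cfg_nvme and cfg_xib;
 * otherwise the port is limited to FCP below (or the probe fails if FCP
 * is not enabled either). */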
  9246. phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
  9247. bf_get(cfg_xib, mbx_sli4_parameters));
  9248. if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
  9249. !phba->nvme_support) {
  9250. phba->nvme_support = 0;
  9251. phba->nvmet_support = 0;
  9252. phba->cfg_nvmet_mrq = 0;
  9253. phba->cfg_nvme_io_channel = 0;
  9254. phba->io_channel_irqs = phba->cfg_fcp_io_channel;
  9255. lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
  9256. "6101 Disabling NVME support: "
  9257. "Not supported by firmware: %d %d\n",
  9258. bf_get(cfg_nvme, mbx_sli4_parameters),
  9259. bf_get(cfg_xib, mbx_sli4_parameters));
  9260. /* If firmware doesn't support NVME, just use SCSI support */
  9261. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
  9262. return -ENODEV;
  9263. phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
  9264. }
  9265. if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
  9266. phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
  9267. /* Make sure that sge_supp_len can be handled by the driver */
  9268. if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
  9269. sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
  9270. /*
9271. * Issue IOs with the CDB embedded in the WQE to minimize the number
9272. * of DMAs the firmware has to do. Setting this to 1 also forces
9273. * the driver to use 128 byte WQEs for FCP IOs.
  9274. */
  9275. if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
  9276. phba->fcp_embed_io = 1;
  9277. else
  9278. phba->fcp_embed_io = 0;
  9279. /*
  9280. * Check if the SLI port supports MDS Diagnostics
  9281. */
  9282. if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
  9283. phba->mds_diags_support = 1;
  9284. else
  9285. phba->mds_diags_support = 0;
  9286. return 0;
  9287. }
  9288. /**
  9289. * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
  9290. * @pdev: pointer to PCI device
  9291. * @pid: pointer to PCI device identifier
  9292. *
  9293. * This routine is to be called to attach a device with SLI-3 interface spec
  9294. * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
  9295. * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9296. * information of the device and driver to see if the driver states that it can
  9297. * support this kind of device. If the match is successful, the driver core
  9298. * invokes this routine. If this routine determines it can claim the HBA, it
  9299. * does all the initialization that it needs to do to handle the HBA properly.
  9300. *
  9301. * Return code
  9302. * 0 - driver can claim the device
  9303. * negative value - driver can not claim the device
  9304. **/
  9305. static int
  9306. lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
  9307. {
  9308. struct lpfc_hba *phba;
  9309. struct lpfc_vport *vport = NULL;
  9310. struct Scsi_Host *shost = NULL;
  9311. int error;
  9312. uint32_t cfg_mode, intr_mode;
  9313. /* Allocate memory for HBA structure */
  9314. phba = lpfc_hba_alloc(pdev);
  9315. if (!phba)
  9316. return -ENOMEM;
  9317. /* Perform generic PCI device enabling operation */
  9318. error = lpfc_enable_pci_dev(phba);
  9319. if (error)
  9320. goto out_free_phba;
  9321. /* Set up SLI API function jump table for PCI-device group-0 HBAs */
  9322. error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
  9323. if (error)
  9324. goto out_disable_pci_dev;
  9325. /* Set up SLI-3 specific device PCI memory space */
  9326. error = lpfc_sli_pci_mem_setup(phba);
  9327. if (error) {
  9328. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9329. "1402 Failed to set up pci memory space.\n");
  9330. goto out_disable_pci_dev;
  9331. }
  9332. /* Set up SLI-3 specific device driver resources */
  9333. error = lpfc_sli_driver_resource_setup(phba);
  9334. if (error) {
  9335. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9336. "1404 Failed to set up driver resource.\n");
  9337. goto out_unset_pci_mem_s3;
  9338. }
  9339. /* Initialize and populate the iocb list per host */
  9340. error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
  9341. if (error) {
  9342. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9343. "1405 Failed to initialize iocb list.\n");
  9344. goto out_unset_driver_resource_s3;
  9345. }
  9346. /* Set up common device driver resources */
  9347. error = lpfc_setup_driver_resource_phase2(phba);
  9348. if (error) {
  9349. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9350. "1406 Failed to set up driver resource.\n");
  9351. goto out_free_iocb_list;
  9352. }
  9353. /* Get the default values for Model Name and Description */
  9354. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  9355. /* Create SCSI host to the physical port */
  9356. error = lpfc_create_shost(phba);
  9357. if (error) {
  9358. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9359. "1407 Failed to create scsi host.\n");
  9360. goto out_unset_driver_resource;
  9361. }
  9362. /* Configure sysfs attributes */
  9363. vport = phba->pport;
  9364. error = lpfc_alloc_sysfs_attr(vport);
  9365. if (error) {
  9366. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9367. "1476 Failed to allocate sysfs attr\n");
  9368. goto out_destroy_shost;
  9369. }
  9370. shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
  9371. /* Now, trying to enable interrupt and bring up the device */
  9372. cfg_mode = phba->cfg_use_msi;
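/* Retry loop: start with the configured mode and, if the active-interrupt
 * test below fails, step down through MSI-X -> MSI -> INTx. */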
  9373. while (true) {
  9374. /* Put device to a known state before enabling interrupt */
  9375. lpfc_stop_port(phba);
  9376. /* Configure and enable interrupt */
  9377. intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
  9378. if (intr_mode == LPFC_INTR_ERROR) {
  9379. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9380. "0431 Failed to enable interrupt.\n");
  9381. error = -ENODEV;
  9382. goto out_free_sysfs_attr;
  9383. }
  9384. /* SLI-3 HBA setup */
  9385. if (lpfc_sli_hba_setup(phba)) {
  9386. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9387. "1477 Failed to set up hba\n");
  9388. error = -ENODEV;
  9389. goto out_remove_device;
  9390. }
  9391. /* Wait 50ms for the interrupts of previous mailbox commands */
  9392. msleep(50);
  9393. /* Check active interrupts on message signaled interrupts */
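/* INTx (intr_mode 0) needs no test; for MSI/MSI-X, seeing more than
 * LPFC_MSIX_VECTORS interrupts is taken as evidence that the vectors
 * actually fire. */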
  9394. if (intr_mode == 0 ||
  9395. phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
  9396. /* Log the current active interrupt mode */
  9397. phba->intr_mode = intr_mode;
  9398. lpfc_log_intr_mode(phba, intr_mode);
  9399. break;
  9400. } else {
  9401. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  9402. "0447 Configure interrupt mode (%d) "
  9403. "failed active interrupt test.\n",
  9404. intr_mode);
  9405. /* Disable the current interrupt mode */
  9406. lpfc_sli_disable_intr(phba);
  9407. /* Try next level of interrupt mode */
  9408. cfg_mode = --intr_mode;
  9409. }
  9410. }
  9411. /* Perform post initialization setup */
  9412. lpfc_post_init_setup(phba);
  9413. /* Check if there are static vports to be created. */
  9414. lpfc_create_static_vport(phba);
  9415. return 0;
  9416. out_remove_device:
  9417. lpfc_unset_hba(phba);
  9418. out_free_sysfs_attr:
  9419. lpfc_free_sysfs_attr(vport);
  9420. out_destroy_shost:
  9421. lpfc_destroy_shost(phba);
  9422. out_unset_driver_resource:
  9423. lpfc_unset_driver_resource_phase2(phba);
  9424. out_free_iocb_list:
  9425. lpfc_free_iocb_list(phba);
  9426. out_unset_driver_resource_s3:
  9427. lpfc_sli_driver_resource_unset(phba);
  9428. out_unset_pci_mem_s3:
  9429. lpfc_sli_pci_mem_unset(phba);
  9430. out_disable_pci_dev:
  9431. lpfc_disable_pci_dev(phba);
  9432. if (shost)
  9433. scsi_host_put(shost);
  9434. out_free_phba:
  9435. lpfc_hba_free(phba);
  9436. return error;
  9437. }
  9438. /**
  9439. * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
  9440. * @pdev: pointer to PCI device
  9441. *
9442. * This routine is to be called to detach a device with SLI-3 interface
  9443. * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
  9444. * removed from PCI bus, it performs all the necessary cleanup for the HBA
  9445. * device to be removed from the PCI subsystem properly.
  9446. **/
  9447. static void
  9448. lpfc_pci_remove_one_s3(struct pci_dev *pdev)
  9449. {
  9450. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9451. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  9452. struct lpfc_vport **vports;
  9453. struct lpfc_hba *phba = vport->phba;
  9454. int i;
  9455. spin_lock_irq(&phba->hbalock);
  9456. vport->load_flag |= FC_UNLOADING;
  9457. spin_unlock_irq(&phba->hbalock);
  9458. lpfc_free_sysfs_attr(vport);
  9459. /* Release all the vports against this physical port */
  9460. vports = lpfc_create_vport_work_array(phba);
  9461. if (vports != NULL)
  9462. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  9463. if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
  9464. continue;
  9465. fc_vport_terminate(vports[i]->fc_vport);
  9466. }
  9467. lpfc_destroy_vport_work_array(phba, vports);
  9468. /* Remove FC host and then SCSI host with the physical port */
  9469. fc_remove_host(shost);
  9470. scsi_remove_host(shost);
  9471. lpfc_cleanup(vport);
  9472. /*
9473. * Bring down the SLI Layer. This step disables all interrupts,
  9474. * clears the rings, discards all mailbox commands, and resets
  9475. * the HBA.
  9476. */
  9477. /* HBA interrupt will be disabled after this call */
  9478. lpfc_sli_hba_down(phba);
  9479. /* Stop kthread signal shall trigger work_done one more time */
  9480. kthread_stop(phba->worker_thread);
  9481. /* Final cleanup of txcmplq and reset the HBA */
  9482. lpfc_sli_brdrestart(phba);
  9483. kfree(phba->vpi_bmask);
  9484. kfree(phba->vpi_ids);
  9485. lpfc_stop_hba_timers(phba);
  9486. spin_lock_irq(&phba->hbalock);
  9487. list_del_init(&vport->listentry);
  9488. spin_unlock_irq(&phba->hbalock);
  9489. lpfc_debugfs_terminate(vport);
  9490. /* Disable SR-IOV if enabled */
  9491. if (phba->cfg_sriov_nr_virtfn)
  9492. pci_disable_sriov(pdev);
  9493. /* Disable interrupt */
  9494. lpfc_sli_disable_intr(phba);
  9495. scsi_host_put(shost);
  9496. /*
  9497. * Call scsi_free before mem_free since scsi bufs are released to their
  9498. * corresponding pools here.
  9499. */
  9500. lpfc_scsi_free(phba);
  9501. lpfc_mem_free_all(phba);
  9502. dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
  9503. phba->hbqslimp.virt, phba->hbqslimp.phys);
  9504. /* Free resources associated with SLI2 interface */
  9505. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  9506. phba->slim2p.virt, phba->slim2p.phys);
  9507. /* unmap adapter SLIM and Control Registers */
  9508. iounmap(phba->ctrl_regs_memmap_p);
  9509. iounmap(phba->slim_memmap_p);
  9510. lpfc_hba_free(phba);
  9511. pci_release_mem_regions(pdev);
  9512. pci_disable_device(pdev);
  9513. }
  9514. /**
  9515. * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
  9516. * @pdev: pointer to PCI device
  9517. * @msg: power management message
  9518. *
  9519. * This routine is to be called from the kernel's PCI subsystem to support
  9520. * system Power Management (PM) to device with SLI-3 interface spec. When
  9521. * PM invokes this method, it quiesces the device by stopping the driver's
9522. * worker thread for the device, turning off the device's interrupt and DMA,
9523. * and bringing the device offline. Note that as the driver implements the
  9524. * minimum PM requirements to a power-aware driver's PM support for the
  9525. * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
  9526. * to the suspend() method call will be treated as SUSPEND and the driver will
  9527. * fully reinitialize its device during resume() method call, the driver will
  9528. * set device to PCI_D3hot state in PCI config space instead of setting it
  9529. * according to the @msg provided by the PM.
  9530. *
  9531. * Return code
  9532. * 0 - driver suspended the device
  9533. * Error otherwise
  9534. **/
  9535. static int
  9536. lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
  9537. {
  9538. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9539. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  9540. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  9541. "0473 PCI device Power Management suspend.\n");
  9542. /* Bring down the device */
  9543. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  9544. lpfc_offline(phba);
  9545. kthread_stop(phba->worker_thread);
  9546. /* Disable interrupt from device */
  9547. lpfc_sli_disable_intr(phba);
  9548. /* Save device state to PCI config space */
  9549. pci_save_state(pdev);
  9550. pci_set_power_state(pdev, PCI_D3hot);
  9551. return 0;
  9552. }
  9553. /**
  9554. * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
  9555. * @pdev: pointer to PCI device
  9556. *
  9557. * This routine is to be called from the kernel's PCI subsystem to support
  9558. * system Power Management (PM) to device with SLI-3 interface spec. When PM
  9559. * invokes this method, it restores the device's PCI config space state and
  9560. * fully reinitializes the device and brings it online. Note that as the
  9561. * driver implements the minimum PM requirements to a power-aware driver's
  9562. * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
  9563. * FREEZE) to the suspend() method call will be treated as SUSPEND and the
  9564. * driver will fully reinitialize its device during resume() method call,
  9565. * the device will be set to PCI_D0 directly in PCI config space before
  9566. * restoring the state.
  9567. *
  9568. * Return code
9569. * 0 - driver resumed the device
  9570. * Error otherwise
  9571. **/
  9572. static int
  9573. lpfc_pci_resume_one_s3(struct pci_dev *pdev)
  9574. {
  9575. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9576. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  9577. uint32_t intr_mode;
  9578. int error;
  9579. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  9580. "0452 PCI device Power Management resume.\n");
  9581. /* Restore device state from PCI config space */
  9582. pci_set_power_state(pdev, PCI_D0);
  9583. pci_restore_state(pdev);
  9584. /*
  9585. * As the new kernel behavior of pci_restore_state() API call clears
  9586. * device saved_state flag, need to save the restored state again.
  9587. */
  9588. pci_save_state(pdev);
  9589. if (pdev->is_busmaster)
  9590. pci_set_master(pdev);
  9591. /* Startup the kernel thread for this host adapter. */
  9592. phba->worker_thread = kthread_run(lpfc_do_work, phba,
  9593. "lpfc_worker_%d", phba->brd_no);
  9594. if (IS_ERR(phba->worker_thread)) {
  9595. error = PTR_ERR(phba->worker_thread);
  9596. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9597. "0434 PM resume failed to start worker "
  9598. "thread: error=x%x.\n", error);
  9599. return error;
  9600. }
  9601. /* Configure and enable interrupt */
  9602. intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
  9603. if (intr_mode == LPFC_INTR_ERROR) {
  9604. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9605. "0430 PM resume Failed to enable interrupt\n");
  9606. return -EIO;
  9607. } else
  9608. phba->intr_mode = intr_mode;
  9609. /* Restart HBA and bring it online */
  9610. lpfc_sli_brdrestart(phba);
  9611. lpfc_online(phba);
  9612. /* Log the current active interrupt mode */
  9613. lpfc_log_intr_mode(phba, phba->intr_mode);
  9614. return 0;
  9615. }
  9616. /**
  9617. * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
  9618. * @phba: pointer to lpfc hba data structure.
  9619. *
  9620. * This routine is called to prepare the SLI3 device for PCI slot recover. It
  9621. * aborts all the outstanding SCSI I/Os to the pci device.
  9622. **/
  9623. static void
  9624. lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
  9625. {
  9626. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9627. "2723 PCI channel I/O abort preparing for recovery\n");
  9628. /*
9629. * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
9630. * and let the SCSI mid-layer retry them to recover.
  9631. */
  9632. lpfc_sli_abort_fcp_rings(phba);
  9633. }
  9634. /**
  9635. * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
  9636. * @phba: pointer to lpfc hba data structure.
  9637. *
  9638. * This routine is called to prepare the SLI3 device for PCI slot reset. It
  9639. * disables the device interrupt and pci device, and aborts the internal FCP
  9640. * pending I/Os.
  9641. **/
  9642. static void
  9643. lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
  9644. {
  9645. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9646. "2710 PCI channel disable preparing for reset\n");
  9647. /* Block any management I/Os to the device */
  9648. lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
  9649. /* Block all SCSI devices' I/Os on the host */
  9650. lpfc_scsi_dev_block(phba);
  9651. /* Flush all driver's outstanding SCSI I/Os as we are to reset */
  9652. lpfc_sli_flush_fcp_rings(phba);
  9653. /* stop all timers */
  9654. lpfc_stop_hba_timers(phba);
  9655. /* Disable interrupt and pci device */
  9656. lpfc_sli_disable_intr(phba);
  9657. pci_disable_device(phba->pcidev);
  9658. }
  9659. /**
  9660. * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
  9661. * @phba: pointer to lpfc hba data structure.
  9662. *
  9663. * This routine is called to prepare the SLI3 device for PCI slot permanently
  9664. * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
  9665. * pending I/Os.
  9666. **/
  9667. static void
  9668. lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
  9669. {
  9670. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9671. "2711 PCI channel permanent disable for failure\n");
  9672. /* Block all SCSI devices' I/Os on the host */
  9673. lpfc_scsi_dev_block(phba);
  9674. /* stop all timers */
  9675. lpfc_stop_hba_timers(phba);
  9676. /* Clean up all driver's outstanding SCSI I/Os */
  9677. lpfc_sli_flush_fcp_rings(phba);
  9678. }
  9679. /**
  9680. * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
  9681. * @pdev: pointer to PCI device.
  9682. * @state: the current PCI connection state.
  9683. *
  9684. * This routine is called from the PCI subsystem for I/O error handling to
  9685. * device with SLI-3 interface spec. This function is called by the PCI
  9686. * subsystem after a PCI bus error affecting this device has been detected.
  9687. * When this function is invoked, it will need to stop all the I/Os and
  9688. * interrupt(s) to the device. Once that is done, it will return
  9689. * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
  9690. * as desired.
  9691. *
  9692. * Return codes
  9693. * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
  9694. * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  9695. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  9696. **/
  9697. static pci_ers_result_t
  9698. lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
  9699. {
  9700. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9701. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  9702. switch (state) {
  9703. case pci_channel_io_normal:
  9704. /* Non-fatal error, prepare for recovery */
  9705. lpfc_sli_prep_dev_for_recover(phba);
  9706. return PCI_ERS_RESULT_CAN_RECOVER;
  9707. case pci_channel_io_frozen:
  9708. /* Fatal error, prepare for slot reset */
  9709. lpfc_sli_prep_dev_for_reset(phba);
  9710. return PCI_ERS_RESULT_NEED_RESET;
  9711. case pci_channel_io_perm_failure:
  9712. /* Permanent failure, prepare for device down */
  9713. lpfc_sli_prep_dev_for_perm_failure(phba);
  9714. return PCI_ERS_RESULT_DISCONNECT;
  9715. default:
  9716. /* Unknown state, prepare and request slot reset */
  9717. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9718. "0472 Unknown PCI error state: x%x\n", state);
  9719. lpfc_sli_prep_dev_for_reset(phba);
  9720. return PCI_ERS_RESULT_NEED_RESET;
  9721. }
  9722. }
  9723. /**
  9724. * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
  9725. * @pdev: pointer to PCI device.
  9726. *
  9727. * This routine is called from the PCI subsystem for error handling to
  9728. * device with SLI-3 interface spec. This is called after PCI bus has been
  9729. * reset to restart the PCI card from scratch, as if from a cold-boot.
  9730. * During the PCI subsystem error recovery, after driver returns
  9731. * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
  9732. * recovery and then call this routine before calling the .resume method
  9733. * to recover the device. This function will initialize the HBA device,
9734. * enable the interrupt, but it will just put the HBA into an offline state
  9735. * without passing any I/O traffic.
  9736. *
  9737. * Return codes
  9738. * PCI_ERS_RESULT_RECOVERED - the device has been recovered
  9739. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  9740. */
  9741. static pci_ers_result_t
  9742. lpfc_io_slot_reset_s3(struct pci_dev *pdev)
  9743. {
  9744. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9745. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  9746. struct lpfc_sli *psli = &phba->sli;
  9747. uint32_t intr_mode;
  9748. dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
  9749. if (pci_enable_device_mem(pdev)) {
  9750. printk(KERN_ERR "lpfc: Cannot re-enable "
  9751. "PCI device after reset.\n");
  9752. return PCI_ERS_RESULT_DISCONNECT;
  9753. }
  9754. pci_restore_state(pdev);
  9755. /*
  9756. * As the new kernel behavior of pci_restore_state() API call clears
  9757. * device saved_state flag, need to save the restored state again.
  9758. */
  9759. pci_save_state(pdev);
  9760. if (pdev->is_busmaster)
  9761. pci_set_master(pdev);
  9762. spin_lock_irq(&phba->hbalock);
  9763. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  9764. spin_unlock_irq(&phba->hbalock);
  9765. /* Configure and enable interrupt */
  9766. intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
  9767. if (intr_mode == LPFC_INTR_ERROR) {
  9768. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9769. "0427 Cannot re-enable interrupt after "
  9770. "slot reset.\n");
  9771. return PCI_ERS_RESULT_DISCONNECT;
  9772. } else
  9773. phba->intr_mode = intr_mode;
  9774. /* Take device offline, it will perform cleanup */
  9775. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  9776. lpfc_offline(phba);
  9777. lpfc_sli_brdrestart(phba);
  9778. /* Log the current active interrupt mode */
  9779. lpfc_log_intr_mode(phba, phba->intr_mode);
  9780. return PCI_ERS_RESULT_RECOVERED;
  9781. }
  9782. /**
  9783. * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
  9784. * @pdev: pointer to PCI device
  9785. *
  9786. * This routine is called from the PCI subsystem for error handling to device
  9787. * with SLI-3 interface spec. It is called when kernel error recovery tells
  9788. * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
  9789. * error recovery. After this call, traffic can start to flow from this device
  9790. * again.
  9791. */
  9792. static void
  9793. lpfc_io_resume_s3(struct pci_dev *pdev)
  9794. {
  9795. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9796. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  9797. /* Bring device online, it will be no-op for non-fatal error resume */
  9798. lpfc_online(phba);
  9799. /* Clean up Advanced Error Reporting (AER) if needed */
  9800. if (phba->hba_flag & HBA_AER_ENABLED)
  9801. pci_cleanup_aer_uncorrect_error_status(pdev);
  9802. }
  9803. /**
  9804. * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
  9805. * @phba: pointer to lpfc hba data structure.
  9806. *
  9807. * returns the number of ELS/CT IOCBs to reserve
  9808. **/
  9809. int
  9810. lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
  9811. {
  9812. int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
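/* Reserve roughly 10% of the configured XRIs for ELS/CT traffic, capped at 250 */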
  9813. if (phba->sli_rev == LPFC_SLI_REV4) {
  9814. if (max_xri <= 100)
  9815. return 10;
  9816. else if (max_xri <= 256)
  9817. return 25;
  9818. else if (max_xri <= 512)
  9819. return 50;
  9820. else if (max_xri <= 1024)
  9821. return 100;
  9822. else if (max_xri <= 1536)
  9823. return 150;
  9824. else if (max_xri <= 2048)
  9825. return 200;
  9826. else
  9827. return 250;
  9828. } else
  9829. return 0;
  9830. }
  9831. /**
  9832. * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
  9833. * @phba: pointer to lpfc hba data structure.
  9834. *
  9835. * returns the number of ELS/CT + NVMET IOCBs to reserve
  9836. **/
  9837. int
  9838. lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
  9839. {
  9840. int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
  9841. if (phba->nvmet_support)
  9842. max_xri += LPFC_NVMET_BUF_POST;
  9843. return max_xri;
  9844. }
  9845. /**
  9846. * lpfc_write_firmware - attempt to write a firmware image to the port
  9847. * @fw: pointer to firmware image returned from request_firmware.
9848. * @context: pointer to lpfc hba data structure passed as an opaque pointer.
  9849. *
  9850. **/
  9851. static void
  9852. lpfc_write_firmware(const struct firmware *fw, void *context)
  9853. {
  9854. struct lpfc_hba *phba = (struct lpfc_hba *)context;
  9855. char fwrev[FW_REV_STR_SIZE];
  9856. struct lpfc_grp_hdr *image;
  9857. struct list_head dma_buffer_list;
  9858. int i, rc = 0;
  9859. struct lpfc_dmabuf *dmabuf, *next;
  9860. uint32_t offset = 0, temp_offset = 0;
  9861. uint32_t magic_number, ftype, fid, fsize;
  9862. /* It can be null in no-wait mode, sanity check */
  9863. if (!fw) {
  9864. rc = -ENXIO;
  9865. goto out;
  9866. }
  9867. image = (struct lpfc_grp_hdr *)fw->data;
  9868. magic_number = be32_to_cpu(image->magic_number);
  9869. ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
9870. fid = bf_get_be32(lpfc_grp_hdr_id, image);
  9871. fsize = be32_to_cpu(image->size);
  9872. INIT_LIST_HEAD(&dma_buffer_list);
  9873. if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
  9874. magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
  9875. ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
  9876. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9877. "3022 Invalid FW image found. "
  9878. "Magic:%x Type:%x ID:%x Size %d %zd\n",
  9879. magic_number, ftype, fid, fsize, fw->size);
  9880. rc = -EINVAL;
  9881. goto release_out;
  9882. }
  9883. lpfc_decode_firmware_rev(phba, fwrev, 1);
  9884. if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
  9885. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9886. "3023 Updating Firmware, Current Version:%s "
  9887. "New Version:%s\n",
  9888. fwrev, image->revision);
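/* Stage the image through a small pool of SLI4_PAGE_SIZE DMA buffers;
 * each lpfc_wr_object() pass writes one batch and advances offset. */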
  9889. for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
  9890. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
  9891. GFP_KERNEL);
  9892. if (!dmabuf) {
  9893. rc = -ENOMEM;
  9894. goto release_out;
  9895. }
  9896. dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
  9897. SLI4_PAGE_SIZE,
  9898. &dmabuf->phys,
  9899. GFP_KERNEL);
  9900. if (!dmabuf->virt) {
  9901. kfree(dmabuf);
  9902. rc = -ENOMEM;
  9903. goto release_out;
  9904. }
  9905. list_add_tail(&dmabuf->list, &dma_buffer_list);
  9906. }
  9907. while (offset < fw->size) {
  9908. temp_offset = offset;
  9909. list_for_each_entry(dmabuf, &dma_buffer_list, list) {
  9910. if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
  9911. memcpy(dmabuf->virt,
  9912. fw->data + temp_offset,
  9913. fw->size - temp_offset);
  9914. temp_offset = fw->size;
  9915. break;
  9916. }
  9917. memcpy(dmabuf->virt, fw->data + temp_offset,
  9918. SLI4_PAGE_SIZE);
  9919. temp_offset += SLI4_PAGE_SIZE;
  9920. }
  9921. rc = lpfc_wr_object(phba, &dma_buffer_list,
  9922. (fw->size - offset), &offset);
  9923. if (rc)
  9924. goto release_out;
  9925. }
  9926. rc = offset;
  9927. }
  9928. release_out:
  9929. list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
  9930. list_del(&dmabuf->list);
  9931. dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
  9932. dmabuf->virt, dmabuf->phys);
  9933. kfree(dmabuf);
  9934. }
  9935. release_firmware(fw);
  9936. out:
  9937. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9938. "3024 Firmware update done: %d.\n", rc);
  9939. return;
  9940. }
  9941. /**
  9942. * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
  9943. * @phba: pointer to lpfc hba data structure.
  9944. *
9945. * This routine is called to perform a Linux generic firmware upgrade on a
9946. * device that supports such a feature.
  9947. **/
  9948. int
  9949. lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
  9950. {
  9951. uint8_t file_name[ELX_MODEL_NAME_SIZE];
  9952. int ret;
  9953. const struct firmware *fw;
  9954. /* Only supported on SLI4 interface type 2 for now */
  9955. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
  9956. LPFC_SLI_INTF_IF_TYPE_2)
  9957. return -EPERM;
  9958. snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
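/* INT_FW_UPGRADE requests the image asynchronously (suitable during probe);
 * RUN_FW_UPGRADE fetches it synchronously, e.g. for a user-initiated update. */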
  9959. if (fw_upgrade == INT_FW_UPGRADE) {
  9960. ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
  9961. file_name, &phba->pcidev->dev,
  9962. GFP_KERNEL, (void *)phba,
  9963. lpfc_write_firmware);
  9964. } else if (fw_upgrade == RUN_FW_UPGRADE) {
  9965. ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
  9966. if (!ret)
  9967. lpfc_write_firmware(fw, (void *)phba);
  9968. } else {
  9969. ret = -EINVAL;
  9970. }
  9971. return ret;
  9972. }
  9973. /**
  9974. * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
  9975. * @pdev: pointer to PCI device
  9976. * @pid: pointer to PCI device identifier
  9977. *
9978. * This routine is called from the kernel's PCI subsystem to attach a device with
  9979. * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
  9980. * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9981. * information of the device and driver to see if the driver states that it
  9982. * can support this kind of device. If the match is successful, the driver
  9983. * core invokes this routine. If this routine determines it can claim the HBA,
  9984. * it does all the initialization that it needs to do to handle the HBA
  9985. * properly.
  9986. *
  9987. * Return code
  9988. * 0 - driver can claim the device
  9989. * negative value - driver can not claim the device
  9990. **/
  9991. static int
  9992. lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
  9993. {
  9994. struct lpfc_hba *phba;
  9995. struct lpfc_vport *vport = NULL;
  9996. struct Scsi_Host *shost = NULL;
  9997. int error;
  9998. uint32_t cfg_mode, intr_mode;
  9999. /* Allocate memory for HBA structure */
  10000. phba = lpfc_hba_alloc(pdev);
  10001. if (!phba)
  10002. return -ENOMEM;
  10003. /* Perform generic PCI device enabling operation */
  10004. error = lpfc_enable_pci_dev(phba);
  10005. if (error)
  10006. goto out_free_phba;
  10007. /* Set up SLI API function jump table for PCI-device group-1 HBAs */
  10008. error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
  10009. if (error)
  10010. goto out_disable_pci_dev;
  10011. /* Set up SLI-4 specific device PCI memory space */
  10012. error = lpfc_sli4_pci_mem_setup(phba);
  10013. if (error) {
  10014. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10015. "1410 Failed to set up pci memory space.\n");
  10016. goto out_disable_pci_dev;
  10017. }
  10018. /* Set up SLI-4 Specific device driver resources */
  10019. error = lpfc_sli4_driver_resource_setup(phba);
  10020. if (error) {
  10021. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10022. "1412 Failed to set up driver resource.\n");
  10023. goto out_unset_pci_mem_s4;
  10024. }
  10025. /* Initialize and populate the iocb list per host */
  10026. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  10027. "2821 initialize iocb list %d.\n",
  10028. phba->cfg_iocb_cnt*1024);
  10029. error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
  10030. if (error) {
  10031. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10032. "1413 Failed to initialize iocb list.\n");
  10033. goto out_unset_driver_resource_s4;
  10034. }
  10035. INIT_LIST_HEAD(&phba->active_rrq_list);
  10036. INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
  10037. /* Set up common device driver resources */
  10038. error = lpfc_setup_driver_resource_phase2(phba);
  10039. if (error) {
  10040. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10041. "1414 Failed to set up driver resource.\n");
  10042. goto out_free_iocb_list;
  10043. }
  10044. /* Get the default values for Model Name and Description */
  10045. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  10046. /* Create SCSI host to the physical port */
  10047. error = lpfc_create_shost(phba);
  10048. if (error) {
  10049. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10050. "1415 Failed to create scsi host.\n");
  10051. goto out_unset_driver_resource;
  10052. }
  10053. /* Configure sysfs attributes */
  10054. vport = phba->pport;
  10055. error = lpfc_alloc_sysfs_attr(vport);
  10056. if (error) {
  10057. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10058. "1416 Failed to allocate sysfs attr\n");
  10059. goto out_destroy_shost;
  10060. }
  10061. shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
  10062. /* Now, trying to enable interrupt and bring up the device */
  10063. cfg_mode = phba->cfg_use_msi;
  10064. /* Put device to a known state before enabling interrupt */
  10065. lpfc_stop_port(phba);
  10066. /* Configure and enable interrupt */
  10067. intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
  10068. if (intr_mode == LPFC_INTR_ERROR) {
  10069. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10070. "0426 Failed to enable interrupt.\n");
  10071. error = -ENODEV;
  10072. goto out_free_sysfs_attr;
  10073. }
  10074. /* Default to single EQ for non-MSI-X */
  10075. if (phba->intr_type != MSIX) {
  10076. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
  10077. phba->cfg_fcp_io_channel = 1;
  10078. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  10079. phba->cfg_nvme_io_channel = 1;
  10080. if (phba->nvmet_support)
  10081. phba->cfg_nvmet_mrq = 1;
  10082. }
  10083. phba->io_channel_irqs = 1;
  10084. }
  10085. /* Set up SLI-4 HBA */
  10086. if (lpfc_sli4_hba_setup(phba)) {
  10087. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10088. "1421 Failed to set up hba\n");
  10089. error = -ENODEV;
  10090. goto out_disable_intr;
  10091. }
  10092. /* Log the current active interrupt mode */
  10093. phba->intr_mode = intr_mode;
  10094. lpfc_log_intr_mode(phba, intr_mode);
  10095. /* Perform post initialization setup */
  10096. lpfc_post_init_setup(phba);
10097. /* NVME support in FW, detected earlier in the driver load, has already
10098. * corrected the FC4 type, making a check for nvme_support unnecessary.
  10099. */
  10100. if ((phba->nvmet_support == 0) &&
  10101. (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
  10102. /* Create NVME binding with nvme_fc_transport. This
  10103. * ensures the vport is initialized.
  10104. */
  10105. error = lpfc_nvme_create_localport(vport);
  10106. if (error) {
  10107. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10108. "6004 NVME registration failed, "
  10109. "error x%x\n",
  10110. error);
  10111. goto out_disable_intr;
  10112. }
  10113. }
  10114. /* check for firmware upgrade or downgrade */
  10115. if (phba->cfg_request_firmware_upgrade)
  10116. lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
  10117. /* Check if there are static vports to be created. */
  10118. lpfc_create_static_vport(phba);
  10119. return 0;
  10120. out_disable_intr:
  10121. lpfc_sli4_disable_intr(phba);
  10122. out_free_sysfs_attr:
  10123. lpfc_free_sysfs_attr(vport);
  10124. out_destroy_shost:
  10125. lpfc_destroy_shost(phba);
  10126. out_unset_driver_resource:
  10127. lpfc_unset_driver_resource_phase2(phba);
  10128. out_free_iocb_list:
  10129. lpfc_free_iocb_list(phba);
  10130. out_unset_driver_resource_s4:
  10131. lpfc_sli4_driver_resource_unset(phba);
  10132. out_unset_pci_mem_s4:
  10133. lpfc_sli4_pci_mem_unset(phba);
  10134. out_disable_pci_dev:
  10135. lpfc_disable_pci_dev(phba);
  10136. if (shost)
  10137. scsi_host_put(shost);
  10138. out_free_phba:
  10139. lpfc_hba_free(phba);
  10140. return error;
  10141. }
  10142. /**
  10143. * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
  10144. * @pdev: pointer to PCI device
  10145. *
10146. * This routine is called from the kernel's PCI subsystem to detach a device with
  10147. * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
  10148. * removed from PCI bus, it performs all the necessary cleanup for the HBA
  10149. * device to be removed from the PCI subsystem properly.
  10150. **/
  10151. static void
  10152. lpfc_pci_remove_one_s4(struct pci_dev *pdev)
  10153. {
  10154. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10155. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  10156. struct lpfc_vport **vports;
  10157. struct lpfc_hba *phba = vport->phba;
  10158. int i;
  10159. /* Mark the device unloading flag */
  10160. spin_lock_irq(&phba->hbalock);
  10161. vport->load_flag |= FC_UNLOADING;
  10162. spin_unlock_irq(&phba->hbalock);
  10163. /* Free the HBA sysfs attributes */
  10164. lpfc_free_sysfs_attr(vport);
  10165. /* Release all the vports against this physical port */
  10166. vports = lpfc_create_vport_work_array(phba);
  10167. if (vports != NULL)
  10168. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  10169. if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
  10170. continue;
  10171. fc_vport_terminate(vports[i]->fc_vport);
  10172. }
  10173. lpfc_destroy_vport_work_array(phba, vports);
  10174. /* Remove FC host and then SCSI host with the physical port */
  10175. fc_remove_host(shost);
  10176. scsi_remove_host(shost);
  10177. /* Perform ndlp cleanup on the physical port. The nvme and nvmet
10178. * localports are destroyed afterwards to clean up all transport memory.
  10179. */
  10180. lpfc_cleanup(vport);
  10181. lpfc_nvmet_destroy_targetport(phba);
  10182. lpfc_nvme_destroy_localport(vport);
  10183. /*
  10184. * Bring down the SLI Layer. This step disables all interrupts,
  10185. * clears the rings, discards all mailbox commands, and resets
  10186. * the HBA FCoE function.
  10187. */
  10188. lpfc_debugfs_terminate(vport);
  10189. lpfc_sli4_hba_unset(phba);
  10190. spin_lock_irq(&phba->hbalock);
  10191. list_del_init(&vport->listentry);
  10192. spin_unlock_irq(&phba->hbalock);
  10193. /* Perform scsi free before driver resource_unset since scsi
  10194. * buffers are released to their corresponding pools here.
  10195. */
  10196. lpfc_scsi_free(phba);
  10197. lpfc_nvme_free(phba);
  10198. lpfc_free_iocb_list(phba);
  10199. lpfc_sli4_driver_resource_unset(phba);
  10200. /* Unmap adapter Control and Doorbell registers */
  10201. lpfc_sli4_pci_mem_unset(phba);
  10202. /* Release PCI resources and disable device's PCI function */
  10203. scsi_host_put(shost);
  10204. lpfc_disable_pci_dev(phba);
  10205. /* Finally, free the driver's device data structure */
  10206. lpfc_hba_free(phba);
  10207. return;
  10208. }
  10209. /**
  10210. * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
  10211. * @pdev: pointer to PCI device
  10212. * @msg: power management message
  10213. *
  10214. * This routine is called from the kernel's PCI subsystem to support system
  10215. * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
  10216. * this method, it quiesces the device by stopping the driver's worker
10217. * thread for the device, turning off the device's interrupt and DMA, and bringing
  10218. * the device offline. Note that as the driver implements the minimum PM
  10219. * requirements to a power-aware driver's PM support for suspend/resume -- all
  10220. * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
  10221. * method call will be treated as SUSPEND and the driver will fully
  10222. * reinitialize its device during resume() method call, the driver will set
  10223. * device to PCI_D3hot state in PCI config space instead of setting it
  10224. * according to the @msg provided by the PM.
  10225. *
  10226. * Return code
  10227. * 0 - driver suspended the device
  10228. * Error otherwise
  10229. **/
  10230. static int
  10231. lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
  10232. {
  10233. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10234. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10235. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  10236. "2843 PCI device Power Management suspend.\n");
  10237. /* Bring down the device */
  10238. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  10239. lpfc_offline(phba);
  10240. kthread_stop(phba->worker_thread);
  10241. /* Disable interrupt from device */
  10242. lpfc_sli4_disable_intr(phba);
  10243. lpfc_sli4_queue_destroy(phba);
  10244. /* Save device state to PCI config space */
  10245. pci_save_state(pdev);
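/* Always enter D3hot regardless of the PM message (see function header) */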
  10246. pci_set_power_state(pdev, PCI_D3hot);
  10247. return 0;
  10248. }
  10249. /**
  10250. * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
  10251. * @pdev: pointer to PCI device
  10252. *
  10253. * This routine is called from the kernel's PCI subsystem to support system
  10254. * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
  10255. * this method, it restores the device's PCI config space state, fully
  10256. * reinitializes the device, and brings it online. Note that the driver
  10257. * implements only the minimum PM requirements of a power-aware driver for
  10258. * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
  10259. * passed to the suspend() method call are treated as SUSPEND, and the driver
  10260. * fully reinitializes its device during the resume() method call. Therefore,
  10261. * the device is set to PCI_D0 directly in PCI config space before restoring
  10262. * the state.
  10263. *
  10264. * Return code
  10265. * 0 - driver resumed the device
  10266. * Error otherwise
  10267. **/
  10268. static int
  10269. lpfc_pci_resume_one_s4(struct pci_dev *pdev)
  10270. {
  10271. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10272. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10273. uint32_t intr_mode;
  10274. int error;
  10275. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  10276. "0292 PCI device Power Management resume.\n");
  10277. /* Restore device state from PCI config space */
  10278. pci_set_power_state(pdev, PCI_D0);
  10279. pci_restore_state(pdev);
  10280. /*
  10281. * Since the newer kernel behavior of pci_restore_state() clears the
  10282. * device's saved_state flag, the restored state needs to be saved again.
  10283. */
  10284. pci_save_state(pdev);
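/* Re-enable bus mastering if it was enabled before suspend */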
  10285. if (pdev->is_busmaster)
  10286. pci_set_master(pdev);
  10287. /* Startup the kernel thread for this host adapter. */
  10288. phba->worker_thread = kthread_run(lpfc_do_work, phba,
  10289. "lpfc_worker_%d", phba->brd_no);
  10290. if (IS_ERR(phba->worker_thread)) {
  10291. error = PTR_ERR(phba->worker_thread);
  10292. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10293. "0293 PM resume failed to start worker "
  10294. "thread: error=x%x.\n", error);
  10295. return error;
  10296. }
  10297. /* Configure and enable interrupt */
  10298. intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
  10299. if (intr_mode == LPFC_INTR_ERROR) {
  10300. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10301. "0294 PM resume Failed to enable interrupt\n");
  10302. return -EIO;
  10303. } else
  10304. phba->intr_mode = intr_mode;
  10305. /* Restart HBA and bring it online */
  10306. lpfc_sli_brdrestart(phba);
  10307. lpfc_online(phba);
  10308. /* Log the current active interrupt mode */
  10309. lpfc_log_intr_mode(phba, phba->intr_mode);
  10310. return 0;
  10311. }
  10312. /**
  10313. * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
  10314. * @phba: pointer to lpfc hba data structure.
  10315. *
  10316. * This routine is called to prepare the SLI4 device for PCI slot recover. It
  10317. * aborts all the outstanding SCSI I/Os to the pci device.
  10318. **/
  10319. static void
  10320. lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
  10321. {
  10322. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10323. "2828 PCI channel I/O abort preparing for recovery\n");
  10324. /*
  10325. * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
  10326. * and let the SCSI mid-layer retry them to recover.
  10327. */
  10328. lpfc_sli_abort_fcp_rings(phba);
  10329. }
  10330. /**
  10331. * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
  10332. * @phba: pointer to lpfc hba data structure.
  10333. *
  10334. * This routine is called to prepare the SLI4 device for PCI slot reset. It
  10335. * disables the device interrupt and pci device, and aborts the internal FCP
  10336. * pending I/Os.
  10337. **/
  10338. static void
  10339. lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
  10340. {
  10341. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10342. "2826 PCI channel disable preparing for reset\n");
  10343. /* Block any management I/Os to the device */
  10344. lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
  10345. /* Block all SCSI devices' I/Os on the host */
  10346. lpfc_scsi_dev_block(phba);
  10347. /* Flush all driver's outstanding SCSI I/Os as we are to reset */
  10348. lpfc_sli_flush_fcp_rings(phba);
  10349. /* stop all timers */
  10350. lpfc_stop_hba_timers(phba);
  10351. /* Disable interrupt and pci device */
  10352. lpfc_sli4_disable_intr(phba);
  10353. lpfc_sli4_queue_destroy(phba);
  10354. pci_disable_device(phba->pcidev);
  10355. }
  10356. /**
  10357. * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
  10358. * @phba: pointer to lpfc hba data structure.
  10359. *
  10360. * This routine is called to prepare the SLI4 device for permanently disabling
  10361. * the PCI slot. It blocks the SCSI transport layer traffic and flushes the FCP
  10362. * pending I/Os.
  10363. **/
  10364. static void
  10365. lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
  10366. {
  10367. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10368. "2827 PCI channel permanent disable for failure\n");
  10369. /* Block all SCSI devices' I/Os on the host */
  10370. lpfc_scsi_dev_block(phba);
  10371. /* stop all timers */
  10372. lpfc_stop_hba_timers(phba);
  10373. /* Clean up all driver's outstanding SCSI I/Os */
  10374. lpfc_sli_flush_fcp_rings(phba);
  10375. }
  10376. /**
  10377. * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
  10378. * @pdev: pointer to PCI device.
  10379. * @state: the current PCI connection state.
  10380. *
  10381. * This routine is called from the PCI subsystem for error handling to device
  10382. * with SLI-4 interface spec. This function is called by the PCI subsystem
  10383. * after a PCI bus error affecting this device has been detected. When this
  10384. * function is invoked, it will need to stop all the I/Os and interrupt(s)
  10385. * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
  10386. * for the PCI subsystem to perform proper recovery as desired.
  10387. *
  10388. * Return codes
  10389. * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  10390. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  10391. **/
  10392. static pci_ers_result_t
  10393. lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
  10394. {
  10395. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10396. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10397. switch (state) {
  10398. case pci_channel_io_normal:
  10399. /* Non-fatal error, prepare for recovery */
  10400. lpfc_sli4_prep_dev_for_recover(phba);
  10401. return PCI_ERS_RESULT_CAN_RECOVER;
  10402. case pci_channel_io_frozen:
  10403. /* Fatal error, prepare for slot reset */
  10404. lpfc_sli4_prep_dev_for_reset(phba);
  10405. return PCI_ERS_RESULT_NEED_RESET;
  10406. case pci_channel_io_perm_failure:
  10407. /* Permanent failure, prepare for device down */
  10408. lpfc_sli4_prep_dev_for_perm_failure(phba);
  10409. return PCI_ERS_RESULT_DISCONNECT;
  10410. default:
  10411. /* Unknown state, prepare and request slot reset */
  10412. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10413. "2825 Unknown PCI error state: x%x\n", state);
  10414. lpfc_sli4_prep_dev_for_reset(phba);
  10415. return PCI_ERS_RESULT_NEED_RESET;
  10416. }
  10417. }
  10418. /**
  10419. * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
  10420. * @pdev: pointer to PCI device.
  10421. *
  10422. * This routine is called from the PCI subsystem for error handling to device
  10423. * with SLI-4 interface spec. It is called after PCI bus has been reset to
  10424. * restart the PCI card from scratch, as if from a cold-boot. During the
  10425. * PCI subsystem error recovery, after the driver returns
  10426. * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
  10427. * recovery and then call this routine before calling the .resume method to
  10428. * recover the device. This function will initialize the HBA device, enable
  10429. * the interrupt, but it will just put the HBA to offline state without
  10430. * passing any I/O traffic.
  10431. *
  10432. * Return codes
  10433. * PCI_ERS_RESULT_RECOVERED - the device has been recovered
  10434. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  10435. */
  10436. static pci_ers_result_t
  10437. lpfc_io_slot_reset_s4(struct pci_dev *pdev)
  10438. {
  10439. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10440. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10441. struct lpfc_sli *psli = &phba->sli;
  10442. uint32_t intr_mode;
  10443. dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
  10444. if (pci_enable_device_mem(pdev)) {
  10445. printk(KERN_ERR "lpfc: Cannot re-enable "
  10446. "PCI device after reset.\n");
  10447. return PCI_ERS_RESULT_DISCONNECT;
  10448. }
  10449. pci_restore_state(pdev);
  10450. /*
  10451. * Since the newer kernel behavior of pci_restore_state() clears the
  10452. * device's saved_state flag, the restored state needs to be saved again.
  10453. */
  10454. pci_save_state(pdev);
  10455. if (pdev->is_busmaster)
  10456. pci_set_master(pdev);
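/* Mark the SLI layer inactive so lpfc_io_resume_s4() performs a full restart */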
  10457. spin_lock_irq(&phba->hbalock);
  10458. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  10459. spin_unlock_irq(&phba->hbalock);
  10460. /* Configure and enable interrupt */
  10461. intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
  10462. if (intr_mode == LPFC_INTR_ERROR) {
  10463. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10464. "2824 Cannot re-enable interrupt after "
  10465. "slot reset.\n");
  10466. return PCI_ERS_RESULT_DISCONNECT;
  10467. } else
  10468. phba->intr_mode = intr_mode;
  10469. /* Log the current active interrupt mode */
  10470. lpfc_log_intr_mode(phba, phba->intr_mode);
  10471. return PCI_ERS_RESULT_RECOVERED;
  10472. }
  10473. /**
  10474. * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
  10475. * @pdev: pointer to PCI device
  10476. *
  10477. * This routine is called from the PCI subsystem for error handling to device
  10478. * with SLI-4 interface spec. It is called when kernel error recovery tells
  10479. * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
  10480. * error recovery. After this call, traffic can start to flow from this device
  10481. * again.
  10482. **/
  10483. static void
  10484. lpfc_io_resume_s4(struct pci_dev *pdev)
  10485. {
  10486. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10487. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10488. /*
  10489. * In case of a slot reset, the function reset is performed through a
  10490. * mailbox command, which requires DMA to be enabled, so this operation
  10491. * has to be deferred to the io resume phase. Taking the device offline
  10492. * will perform the necessary cleanup.
  10493. */
  10494. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
  10495. /* Perform device reset */
  10496. lpfc_offline_prep(phba, LPFC_MBX_WAIT);
  10497. lpfc_offline(phba);
  10498. lpfc_sli_brdrestart(phba);
  10499. /* Bring the device back online */
  10500. lpfc_online(phba);
  10501. }
  10502. /* Clean up Advanced Error Reporting (AER) if needed */
  10503. if (phba->hba_flag & HBA_AER_ENABLED)
  10504. pci_cleanup_aer_uncorrect_error_status(pdev);
  10505. }
  10506. /**
  10507. * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
  10508. * @pdev: pointer to PCI device
  10509. * @pid: pointer to PCI device identifier
  10510. *
  10511. * This routine is to be registered to the kernel's PCI subsystem. When an
  10512. * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
  10513. * at PCI device-specific information of the device and driver to see if the
  10514. * driver states that it can support this kind of device. If the match is
  10515. * successful, the driver core invokes this routine. This routine dispatches
  10516. * the action to the proper SLI-3 or SLI-4 device probing routine, which will
  10517. * do all the initialization that it needs to do to handle the HBA device
  10518. * properly.
  10519. *
  10520. * Return code
  10521. * 0 - driver can claim the device
  10522. * negative value - driver can not claim the device
  10523. **/
  10524. static int
  10525. lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
  10526. {
  10527. int rc;
  10528. struct lpfc_sli_intf intf;
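/* Read the SLI_INTF register to determine whether the adapter is SLI-3 or SLI-4 */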
  10529. if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
  10530. return -ENODEV;
  10531. if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
  10532. (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
  10533. rc = lpfc_pci_probe_one_s4(pdev, pid);
  10534. else
  10535. rc = lpfc_pci_probe_one_s3(pdev, pid);
  10536. return rc;
  10537. }
  10538. /**
  10539. * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
  10540. * @pdev: pointer to PCI device
  10541. *
  10542. * This routine is to be registered to the kernel's PCI subsystem. When an
  10543. * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
  10544. * This routine dispatches the action to the proper SLI-3 or SLI-4 device
  10545. * remove routine, which will perform all the necessary cleanup for the
  10546. * device to be removed from the PCI subsystem properly.
  10547. **/
  10548. static void
  10549. lpfc_pci_remove_one(struct pci_dev *pdev)
  10550. {
  10551. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10552. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10553. switch (phba->pci_dev_grp) {
  10554. case LPFC_PCI_DEV_LP:
  10555. lpfc_pci_remove_one_s3(pdev);
  10556. break;
  10557. case LPFC_PCI_DEV_OC:
  10558. lpfc_pci_remove_one_s4(pdev);
  10559. break;
  10560. default:
  10561. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10562. "1424 Invalid PCI device group: 0x%x\n",
  10563. phba->pci_dev_grp);
  10564. break;
  10565. }
  10566. return;
  10567. }
  10568. /**
  10569. * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
  10570. * @pdev: pointer to PCI device
  10571. * @msg: power management message
  10572. *
  10573. * This routine is to be registered to the kernel's PCI subsystem to support
  10574. * system Power Management (PM). When PM invokes this method, it dispatches
  10575. * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
  10576. * suspend the device.
  10577. *
  10578. * Return code
  10579. * 0 - driver suspended the device
  10580. * Error otherwise
  10581. **/
  10582. static int
  10583. lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
  10584. {
  10585. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10586. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10587. int rc = -ENODEV;
  10588. switch (phba->pci_dev_grp) {
  10589. case LPFC_PCI_DEV_LP:
  10590. rc = lpfc_pci_suspend_one_s3(pdev, msg);
  10591. break;
  10592. case LPFC_PCI_DEV_OC:
  10593. rc = lpfc_pci_suspend_one_s4(pdev, msg);
  10594. break;
  10595. default:
  10596. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10597. "1425 Invalid PCI device group: 0x%x\n",
  10598. phba->pci_dev_grp);
  10599. break;
  10600. }
  10601. return rc;
  10602. }
  10603. /**
  10604. * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
  10605. * @pdev: pointer to PCI device
  10606. *
  10607. * This routine is to be registered to the kernel's PCI subsystem to support
  10608. * system Power Management (PM). When PM invokes this method, it dispatches
  10609. * the action to the proper SLI-3 or SLI-4 device resume routine, which will
  10610. * resume the device.
  10611. *
  10612. * Return code
  10613. * 0 - driver resumed the device
  10614. * Error otherwise
  10615. **/
  10616. static int
  10617. lpfc_pci_resume_one(struct pci_dev *pdev)
  10618. {
  10619. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10620. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10621. int rc = -ENODEV;
  10622. switch (phba->pci_dev_grp) {
  10623. case LPFC_PCI_DEV_LP:
  10624. rc = lpfc_pci_resume_one_s3(pdev);
  10625. break;
  10626. case LPFC_PCI_DEV_OC:
  10627. rc = lpfc_pci_resume_one_s4(pdev);
  10628. break;
  10629. default:
  10630. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10631. "1426 Invalid PCI device group: 0x%x\n",
  10632. phba->pci_dev_grp);
  10633. break;
  10634. }
  10635. return rc;
  10636. }
  10637. /**
  10638. * lpfc_io_error_detected - lpfc method for handling PCI I/O error
  10639. * @pdev: pointer to PCI device.
  10640. * @state: the current PCI connection state.
  10641. *
  10642. * This routine is registered to the PCI subsystem for error handling. This
  10643. * function is called by the PCI subsystem after a PCI bus error affecting
  10644. * this device has been detected. When this routine is invoked, it dispatches
  10645. * the action to the proper SLI-3 or SLI-4 device error detected handling
  10646. * routine, which will perform the proper error detected operation.
  10647. *
  10648. * Return codes
  10649. * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  10650. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  10651. **/
  10652. static pci_ers_result_t
  10653. lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  10654. {
  10655. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10656. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10657. pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
  10658. switch (phba->pci_dev_grp) {
  10659. case LPFC_PCI_DEV_LP:
  10660. rc = lpfc_io_error_detected_s3(pdev, state);
  10661. break;
  10662. case LPFC_PCI_DEV_OC:
  10663. rc = lpfc_io_error_detected_s4(pdev, state);
  10664. break;
  10665. default:
  10666. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10667. "1427 Invalid PCI device group: 0x%x\n",
  10668. phba->pci_dev_grp);
  10669. break;
  10670. }
  10671. return rc;
  10672. }
  10673. /**
  10674. * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
  10675. * @pdev: pointer to PCI device.
  10676. *
  10677. * This routine is registered to the PCI subsystem for error handling. This
  10678. * function is called after PCI bus has been reset to restart the PCI card
  10679. * from scratch, as if from a cold-boot. When this routine is invoked, it
  10680. * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
  10681. * routine, which will perform the proper device reset.
  10682. *
  10683. * Return codes
  10684. * PCI_ERS_RESULT_RECOVERED - the device has been recovered
  10685. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  10686. **/
  10687. static pci_ers_result_t
  10688. lpfc_io_slot_reset(struct pci_dev *pdev)
  10689. {
  10690. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10691. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10692. pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
  10693. switch (phba->pci_dev_grp) {
  10694. case LPFC_PCI_DEV_LP:
  10695. rc = lpfc_io_slot_reset_s3(pdev);
  10696. break;
  10697. case LPFC_PCI_DEV_OC:
  10698. rc = lpfc_io_slot_reset_s4(pdev);
  10699. break;
  10700. default:
  10701. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10702. "1428 Invalid PCI device group: 0x%x\n",
  10703. phba->pci_dev_grp);
  10704. break;
  10705. }
  10706. return rc;
  10707. }
  10708. /**
  10709. * lpfc_io_resume - lpfc method for resuming PCI I/O operation
  10710. * @pdev: pointer to PCI device
  10711. *
  10712. * This routine is registered to the PCI subsystem for error handling. It
  10713. * is called when kernel error recovery tells the lpfc driver that it is
  10714. * OK to resume normal PCI operation after PCI bus error recovery. When
  10715. * this routine is invoked, it dispatches the action to the proper SLI-3
  10716. * or SLI-4 device io_resume routine, which will resume the device operation.
  10717. **/
  10718. static void
  10719. lpfc_io_resume(struct pci_dev *pdev)
  10720. {
  10721. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  10722. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  10723. switch (phba->pci_dev_grp) {
  10724. case LPFC_PCI_DEV_LP:
  10725. lpfc_io_resume_s3(pdev);
  10726. break;
  10727. case LPFC_PCI_DEV_OC:
  10728. lpfc_io_resume_s4(pdev);
  10729. break;
  10730. default:
  10731. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10732. "1429 Invalid PCI device group: 0x%x\n",
  10733. phba->pci_dev_grp);
  10734. break;
  10735. }
  10736. return;
  10737. }
  10738. /**
  10739. * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
  10740. * @phba: pointer to lpfc hba data structure.
  10741. *
  10742. * This routine checks to see if OAS is supported for this adapter. If
  10743. * supported, the FOF (Flash Optimized Fabric) configuration flag is set.
  10744. * Otherwise, the OAS enable flag is cleared and the pool for OAS device data
  10745. * is destroyed.
  10746. *
  10747. **/
  10748. void
  10749. lpfc_sli4_oas_verify(struct lpfc_hba *phba)
  10750. {
  10751. if (!phba->cfg_EnableXLane)
  10752. return;
  10753. if (phba->sli4_hba.pc_sli4_params.oas_supported) {
  10754. phba->cfg_fof = 1;
  10755. } else {
  10756. phba->cfg_fof = 0;
  10757. if (phba->device_data_mem_pool)
  10758. mempool_destroy(phba->device_data_mem_pool);
  10759. phba->device_data_mem_pool = NULL;
  10760. }
  10761. return;
  10762. }
  10763. /**
  10764. * lpfc_fof_queue_setup - Set up all the fof queues
  10765. * @phba: pointer to lpfc hba data structure.
  10766. *
  10767. * This routine is invoked to set up all the fof queues for the FC HBA
  10768. * operation.
  10769. *
  10770. * Return codes
  10771. * 0 - successful
  10772. * -ENOMEM - No available memory
  10773. **/
  10774. int
  10775. lpfc_fof_queue_setup(struct lpfc_hba *phba)
  10776. {
  10777. struct lpfc_sli_ring *pring;
  10778. int rc;
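/* Create the FOF event queue; it is set up regardless of whether OAS is enabled */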
  10779. rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
  10780. if (rc)
  10781. return -ENOMEM;
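/* Create the OAS completion and work queues only when OAS is enabled */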
  10782. if (phba->cfg_fof) {
  10783. rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
  10784. phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
  10785. if (rc)
  10786. goto out_oas_cq;
  10787. rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
  10788. phba->sli4_hba.oas_cq, LPFC_FCP);
  10789. if (rc)
  10790. goto out_oas_wq;
  10791. /* Bind this CQ/WQ to the OAS ring */
  10792. pring = phba->sli4_hba.oas_wq->pring;
  10793. pring->sli.sli4.wqp =
  10794. (void *)phba->sli4_hba.oas_wq;
  10795. phba->sli4_hba.oas_cq->pring = pring;
  10796. }
  10797. return 0;
  10798. out_oas_wq:
  10799. lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
  10800. out_oas_cq:
  10801. lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
  10802. return rc;
  10803. }
  10804. /**
  10805. * lpfc_fof_queue_create - Create all the fof queues
  10806. * @phba: pointer to lpfc hba data structure.
  10807. *
  10808. * This routine is invoked to allocate all the fof queues for the FC HBA
  10809. * operation. For each SLI4 queue type, the parameters such as queue entry
  10810. * count (queue depth) shall be taken from the module parameter. For now,
  10811. * we just use a constant number as a placeholder.
  10812. *
  10813. * Return codes
  10814. * 0 - successful
  10815. * -ENOMEM - No available memory
  10816. * -EIO - The mailbox failed to complete successfully.
  10817. **/
  10818. int
  10819. lpfc_fof_queue_create(struct lpfc_hba *phba)
  10820. {
  10821. struct lpfc_queue *qdesc;
  10822. /* Create FOF EQ */
  10823. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
  10824. phba->sli4_hba.eq_ecount);
  10825. if (!qdesc)
  10826. goto out_error;
  10827. phba->sli4_hba.fof_eq = qdesc;
  10828. if (phba->cfg_fof) {
  10829. /* Create OAS CQ */
  10830. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
  10831. phba->sli4_hba.cq_ecount);
  10832. if (!qdesc)
  10833. goto out_error;
  10834. phba->sli4_hba.oas_cq = qdesc;
  10835. /* Create OAS WQ */
  10836. qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
  10837. phba->sli4_hba.wq_ecount);
  10838. if (!qdesc)
  10839. goto out_error;
  10840. phba->sli4_hba.oas_wq = qdesc;
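/* Track the OAS work queue on the driver's global WQ list */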
  10841. list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
  10842. }
  10843. return 0;
  10844. out_error:
  10845. lpfc_fof_queue_destroy(phba);
  10846. return -ENOMEM;
  10847. }
  10848. /**
  10849. * lpfc_fof_queue_destroy - Destroy all the fof queues
  10850. * @phba: pointer to lpfc hba data structure.
  10851. *
  10852. * This routine is invoked to release all the fof SLI4 queues used for the
  10853. * FC HBA operation.
  10854. *
  10855. * Return codes
  10856. * 0 - successful
  10857. **/
  10858. int
  10859. lpfc_fof_queue_destroy(struct lpfc_hba *phba)
  10860. {
  10861. /* Release FOF Event queue */
  10862. if (phba->sli4_hba.fof_eq != NULL) {
  10863. lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
  10864. phba->sli4_hba.fof_eq = NULL;
  10865. }
  10866. /* Release OAS Completion queue */
  10867. if (phba->sli4_hba.oas_cq != NULL) {
  10868. lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
  10869. phba->sli4_hba.oas_cq = NULL;
  10870. }
  10871. /* Release OAS Work queue */
  10872. if (phba->sli4_hba.oas_wq != NULL) {
  10873. lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
  10874. phba->sli4_hba.oas_wq = NULL;
  10875. }
  10876. return 0;
  10877. }
  10878. MODULE_DEVICE_TABLE(pci, lpfc_id_table);
  10879. static const struct pci_error_handlers lpfc_err_handler = {
  10880. .error_detected = lpfc_io_error_detected,
  10881. .slot_reset = lpfc_io_slot_reset,
  10882. .resume = lpfc_io_resume,
  10883. };
  10884. static struct pci_driver lpfc_driver = {
  10885. .name = LPFC_DRIVER_NAME,
  10886. .id_table = lpfc_id_table,
  10887. .probe = lpfc_pci_probe_one,
  10888. .remove = lpfc_pci_remove_one,
  10889. .shutdown = lpfc_pci_remove_one,
  10890. .suspend = lpfc_pci_suspend_one,
  10891. .resume = lpfc_pci_resume_one,
  10892. .err_handler = &lpfc_err_handler,
  10893. };
  10894. static const struct file_operations lpfc_mgmt_fop = {
  10895. .owner = THIS_MODULE,
  10896. };
  10897. static struct miscdevice lpfc_mgmt_dev = {
  10898. .minor = MISC_DYNAMIC_MINOR,
  10899. .name = "lpfcmgmt",
  10900. .fops = &lpfc_mgmt_fop,
  10901. };
  10902. /**
  10903. * lpfc_init - lpfc module initialization routine
  10904. *
  10905. * This routine is to be invoked when the lpfc module is loaded into the
  10906. * kernel. The special kernel macro module_init() is used to indicate the
  10907. * role of this routine to the kernel as lpfc module entry point.
  10908. *
  10909. * Return codes
  10910. * 0 - successful
  10911. * -ENOMEM - FC attach transport failed
  10912. * all others - failed
  10913. */
  10914. static int __init
  10915. lpfc_init(void)
  10916. {
  10917. int error = 0;
  10918. printk(LPFC_MODULE_DESC "\n");
  10919. printk(LPFC_COPYRIGHT "\n");
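/* Register the lpfcmgmt misc device; a failure here is logged but is not fatal */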
  10920. error = misc_register(&lpfc_mgmt_dev);
  10921. if (error)
  10922. printk(KERN_ERR "Could not register lpfcmgmt device, "
  10923. "misc_register returned with status %d", error);
  10924. lpfc_transport_functions.vport_create = lpfc_vport_create;
  10925. lpfc_transport_functions.vport_delete = lpfc_vport_delete;
  10926. lpfc_transport_template =
  10927. fc_attach_transport(&lpfc_transport_functions);
  10928. if (lpfc_transport_template == NULL)
  10929. return -ENOMEM;
  10930. lpfc_vport_transport_template =
  10931. fc_attach_transport(&lpfc_vport_transport_functions);
  10932. if (lpfc_vport_transport_template == NULL) {
  10933. fc_release_transport(lpfc_transport_template);
  10934. return -ENOMEM;
  10935. }
  10936. /* Initialize in case vector mapping is needed */
  10937. lpfc_used_cpu = NULL;
  10938. lpfc_present_cpu = num_present_cpus();
  10939. error = pci_register_driver(&lpfc_driver);
  10940. if (error) {
  10941. fc_release_transport(lpfc_transport_template);
  10942. fc_release_transport(lpfc_vport_transport_template);
  10943. }
  10944. return error;
  10945. }
  10946. /**
  10947. * lpfc_exit - lpfc module removal routine
  10948. *
  10949. * This routine is invoked when the lpfc module is removed from the kernel.
  10950. * The special kernel macro module_exit() is used to indicate the role of
  10951. * this routine to the kernel as lpfc module exit point.
  10952. */
  10953. static void __exit
  10954. lpfc_exit(void)
  10955. {
  10956. misc_deregister(&lpfc_mgmt_dev);
  10957. pci_unregister_driver(&lpfc_driver);
  10958. fc_release_transport(lpfc_transport_template);
  10959. fc_release_transport(lpfc_vport_transport_template);
  10960. if (_dump_buf_data) {
  10961. printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
  10962. "_dump_buf_data at 0x%p\n",
  10963. (1L << _dump_buf_data_order), _dump_buf_data);
  10964. free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
  10965. }
  10966. if (_dump_buf_dif) {
  10967. printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
  10968. "_dump_buf_dif at 0x%p\n",
  10969. (1L << _dump_buf_dif_order), _dump_buf_dif);
  10970. free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
  10971. }
  10972. kfree(lpfc_used_cpu);
  10973. idr_destroy(&lpfc_hba_index);
  10974. }
  10975. module_init(lpfc_init);
  10976. module_exit(lpfc_exit);
  10977. MODULE_LICENSE("GPL");
  10978. MODULE_DESCRIPTION(LPFC_MODULE_DESC);
  10979. MODULE_AUTHOR("Broadcom");
  10980. MODULE_VERSION("0:" LPFC_DRIVER_VERSION);