/drivers/scsi/lpfc/lpfc_els.c

http://github.com/mirrors/linux · C · 10152 lines · 6613 code · 1025 blank · 2514 comment · 1180 complexity · bff4b2cf65119e8010bb275c58f7b75d MD5 · raw file

Large files are truncated click here to view the full file

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. /* See Fibre Channel protocol T11 FC-LS for details */
  24. #include <linux/blkdev.h>
  25. #include <linux/pci.h>
  26. #include <linux/slab.h>
  27. #include <linux/interrupt.h>
  28. #include <scsi/scsi.h>
  29. #include <scsi/scsi_device.h>
  30. #include <scsi/scsi_host.h>
  31. #include <scsi/scsi_transport_fc.h>
  32. #include <uapi/scsi/fc/fc_fs.h>
  33. #include <uapi/scsi/fc/fc_els.h>
  34. #include "lpfc_hw4.h"
  35. #include "lpfc_hw.h"
  36. #include "lpfc_sli.h"
  37. #include "lpfc_sli4.h"
  38. #include "lpfc_nl.h"
  39. #include "lpfc_disc.h"
  40. #include "lpfc_scsi.h"
  41. #include "lpfc.h"
  42. #include "lpfc_logmsg.h"
  43. #include "lpfc_crtn.h"
  44. #include "lpfc_vport.h"
  45. #include "lpfc_debugfs.h"
/* Forward declarations for routines defined later in this file */
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
                          struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
                                  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
                                struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *iocb);

/* Maximum number of times an ELS command is retried before giving up */
static int lpfc_max_els_tries = 3;
  56. /**
  57. * lpfc_els_chk_latt - Check host link attention event for a vport
  58. * @vport: pointer to a host virtual N_Port data structure.
  59. *
  60. * This routine checks whether there is an outstanding host link
  61. * attention event during the discovery process with the @vport. It is done
  62. * by reading the HBA's Host Attention (HA) register. If there is any host
  63. * link attention events during this @vport's discovery process, the @vport
  64. * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
  65. * be issued if the link state is not already in host link cleared state,
  66. * and a return code shall indicate whether the host link attention event
  67. * had happened.
  68. *
  69. * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
  70. * state in LPFC_VPORT_READY, the request for checking host link attention
  71. * event will be ignored and a return code shall indicate no host link
  72. * attention event had happened.
  73. *
  74. * Return codes
  75. * 0 - no host link attention event happened
  76. * 1 - host link attention event happened
  77. **/
  78. int
  79. lpfc_els_chk_latt(struct lpfc_vport *vport)
  80. {
  81. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  82. struct lpfc_hba *phba = vport->phba;
  83. uint32_t ha_copy;
  84. if (vport->port_state >= LPFC_VPORT_READY ||
  85. phba->link_state == LPFC_LINK_DOWN ||
  86. phba->sli_rev > LPFC_SLI_REV3)
  87. return 0;
  88. /* Read the HBA Host Attention Register */
  89. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  90. return 1;
  91. if (!(ha_copy & HA_LATT))
  92. return 0;
  93. /* Pending Link Event during Discovery */
  94. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  95. "0237 Pending Link Event during "
  96. "Discovery: State x%x\n",
  97. phba->pport->port_state);
  98. /* CLEAR_LA should re-enable link attention events and
  99. * we should then immediately take a LATT event. The
  100. * LATT processing should call lpfc_linkdown() which
  101. * will cleanup any left over in-progress discovery
  102. * events.
  103. */
  104. spin_lock_irq(shost->host_lock);
  105. vport->fc_flag |= FC_ABORT_DISCOVERY;
  106. spin_unlock_irq(shost->host_lock);
  107. if (phba->link_state != LPFC_CLEAR_LA)
  108. lpfc_issue_clear_la(phba, vport);
  109. return 1;
  110. }
  111. /**
  112. * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
  113. * @vport: pointer to a host virtual N_Port data structure.
  114. * @expectRsp: flag indicating whether response is expected.
  115. * @cmdSize: size of the ELS command.
  116. * @retry: number of retries to the command IOCB when it fails.
  117. * @ndlp: pointer to a node-list data structure.
  118. * @did: destination identifier.
  119. * @elscmd: the ELS command code.
  120. *
  121. * This routine is used for allocating a lpfc-IOCB data structure from
  122. * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
  123. * passed into the routine for discovery state machine to issue an Extended
  124. * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
  125. * and preparation routine that is used by all the discovery state machine
  126. * routines and the ELS command-specific fields will be later set up by
  127. * the individual discovery machine routines after calling this routine
  128. * allocating and preparing a generic IOCB data structure. It fills in the
  129. * Buffer Descriptor Entries (BDEs), allocates buffers for both command
  130. * payload and response payload (if expected). The reference count on the
  131. * ndlp is incremented by 1 and the reference to the ndlp is put into
  132. * context1 of the IOCB data structure for this IOCB to hold the ndlp
  133. * reference for the command's callback function to access later.
  134. *
  135. * Return code
  136. * Pointer to the newly allocated/prepared els iocb data structure
  137. * NULL - when els iocb data structure allocation/preparation failed
  138. **/
  139. struct lpfc_iocbq *
  140. lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
  141. uint16_t cmdSize, uint8_t retry,
  142. struct lpfc_nodelist *ndlp, uint32_t did,
  143. uint32_t elscmd)
  144. {
  145. struct lpfc_hba *phba = vport->phba;
  146. struct lpfc_iocbq *elsiocb;
  147. struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
  148. struct ulp_bde64 *bpl;
  149. IOCB_t *icmd;
  150. if (!lpfc_is_link_up(phba))
  151. return NULL;
  152. /* Allocate buffer for command iocb */
  153. elsiocb = lpfc_sli_get_iocbq(phba);
  154. if (elsiocb == NULL)
  155. return NULL;
  156. /*
  157. * If this command is for fabric controller and HBA running
  158. * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
  159. */
  160. if ((did == Fabric_DID) &&
  161. (phba->hba_flag & HBA_FIP_SUPPORT) &&
  162. ((elscmd == ELS_CMD_FLOGI) ||
  163. (elscmd == ELS_CMD_FDISC) ||
  164. (elscmd == ELS_CMD_LOGO)))
  165. switch (elscmd) {
  166. case ELS_CMD_FLOGI:
  167. elsiocb->iocb_flag |=
  168. ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
  169. & LPFC_FIP_ELS_ID_MASK);
  170. break;
  171. case ELS_CMD_FDISC:
  172. elsiocb->iocb_flag |=
  173. ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
  174. & LPFC_FIP_ELS_ID_MASK);
  175. break;
  176. case ELS_CMD_LOGO:
  177. elsiocb->iocb_flag |=
  178. ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
  179. & LPFC_FIP_ELS_ID_MASK);
  180. break;
  181. }
  182. else
  183. elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
  184. icmd = &elsiocb->iocb;
  185. /* fill in BDEs for command */
  186. /* Allocate buffer for command payload */
  187. pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  188. if (pcmd)
  189. pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
  190. if (!pcmd || !pcmd->virt)
  191. goto els_iocb_free_pcmb_exit;
  192. INIT_LIST_HEAD(&pcmd->list);
  193. /* Allocate buffer for response payload */
  194. if (expectRsp) {
  195. prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  196. if (prsp)
  197. prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  198. &prsp->phys);
  199. if (!prsp || !prsp->virt)
  200. goto els_iocb_free_prsp_exit;
  201. INIT_LIST_HEAD(&prsp->list);
  202. } else
  203. prsp = NULL;
  204. /* Allocate buffer for Buffer ptr list */
  205. pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  206. if (pbuflist)
  207. pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  208. &pbuflist->phys);
  209. if (!pbuflist || !pbuflist->virt)
  210. goto els_iocb_free_pbuf_exit;
  211. INIT_LIST_HEAD(&pbuflist->list);
  212. if (expectRsp) {
  213. icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
  214. icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
  215. icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  216. icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  217. icmd->un.elsreq64.remoteID = did; /* DID */
  218. icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
  219. if (elscmd == ELS_CMD_FLOGI)
  220. icmd->ulpTimeout = FF_DEF_RATOV * 2;
  221. else if (elscmd == ELS_CMD_LOGO)
  222. icmd->ulpTimeout = phba->fc_ratov;
  223. else
  224. icmd->ulpTimeout = phba->fc_ratov * 2;
  225. } else {
  226. icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
  227. icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
  228. icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  229. icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
  230. icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
  231. icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
  232. }
  233. icmd->ulpBdeCount = 1;
  234. icmd->ulpLe = 1;
  235. icmd->ulpClass = CLASS3;
  236. /*
  237. * If we have NPIV enabled, we want to send ELS traffic by VPI.
  238. * For SLI4, since the driver controls VPIs we also want to include
  239. * all ELS pt2pt protocol traffic as well.
  240. */
  241. if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
  242. ((phba->sli_rev == LPFC_SLI_REV4) &&
  243. (vport->fc_flag & FC_PT2PT))) {
  244. if (expectRsp) {
  245. icmd->un.elsreq64.myID = vport->fc_myDID;
  246. /* For ELS_REQUEST64_CR, use the VPI by default */
  247. icmd->ulpContext = phba->vpi_ids[vport->vpi];
  248. }
  249. icmd->ulpCt_h = 0;
  250. /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  251. if (elscmd == ELS_CMD_ECHO)
  252. icmd->ulpCt_l = 0; /* context = invalid RPI */
  253. else
  254. icmd->ulpCt_l = 1; /* context = VPI */
  255. }
  256. bpl = (struct ulp_bde64 *) pbuflist->virt;
  257. bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
  258. bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
  259. bpl->tus.f.bdeSize = cmdSize;
  260. bpl->tus.f.bdeFlags = 0;
  261. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  262. if (expectRsp) {
  263. bpl++;
  264. bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
  265. bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
  266. bpl->tus.f.bdeSize = FCELSSIZE;
  267. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  268. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  269. }
  270. /* prevent preparing iocb with NULL ndlp reference */
  271. elsiocb->context1 = lpfc_nlp_get(ndlp);
  272. if (!elsiocb->context1)
  273. goto els_iocb_free_pbuf_exit;
  274. elsiocb->context2 = pcmd;
  275. elsiocb->context3 = pbuflist;
  276. elsiocb->retry = retry;
  277. elsiocb->vport = vport;
  278. elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
  279. if (prsp) {
  280. list_add(&prsp->list, &pcmd->list);
  281. }
  282. if (expectRsp) {
  283. /* Xmit ELS command <elsCmd> to remote NPORT <did> */
  284. lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  285. "0116 Xmit ELS command x%x to remote "
  286. "NPORT x%x I/O tag: x%x, port state:x%x "
  287. "rpi x%x fc_flag:x%x\n",
  288. elscmd, did, elsiocb->iotag,
  289. vport->port_state, ndlp->nlp_rpi,
  290. vport->fc_flag);
  291. } else {
  292. /* Xmit ELS response <elsCmd> to remote NPORT <did> */
  293. lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  294. "0117 Xmit ELS response x%x to remote "
  295. "NPORT x%x I/O tag: x%x, size: x%x "
  296. "port_state x%x rpi x%x fc_flag x%x\n",
  297. elscmd, ndlp->nlp_DID, elsiocb->iotag,
  298. cmdSize, vport->port_state,
  299. ndlp->nlp_rpi, vport->fc_flag);
  300. }
  301. return elsiocb;
  302. els_iocb_free_pbuf_exit:
  303. if (expectRsp)
  304. lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
  305. kfree(pbuflist);
  306. els_iocb_free_prsp_exit:
  307. lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  308. kfree(prsp);
  309. els_iocb_free_pcmb_exit:
  310. kfree(pcmd);
  311. lpfc_sli_release_iocbq(phba, elsiocb);
  312. return NULL;
  313. }
  314. /**
  315. * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
  316. * @vport: pointer to a host virtual N_Port data structure.
  317. *
  318. * This routine issues a fabric registration login for a @vport. An
  319. * active ndlp node with Fabric_DID must already exist for this @vport.
  320. * The routine invokes two mailbox commands to carry out fabric registration
  321. * login through the HBA firmware: the first mailbox command requests the
  322. * HBA to perform link configuration for the @vport; and the second mailbox
  323. * command requests the HBA to perform the actual fabric registration login
  324. * with the @vport.
  325. *
  326. * Return code
  327. * 0 - successfully issued fabric registration login for @vport
  328. * -ENXIO -- failed to issue fabric registration login for @vport
  329. **/
  330. int
  331. lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
  332. {
  333. struct lpfc_hba *phba = vport->phba;
  334. LPFC_MBOXQ_t *mbox;
  335. struct lpfc_dmabuf *mp;
  336. struct lpfc_nodelist *ndlp;
  337. struct serv_parm *sp;
  338. int rc;
  339. int err = 0;
  340. sp = &phba->fc_fabparam;
  341. ndlp = lpfc_findnode_did(vport, Fabric_DID);
  342. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  343. err = 1;
  344. goto fail;
  345. }
  346. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  347. if (!mbox) {
  348. err = 2;
  349. goto fail;
  350. }
  351. vport->port_state = LPFC_FABRIC_CFG_LINK;
  352. lpfc_config_link(phba, mbox);
  353. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  354. mbox->vport = vport;
  355. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  356. if (rc == MBX_NOT_FINISHED) {
  357. err = 3;
  358. goto fail_free_mbox;
  359. }
  360. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  361. if (!mbox) {
  362. err = 4;
  363. goto fail;
  364. }
  365. rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
  366. ndlp->nlp_rpi);
  367. if (rc) {
  368. err = 5;
  369. goto fail_free_mbox;
  370. }
  371. mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
  372. mbox->vport = vport;
  373. /* increment the reference count on ndlp to hold reference
  374. * for the callback routine.
  375. */
  376. mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
  377. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  378. if (rc == MBX_NOT_FINISHED) {
  379. err = 6;
  380. goto fail_issue_reg_login;
  381. }
  382. return 0;
  383. fail_issue_reg_login:
  384. /* decrement the reference count on ndlp just incremented
  385. * for the failed mbox command.
  386. */
  387. lpfc_nlp_put(ndlp);
  388. mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
  389. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  390. kfree(mp);
  391. fail_free_mbox:
  392. mempool_free(mbox, phba->mbox_mem_pool);
  393. fail:
  394. lpfc_vport_set_state(vport, FC_VPORT_FAILED);
  395. lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
  396. "0249 Cannot issue Register Fabric login: Err %d\n", err);
  397. return -ENXIO;
  398. }
  399. /**
  400. * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
  401. * @vport: pointer to a host virtual N_Port data structure.
  402. *
  403. * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
  404. * the @vport. This mailbox command is necessary for SLI4 port only.
  405. *
  406. * Return code
  407. * 0 - successfully issued REG_VFI for @vport
  408. * A failure code otherwise.
  409. **/
  410. int
  411. lpfc_issue_reg_vfi(struct lpfc_vport *vport)
  412. {
  413. struct lpfc_hba *phba = vport->phba;
  414. LPFC_MBOXQ_t *mboxq = NULL;
  415. struct lpfc_nodelist *ndlp;
  416. struct lpfc_dmabuf *dmabuf = NULL;
  417. int rc = 0;
  418. /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
  419. if ((phba->sli_rev == LPFC_SLI_REV4) &&
  420. !(phba->link_flag & LS_LOOPBACK_MODE) &&
  421. !(vport->fc_flag & FC_PT2PT)) {
  422. ndlp = lpfc_findnode_did(vport, Fabric_DID);
  423. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  424. rc = -ENODEV;
  425. goto fail;
  426. }
  427. }
  428. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  429. if (!mboxq) {
  430. rc = -ENOMEM;
  431. goto fail;
  432. }
  433. /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
  434. if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
  435. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  436. if (!dmabuf) {
  437. rc = -ENOMEM;
  438. goto fail;
  439. }
  440. dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
  441. if (!dmabuf->virt) {
  442. rc = -ENOMEM;
  443. goto fail;
  444. }
  445. memcpy(dmabuf->virt, &phba->fc_fabparam,
  446. sizeof(struct serv_parm));
  447. }
  448. vport->port_state = LPFC_FABRIC_CFG_LINK;
  449. if (dmabuf)
  450. lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
  451. else
  452. lpfc_reg_vfi(mboxq, vport, 0);
  453. mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
  454. mboxq->vport = vport;
  455. mboxq->ctx_buf = dmabuf;
  456. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  457. if (rc == MBX_NOT_FINISHED) {
  458. rc = -ENXIO;
  459. goto fail;
  460. }
  461. return 0;
  462. fail:
  463. if (mboxq)
  464. mempool_free(mboxq, phba->mbox_mem_pool);
  465. if (dmabuf) {
  466. if (dmabuf->virt)
  467. lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
  468. kfree(dmabuf);
  469. }
  470. lpfc_vport_set_state(vport, FC_VPORT_FAILED);
  471. lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
  472. "0289 Issue Register VFI failed: Err %d\n", rc);
  473. return rc;
  474. }
  475. /**
  476. * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
  477. * @vport: pointer to a host virtual N_Port data structure.
  478. *
  479. * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
  480. * the @vport. This mailbox command is necessary for SLI4 port only.
  481. *
  482. * Return code
  483. * 0 - successfully issued REG_VFI for @vport
  484. * A failure code otherwise.
  485. **/
  486. int
  487. lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
  488. {
  489. struct lpfc_hba *phba = vport->phba;
  490. struct Scsi_Host *shost;
  491. LPFC_MBOXQ_t *mboxq;
  492. int rc;
  493. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  494. if (!mboxq) {
  495. lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
  496. "2556 UNREG_VFI mbox allocation failed"
  497. "HBA state x%x\n", phba->pport->port_state);
  498. return -ENOMEM;
  499. }
  500. lpfc_unreg_vfi(mboxq, vport);
  501. mboxq->vport = vport;
  502. mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
  503. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  504. if (rc == MBX_NOT_FINISHED) {
  505. lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
  506. "2557 UNREG_VFI issue mbox failed rc x%x "
  507. "HBA state x%x\n",
  508. rc, phba->pport->port_state);
  509. mempool_free(mboxq, phba->mbox_mem_pool);
  510. return -EIO;
  511. }
  512. shost = lpfc_shost_from_vport(vport);
  513. spin_lock_irq(shost->host_lock);
  514. vport->fc_flag &= ~FC_VFI_REGISTERED;
  515. spin_unlock_irq(shost->host_lock);
  516. return 0;
  517. }
  518. /**
  519. * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
  520. * @vport: pointer to a host virtual N_Port data structure.
  521. * @sp: pointer to service parameter data structure.
  522. *
  523. * This routine is called from FLOGI/FDISC completion handler functions.
  524. * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
  525. * node nodename is changed in the completion service parameter else return
  526. * 0. This function also set flag in the vport data structure to delay
  527. * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
  528. * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
  529. * node nodename is changed in the completion service parameter.
  530. *
  531. * Return code
  532. * 0 - FCID and Fabric Nodename and Fabric portname is not changed.
  533. * 1 - FCID or Fabric Nodename or Fabric portname is changed.
  534. *
  535. **/
  536. static uint8_t
  537. lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
  538. struct serv_parm *sp)
  539. {
  540. struct lpfc_hba *phba = vport->phba;
  541. uint8_t fabric_param_changed = 0;
  542. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  543. if ((vport->fc_prevDID != vport->fc_myDID) ||
  544. memcmp(&vport->fabric_portname, &sp->portName,
  545. sizeof(struct lpfc_name)) ||
  546. memcmp(&vport->fabric_nodename, &sp->nodeName,
  547. sizeof(struct lpfc_name)) ||
  548. (vport->vport_flag & FAWWPN_PARAM_CHG)) {
  549. fabric_param_changed = 1;
  550. vport->vport_flag &= ~FAWWPN_PARAM_CHG;
  551. }
  552. /*
  553. * Word 1 Bit 31 in common service parameter is overloaded.
  554. * Word 1 Bit 31 in FLOGI request is multiple NPort request
  555. * Word 1 Bit 31 in FLOGI response is clean address bit
  556. *
  557. * If fabric parameter is changed and clean address bit is
  558. * cleared delay nport discovery if
  559. * - vport->fc_prevDID != 0 (not initial discovery) OR
  560. * - lpfc_delay_discovery module parameter is set.
  561. */
  562. if (fabric_param_changed && !sp->cmn.clean_address_bit &&
  563. (vport->fc_prevDID || phba->cfg_delay_discovery)) {
  564. spin_lock_irq(shost->host_lock);
  565. vport->fc_flag |= FC_DISC_DELAYED;
  566. spin_unlock_irq(shost->host_lock);
  567. }
  568. return fabric_param_changed;
  569. }
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned one (via lpfc_check_clean_addr_bit).
 * If the fabric parameters changed, the lpfc_unreg_rpi() routine is invoked
 * on all the remaining nodes with the @vport to unregister the Remote Port
 * Indicators (RPIs) and the VPI is marked for re-registration. Finally, the
 * appropriate registration (reg_vpi / fabric reglogin / init_vpi / reg_vfi)
 * is issued depending on SLI revision and current registration state.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           struct serv_parm *sp, IOCB_t *irsp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *np;
        struct lpfc_nodelist *next_np;
        uint8_t fabric_param_changed;

        /* We are now logged into a fabric */
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_FABRIC;
        spin_unlock_irq(shost->host_lock);

        /* Adopt the fabric's timeout values from the FLOGI response CSPs */
        phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
        if (sp->cmn.edtovResolution)        /* E_D_TOV ticks are in nanoseconds */
                phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

        phba->fc_edtovResol = sp->cmn.edtovResolution;
        phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

        if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_PUBLIC_LOOP;
                spin_unlock_irq(shost->host_lock);
        }

        /* The fabric-assigned N_Port ID is in word 4 of the response IOCB */
        vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
        memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
        memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
        /* Record the classes of service the fabric port supports */
        ndlp->nlp_class_sup = 0;
        if (sp->cls1.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS1;
        if (sp->cls2.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS2;
        if (sp->cls3.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS3;
        if (sp->cls4.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS4;
        /* Max receive frame size: 12-bit value split across two CSP bytes */
        ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
                             sp->cmn.bbRcvSizeLsb;

        /* Must be checked BEFORE the remembered fabric names are updated */
        fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
        if (fabric_param_changed) {
                /* Reset FDMI attribute masks based on config parameter */
                if (phba->cfg_enable_SmartSAN ||
                    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
                        /* Setup appropriate attribute masks */
                        vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
                        if (phba->cfg_enable_SmartSAN)
                                vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
                        else
                                vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
                } else {
                        vport->fdmi_hba_mask = 0;
                        vport->fdmi_port_mask = 0;
                }
        }
        /* Remember the fabric identity for the next change comparison */
        memcpy(&vport->fabric_portname, &sp->portName,
               sizeof(struct lpfc_name));
        memcpy(&vport->fabric_nodename, &sp->nodeName,
               sizeof(struct lpfc_name));
        memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                if (sp->cmn.response_multiple_NPort) {
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_ELS | LOG_VPORT,
                                         "1816 FLOGI NPIV supported, "
                                         "response data 0x%x\n",
                                         sp->cmn.response_multiple_NPort);
                        spin_lock_irq(&phba->hbalock);
                        phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
                        spin_unlock_irq(&phba->hbalock);
                } else {
                        /* Because we asked f/w for NPIV it still expects us
                         * to call reg_vnpid at least for the physical host
                         */
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_ELS | LOG_VPORT,
                                         "1817 Fabric does not support NPIV "
                                         "- configuring single port mode.\n");
                        spin_lock_irq(&phba->hbalock);
                        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
                        spin_unlock_irq(&phba->hbalock);
                }
        }

        /*
         * For FC we need to do some special processing because of the SLI
         * Port's default settings of the Common Service Parameters.
         */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
                /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
                if (fabric_param_changed)
                        lpfc_unregister_fcf_prep(phba);

                /* This should just update the VFI CSPs */
                if (vport->fc_flag & FC_VFI_REGISTERED)
                        lpfc_issue_reg_vfi(vport);
        }

        if (fabric_param_changed &&
                !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
                /* If our NportID changed, we need to ensure all
                 * remaining NPORTs get unreg_login'ed.
                 */
                list_for_each_entry_safe(np, next_np,
                                         &vport->fc_nodes, nlp_listp) {
                        if (!NLP_CHK_NODE_ACT(np))
                                continue;
                        if ((np->nlp_state != NLP_STE_NPR_NODE) ||
                            !(np->nlp_flag & NLP_NPR_ADISC))
                                continue;
                        spin_lock_irq(shost->host_lock);
                        np->nlp_flag &= ~NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        lpfc_sli4_unreg_all_rpis(vport);
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                        spin_unlock_irq(shost->host_lock);
                }

                /*
                 * For SLI3 and SLI4, the VPI needs to be reregistered in
                 * response to this fabric parameter change event.
                 */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                spin_unlock_irq(shost->host_lock);
        } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
                !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
                /*
                 * Driver needs to re-reg VPI in order for f/w
                 * to update the MAC address.
                 */
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                lpfc_register_new_vport(phba, vport, ndlp);
                return 0;
        }

        if (phba->sli_rev < LPFC_SLI_REV4) {
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
                if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
                    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
                        lpfc_register_new_vport(phba, vport, ndlp);
                else
                        lpfc_issue_fabric_reglogin(vport);
        } else {
                /* SLI4: pick the next step based on VPI/VFI registration state */
                ndlp->nlp_type |= NLP_FABRIC;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
                    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
                } else if (vport->fc_flag & FC_VFI_REGISTERED)
                        lpfc_issue_init_vpi(vport);
                else {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                        "3135 Need register VFI: (x%x/%x)\n",
                                        vport->fc_prevDID, vport->fc_myDID);
                        lpfc_issue_reg_vfi(vport);
                }
        }
        return 0;
}
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	/* The link is point-to-point: clear fabric state, mark PT2PT.
	 * fc_flag updates are serialized under the host lock.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	/* WWPN comparison decides which side initiates PLOGI: a
	 * non-negative result means our port name is higher (or equal),
	 * so this side takes the initiator role.
	 */
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* From here on, work against the remote node's ndlp, not
		 * the (FLOGI) fabric ndlp passed in.
		 */
		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			/* Node exists on the list but is inactive;
			 * re-enable it in place.
			 */
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if(!ndlp)
				goto fail;
		}

		/* Record the remote port's names from the FLOGI response
		 * service parameters.
		 */
		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);

		/* Issue CONFIG_LINK; its completion handler
		 * (lpfc_mbx_cmpl_local_config_link) continues the pt2pt
		 * bring-up.
		 */
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			/* Skip failover if this failure is the local abort
			 * generated by a CVL event we have already seen;
			 * otherwise remember the new CVL event tag.
			 */
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			    IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			/* Mark the current FCF failed and try the next one
			 * in round-robin order.
			 */
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "2858 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		/* SLI4: if the VFI is stale (not registered, DID changed,
		 * or the topology changed) it must be (re)registered before
		 * discovery can proceed.
		 */
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
			phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	/* Response payload: the service parameters follow the 4-byte
	 * ELS command word.
	 */
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			/* FCoE with the Fabric bit cleared is not a valid
			 * topology; abandon FCF discovery.
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	/* Drop the extra node reference held for this FLOGI attempt */
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	/* lpfc_els_free_iocb() also drops one ndlp reference */
	lpfc_els_free_iocb(phba, cmdiocb);
}
  1093. /**
  1094. * lpfc_cmpl_els_link_down - Completion callback function for ELS command
  1095. * aborted during a link down
  1096. * @phba: pointer to lpfc hba data structure.
  1097. * @cmdiocb: pointer to lpfc command iocb data structure.
  1098. * @rspiocb: pointer to lpfc response iocb data structure.
  1099. *
  1100. */
  1101. static void
  1102. lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  1103. struct lpfc_iocbq *rspiocb)
  1104. {
  1105. IOCB_t *irsp;
  1106. uint32_t *pcmd;
  1107. uint32_t cmd;
  1108. pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
  1109. cmd = *pcmd;
  1110. irsp = &rspiocb->iocb;
  1111. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  1112. "6445 ELS completes after LINK_DOWN: "
  1113. " Status %x/%x cmd x%x flg x%x\n",
  1114. irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
  1115. cmdiocb->iocb_flag);
  1116. if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
  1117. cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
  1118. atomic_dec(&phba->fabric_iocb_count);
  1119. }
  1120. lpfc_els_free_iocb(phba, cmdiocb);
  1121. }
/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	/* NOTE(review): on-stack lpfc_iocbq is large; it is only used to
	 * carry the rx_id/ox_id for a deferred FLOGI ACC below.
	 */
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	/* Payload is the 4-byte ELS command word plus the service params */
	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	/* Temporarily use the FLOGI discovery timeout while arming the
	 * discovery timer, then restore the configured R_A_TOV.
	 */
	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);

	phba->hba_flag |= HBA_FLOGI_ISSUED;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* Respond as the fabric (Fabric_DID) for the duration of
		 * the ACC, then restore our own DID.
		 */
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
  1242. /**
  1243. * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
  1244. * @phba: pointer to lpfc hba data structure.
  1245. *
  1246. * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
  1247. * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
  1248. * list and issues an abort IOCB commond on each outstanding IOCB that
  1249. * contains a active Fabric_DID ndlp. Note that this function is to issue
  1250. * the abort IOCB command on all the outstanding IOCBs, thus when this
  1251. * function returns, it does not guarantee all the IOCBs are actually aborted.
  1252. *
  1253. * Return code
  1254. * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
  1255. **/
  1256. int
  1257. lpfc_els_abort_flogi(struct lpfc_hba *phba)
  1258. {
  1259. struct lpfc_sli_ring *pring;
  1260. struct lpfc_iocbq *iocb, *next_iocb;
  1261. struct lpfc_nodelist *ndlp;
  1262. IOCB_t *icmd;
  1263. /* Abort outstanding I/O on NPort <nlp_DID> */
  1264. lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
  1265. "0201 Abort outstanding I/O on NPort x%x\n",
  1266. Fabric_DID);
  1267. pring = lpfc_phba_elsring(phba);
  1268. if (unlikely(!pring))
  1269. return -EIO;
  1270. /*
  1271. * Check the txcmplq for an iocb that matches the nport the driver is
  1272. * searching for.
  1273. */
  1274. spin_lock_irq(&phba->hbalock);
  1275. list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
  1276. icmd = &iocb->iocb;
  1277. if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
  1278. ndlp = (struct lpfc_nodelist *)(iocb->context1);
  1279. if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
  1280. (ndlp->nlp_DID == Fabric_DID))
  1281. lpfc_sli_issue_abort_iotag(phba, pring, iocb);
  1282. }
  1283. }
  1284. spin_unlock_irq(&phba->hbalock);
  1285. return 0;
  1286. }
  1287. /**
  1288. * lpfc_initial_flogi - Issue an initial fabric login for a vport
  1289. * @vport: pointer to a host virtual N_Port data structure.
  1290. *
  1291. * This routine issues an initial Fabric Login (FLOGI) for the @vport
  1292. * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
  1293. * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
  1294. * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
  1295. * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
  1296. * is then invoked with the @vport and the ndlp to perform the FLOGI for the
  1297. * @vport.
  1298. *
  1299. * Return code
  1300. * 0 - failed to issue initial flogi for @vport
  1301. * 1 - successfully issued initial flogi for @vport
  1302. **/
  1303. int
  1304. lpfc_initial_flogi(struct lpfc_vport *vport)
  1305. {
  1306. struct lpfc_nodelist *ndlp;
  1307. vport->port_state = LPFC_FLOGI;
  1308. lpfc_set_disctmo(vport);
  1309. /* First look for the Fabric ndlp */
  1310. ndlp = lpfc_findnode_did(vport, Fabric_DID);
  1311. if (!ndlp) {
  1312. /* Cannot find existing Fabric ndlp, so allocate a new one */
  1313. ndlp = lpfc_nlp_init(vport, Fabric_DID);
  1314. if (!ndlp)
  1315. return 0;
  1316. /* Set the node type */
  1317. ndlp->nlp_type |= NLP_FABRIC;
  1318. /* Put ndlp onto node list */
  1319. lpfc_enqueue_node(vport, ndlp);
  1320. } else if (!NLP_CHK_NODE_ACT(ndlp)) {
  1321. /* re-setup ndlp without removing from node list */
  1322. ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
  1323. if (!ndlp)
  1324. return 0;
  1325. }
  1326. if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
  1327. /* This decrement of reference count to node shall kick off
  1328. * the release of the node.
  1329. */
  1330. lpfc_nlp_put(ndlp);
  1331. return 0;
  1332. }
  1333. return 1;
  1334. }
  1335. /**
  1336. * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
  1337. * @vport: pointer to a host virtual N_Port data structure.
  1338. *
  1339. * This routine issues an initial Fabric Discover (FDISC) for the @vport
  1340. * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
  1341. * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
  1342. * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
  1343. * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
  1344. * is then invoked with the @vport and the ndlp to perform the FDISC for the
  1345. * @vport.
  1346. *
  1347. * Return code
  1348. * 0 - failed to issue initial fdisc for @vport
  1349. * 1 - successfully issued initial fdisc for @vport
  1350. **/
  1351. int
  1352. lpfc_initial_fdisc(struct lpfc_vport *vport)
  1353. {
  1354. struct lpfc_nodelist *ndlp;
  1355. /* First look for the Fabric ndlp */
  1356. ndlp = lpfc_findnode_did(vport, Fabric_DID);
  1357. if (!ndlp) {
  1358. /* Cannot find existing Fabric ndlp, so allocate a new one */
  1359. ndlp = lpfc_nlp_init(vport, Fabric_DID);
  1360. if (!ndlp)
  1361. return 0;
  1362. /* Put ndlp onto node list */
  1363. lpfc_enqueue_node(vport, ndlp);
  1364. } else if (!NLP_CHK_NODE_ACT(ndlp)) {
  1365. /* re-setup ndlp without removing from node list */
  1366. ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
  1367. if (!ndlp)
  1368. return 0;
  1369. }
  1370. if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
  1371. /* decrement node reference count to trigger the release of
  1372. * the node.
  1373. */
  1374. lpfc_nlp_put(ndlp);
  1375. return 0;
  1376. }
  1377. return 1;
  1378. }
  1379. /**
  1380. * lpfc_more_plogi - Check and issue remaining plogis for a vport
  1381. * @vport: pointer to a host virtual N_Port data structure.
  1382. *
  1383. * This routine checks whether there are more remaining Port Logins
  1384. * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
  1385. * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
  1386. * to issue ELS PLOGIs up to the configured discover threads with the
  1387. * @vport (@vport->cfg_discovery_threads). The function also decrement
  1388. * the @vport's num_disc_node by 1 if it is not already 0.
  1389. **/
  1390. void
  1391. lpfc_more_plogi(struct lpfc_vport *vport)
  1392. {
  1393. if (vport->num_disc_nodes)
  1394. vport->num