/drivers/scsi/lpfc/lpfc_nportdisc.c

http://github.com/mirrors/linux · C · 3148 lines · 2316 code · 380 blank · 452 comment · 360 complexity · 5452b82a4b12fa7b6c6ff53dbee425c0 MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. #include <linux/blkdev.h>
  24. #include <linux/pci.h>
  25. #include <linux/slab.h>
  26. #include <linux/interrupt.h>
  27. #include <scsi/scsi.h>
  28. #include <scsi/scsi_device.h>
  29. #include <scsi/scsi_host.h>
  30. #include <scsi/scsi_transport_fc.h>
  31. #include <scsi/fc/fc_fs.h>
  32. #include <linux/nvme-fc-driver.h>
  33. #include "lpfc_hw4.h"
  34. #include "lpfc_hw.h"
  35. #include "lpfc_sli.h"
  36. #include "lpfc_sli4.h"
  37. #include "lpfc_nl.h"
  38. #include "lpfc_disc.h"
  39. #include "lpfc.h"
  40. #include "lpfc_scsi.h"
  41. #include "lpfc_nvme.h"
  42. #include "lpfc_logmsg.h"
  43. #include "lpfc_crtn.h"
  44. #include "lpfc_vport.h"
  45. #include "lpfc_debugfs.h"
  46. /* Called to verify a rcv'ed ADISC was intended for us. */
  47. static int
  48. lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  49. struct lpfc_name *nn, struct lpfc_name *pn)
  50. {
  51. /* First, we MUST have a RPI registered */
  52. if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
  53. return 0;
  54. /* Compare the ADISC rsp WWNN / WWPN matches our internal node
  55. * table entry for that node.
  56. */
  57. if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
  58. return 0;
  59. if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
  60. return 0;
  61. /* we match, return success */
  62. return 1;
  63. }
/* Validate the common service parameters from a received PLOGI/FLOGI
 * payload (@sp) against this port's own login parameters, clamping any
 * receive-data-field sizes the remote advertised beyond what we support.
 *
 * @class selects which class of service must be valid (CLASS1/2/3).
 * @flogi is nonzero when validating FLOGI parameters; in that case the
 * per-class receive sizes are not checked or corrected.
 *
 * Returns 1 when the parameters are acceptable (sp may have been clamped
 * in place and the node's WWNN/WWPN cached into @ndlp); 0 when the device
 * sent invalid service parameters.
 */
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm *sp, uint32_t class, int flogi)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits. Reconstruct and compare the fields as a 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
				     hsp->cls1.rcvDataSizeLsb);
			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
				     sp->cls1.rcvDataSizeLsb);
			/* A zero receive size is illegal. */
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				/* Clamp the remote's size to our own. */
				sp->cls1.rcvDataSizeLsb =
					hsp->cls1.rcvDataSizeLsb;
				sp->cls1.rcvDataSizeMsb =
					hsp->cls1.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS1)
		goto bad_service_param;
	if (sp->cls2.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
				     hsp->cls2.rcvDataSizeLsb);
			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
				     sp->cls2.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls2.rcvDataSizeLsb =
					hsp->cls2.rcvDataSizeLsb;
				sp->cls2.rcvDataSizeMsb =
					hsp->cls2.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS2)
		goto bad_service_param;
	if (sp->cls3.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
				     hsp->cls3.rcvDataSizeLsb);
			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
				     sp->cls3.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls3.rcvDataSizeLsb =
					hsp->cls3.rcvDataSizeLsb;
				sp->cls3.rcvDataSizeMsb =
					hsp->cls3.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS3)
		goto bad_service_param;

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	/* Cache the remote's names; lpfc_check_adisc compares against these. */
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters. Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}
  155. static void *
  156. lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  157. struct lpfc_iocbq *rspiocb)
  158. {
  159. struct lpfc_dmabuf *pcmd, *prsp;
  160. uint32_t *lp;
  161. void *ptr = NULL;
  162. IOCB_t *irsp;
  163. irsp = &rspiocb->iocb;
  164. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  165. /* For lpfc_els_abort, context2 could be zero'ed to delay
  166. * freeing associated memory till after ABTS completes.
  167. */
  168. if (pcmd) {
  169. prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
  170. list);
  171. if (prsp) {
  172. lp = (uint32_t *) prsp->virt;
  173. ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
  174. }
  175. } else {
  176. /* Force ulpStatus error since we are returning NULL ptr */
  177. if (!(irsp->ulpStatus)) {
  178. irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
  179. irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
  180. }
  181. ptr = NULL;
  182. }
  183. return ptr;
  184. }
/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry. This
 * routine effectively results in a "software abort".
 */
void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* In case of error recovery path, we might have a NULL pring here */
	if (unlikely(!pring))
		return;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	/* Clean up all fabric IOs first.*/
	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
	 * txcmplq so that the abort operation completes them successfully.
	 * Note: on SLI4 the per-ring lock nests inside hbalock.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
	/* Add to abort_list on on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
			spin_lock_irq(&phba->hbalock);
			list_del_init(&iocb->dlist);
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			spin_unlock_irq(&phba->hbalock);
	}

	INIT_LIST_HEAD(&abort_list);

	/* Now process the txq; these IOCBs were never issued to the HW,
	 * so they can simply be moved off and cancelled.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			list_del_init(&iocb->list);
			list_add_tail(&iocb->list, &abort_list);
		}
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
 * @phba: pointer to lpfc hba data structure.
 * @link_mbox: pointer to CONFIG_LINK mailbox object
 *
 * This routine is only called if we are SLI3, direct connect pt2pt
 * mode and the remote NPort issues the PLOGI after link up.
 *
 * Ownership: on entry link_mbox->context3 holds the REG_LOGIN mailbox
 * (login_mbox) and login_mbox->context3 holds the saved rcv IOCB copy,
 * as chained by lpfc_rcv_plogi.  Both links are cleared here; on error
 * all three objects are freed, on success login_mbox is handed to
 * lpfc_els_rsp_acc and the remaining two are freed.
 */
static void
lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
{
	LPFC_MBOXQ_t *login_mbox;
	MAILBOX_t *mb = &link_mbox->u.mb;
	struct lpfc_iocbq *save_iocb;
	struct lpfc_nodelist *ndlp;
	int rc;

	ndlp = link_mbox->ctx_ndlp;
	/* Unchain the deferred objects stashed by lpfc_rcv_plogi. */
	login_mbox = link_mbox->context3;
	save_iocb = login_mbox->context3;
	link_mbox->context3 = NULL;
	login_mbox->context3 = NULL;

	/* Check for CONFIG_LINK error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
				mb->mbxStatus);
		mempool_free(login_mbox, phba->mbox_mem_pool);
		mempool_free(link_mbox, phba->mbox_mem_pool);
		kfree(save_iocb);
		return;
	}

	/* Now that CONFIG_LINK completed, and our SID is configured,
	 * we can now proceed with sending the PLOGI ACC.
	 */
	rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
			      save_iocb, ndlp, login_mbox);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4576 PLOGI ACC fails pt2pt discovery: %x\n",
				rc);
		mempool_free(login_mbox, phba->mbox_mem_pool);
	}

	mempool_free(link_mbox, phba->mbox_mem_pool);
	kfree(save_iocb);
}
/**
 * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function provides the unreg rpi mailbox completion handler for a tgt.
 * The routine frees the memory resources associated with the completed
 * mailbox command and transmits the ELS ACC.
 *
 * This routine is only called if we are SLI4, acting in target
 * mode and the remote NPort issues the PLOGI after link up.
 **/
static void
lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
	LPFC_MBOXQ_t *mbox = pmb->context3;
	struct lpfc_iocbq *piocb = NULL;
	int rc;

	/* Unchain the REG_LOGIN mailbox and saved rcv IOCB stashed by
	 * lpfc_rcv_plogi (pmb->context3 -> mbox, mbox->context3 -> piocb).
	 */
	if (mbox) {
		pmb->context3 = NULL;
		piocb = mbox->context3;
		mbox->context3 = NULL;
	}

	/*
	 * Complete the unreg rpi mbx request, and update flags.
	 * This will also restart any deferred events.
	 */
	lpfc_nlp_get(ndlp);
	lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);

	if (!piocb) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
				 "4578 PLOGI ACC fail\n");
		if (mbox)
			mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
				 "4579 PLOGI ACC fail %x\n", rc);
		if (mbox)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
	kfree(piocb);
out:
	/* Balance the lpfc_nlp_get taken above. */
	lpfc_nlp_put(ndlp);
}
/* Handle a received PLOGI for node @ndlp.
 *
 * Validates the service parameters, handles implicit-logout for already
 * logged-in nodes, performs pt2pt NPortId assignment, and sets up the
 * REG_LOGIN mailbox.  Depending on mode, the PLOGI ACC may be sent
 * immediately or deferred behind a CONFIG_LINK (SLI3 pt2pt) or UNREG_RPI
 * (SLI4 target) mailbox completion.
 *
 * Returns 1 when the PLOGI was accepted/handled, 0 when it was rejected.
 */
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint64_t nlp_portwwn = 0;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *link_mbox;
	LPFC_MBOXQ_t *login_mbox;
	struct lpfc_iocbq *save_iocb;
	struct ls_rjt stat;
	uint32_t vid, flag;
	u16 rpi;
	int rc, defer_acc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	/* Service parameters follow the 4-byte ELS command word. */
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* NOTE(review): the "nname"/"pname" text in the next two log
	 * messages appears swapped relative to the fields checked and the
	 * reject explanation codes - confirm against message catalog.
	 */
	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid nname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}
	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid pname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}

	/* Remember the WWPN we previously cached for change detection. */
	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
		/* Reject this request because invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
			 "x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			 ndlp->nlp_rpi, vport->port_state,
			 vport->fc_flag);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	defer_acc = 0;
	/* Record which classes of service the remote supports. */
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
	/* if already logged in, do implicit logout */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
		/* fall through */
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		/* For initiators, lpfc_plogi_confirm_nport skips fabric did.
		 * For target mode, execute implicit logo.
		 * Fabric nodes go into NPR.
		 */
		if (!(ndlp->nlp_type & NLP_FABRIC) &&
		    !(phba->nvmet_support)) {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
					 ndlp, NULL);
			return 1;
		}
		if (nlp_portwwn != 0 &&
		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0143 PLOGI recv'd from DID: x%x "
					 "WWPN changed: old %llx new %llx\n",
					 ndlp->nlp_DID,
					 (unsigned long long)nlp_portwwn,
					 (unsigned long long)
					 wwn_to_u64(sp->portName.u.wwn));

		ndlp->nlp_prev_state = ndlp->nlp_state;
		/* rport needs to be unregistered first */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		break;
	}

	/* Reset role/FC-2 capability bits; they are re-learned via PRLI. */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag &= ~NLP_FIRSTBURST;

	login_mbox = NULL;
	link_mbox = NULL;
	save_iocb = NULL;

	/* Check for Nport to NPort pt2pt protocol */
	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;

		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds.
			 * NOTE(review): converts phba->fc_edtov rather than
			 * the just-read ed_tov - confirm this is intended.
			 */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		/*
		 * For pt-to-pt, use the larger EDTOV
		 * RATOV = 2 * EDTOV
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_issue_reg_vfi(vport);
		else {
			defer_acc = 1;
			link_mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!link_mbox)
				goto out;
			lpfc_config_link(phba, link_mbox);
			link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
			link_mbox->vport = vport;
			link_mbox->ctx_ndlp = ndlp;

			save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
			if (!save_iocb)
				goto out;
			/* Save info from cmd IOCB used in rsp */
			memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
			       sizeof(struct lpfc_iocbq));
		}

		lpfc_can_disctmo(vport);
	}

	ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
	if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
	    sp->cmn.valid_vendor_ver_level) {
		vid = be32_to_cpu(sp->un.vv.vid);
		flag = be32_to_cpu(sp->un.vv.flags);
		if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
	}

	login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!login_mbox)
		goto out;

	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
	if (phba->nvmet_support && !defer_acc) {
		link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!link_mbox)
			goto out;

		/* As unique identifiers such as iotag would be overwritten
		 * with those from the cmdiocb, allocate separate temporary
		 * storage for the copy.
		 */
		save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
		if (!save_iocb)
			goto out;

		/* Unreg RPI is required for SLI4. */
		rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
		lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
		link_mbox->vport = vport;
		link_mbox->ctx_ndlp = ndlp;
		link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;

		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
			ndlp->nlp_flag |= NLP_UNREG_INP;

		/* Save info from cmd IOCB used in rsp */
		memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));

		/* Delay sending ACC till unreg RPI completes. */
		defer_acc = 1;
	} else if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_unreg_rpi(vport, ndlp);

	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
			  (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
	if (rc)
		goto out;

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this login_mbox command to be processed later.
	 */
	login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	login_mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI. If pending plogi
	 * is not canceled here, the plogi will be rejected by
	 * remote port and will be retried. On a configuration with
	 * single discovery thread, this will cause a huge delay in
	 * discovery. Also this will cause multiple state machines
	 * running in parallel for this node.
	 * This only applies to a fabric environment.
	 */
	if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
	    (vport->fc_flag & FC_FABRIC)) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* no deferred ACC */
		kfree(save_iocb);

		/* In order to preserve RPIs, we want to cleanup
		 * the default RPI the firmware created to rcv
		 * this ELS request. The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
			ndlp, login_mbox);
		if (rc)
			mempool_free(login_mbox, phba->mbox_mem_pool);
		return 1;
	}
	if (defer_acc) {
		/* So the order here should be:
		 * SLI3 pt2pt
		 *   Issue CONFIG_LINK mbox
		 *   CONFIG_LINK cmpl
		 * SLI4 tgt
		 *   Issue UNREG RPI mbx
		 *   UNREG RPI cmpl
		 * Issue PLOGI ACC
		 * PLOGI ACC cmpl
		 * Issue REG_LOGIN mbox
		 */

		/* Save the REG_LOGIN mbox for and rcv IOCB copy later */
		link_mbox->context3 = login_mbox;
		login_mbox->context3 = save_iocb;

		/* Start the ball rolling by issuing CONFIG_LINK here */
		rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out;
		return 1;
	}

	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
	if (rc)
		mempool_free(login_mbox, phba->mbox_mem_pool);
	return 1;
out:
	/* Allocation or issue failure: free whatever was set up and
	 * reject the PLOGI as out-of-resource.
	 */
	if (defer_acc)
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4577 discovery failure: %p %p %p\n",
				save_iocb, link_mbox, login_mbox);
	kfree(save_iocb);
	if (link_mbox)
		mempool_free(link_mbox, phba->mbox_mem_pool);
	if (login_mbox)
		mempool_free(login_mbox, phba->mbox_mem_pool);

	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}
  628. /**
  629. * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
  630. * @phba: pointer to lpfc hba data structure.
  631. * @mboxq: pointer to mailbox object
  632. *
  633. * This routine is invoked to issue a completion to a rcv'ed
  634. * ADISC or PDISC after the paused RPI has been resumed.
  635. **/
  636. static void
  637. lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  638. {
  639. struct lpfc_vport *vport;
  640. struct lpfc_iocbq *elsiocb;
  641. struct lpfc_nodelist *ndlp;
  642. uint32_t cmd;
  643. elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
  644. ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
  645. vport = mboxq->vport;
  646. cmd = elsiocb->drvrTimeout;
  647. if (cmd == ELS_CMD_ADISC) {
  648. lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
  649. } else {
  650. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
  651. ndlp, NULL);
  652. }
  653. kfree(elsiocb);
  654. mempool_free(mboxq, phba->mbox_mem_pool);
  655. }
/* Handle a received ADISC or PDISC for node @ndlp.
 *
 * On a successful authentication check the ACC is sent (for SLI4 the RPI
 * is resumed first, deferring the ACC to lpfc_mbx_cmpl_resume_rpi) and the
 * node moves to MAPPED/UNMAPPED state; returns 1.  Otherwise the request
 * is rejected and the node is parked in NPR with a 1 second delayed-PLOGI
 * retry; returns 0.
 */
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	/* The payload layout differs: ADISC carries an ADISC struct,
	 * PDISC carries full service parameters.
	 */
	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {

		/*
		 * As soon as  we send ACC, the remote NPort can
		 * start sending us data. Thus, for SLI4 we must
		 * resume the RPI before the ACC goes out.
		 */
		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
				GFP_KERNEL);
			if (elsiocb) {

				/* Save info from cmd IOCB used in rsp */
				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
					sizeof(struct lpfc_iocbq));

				/* Save the ELS cmd */
				elsiocb->drvrTimeout = cmd;

				lpfc_sli4_resume_rpi(ndlp,
					lpfc_mbx_cmpl_resume_rpi, elsiocb);
				goto out;
			}
			/* On kmalloc failure fall through and ACC directly. */
		}

		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
				ndlp, NULL);
		}
out:
		/* If we are authenticated, move to the proper state */
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
		else
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

		return 1;
	}
	/* Reject this request because invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
/* Handle a received LOGO or PRLO for node @ndlp.
 *
 * ACCs the request, then: for a fabric LOGO takes the port link down and
 * either re-instantiates the vlink via FDISC (when other active vlinks
 * exist) or restarts physical port discovery; for non-fabric nodes
 * schedules a delayed PLOGI retry where appropriate.  The node always
 * ends in NPR state.  Always returns 0.
 */
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if (ndlp->nlp_DID == Fabric_DID) {
		if (vport->port_state <= LPFC_FDISC)
			goto out;
		lpfc_linkdown_port(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
		spin_unlock_irq(shost->host_lock);
		/* Scan all vports for any other vlink still active. */
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_LOGO_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
			active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			spin_lock_irq(shost->host_lock);
			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
			lpfc_retry_pport_discovery(phba);
		}
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		((ndlp->nlp_type & NLP_FCP_TARGET) ||
		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO. The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}
  821. static uint32_t
  822. lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
  823. struct lpfc_nodelist *ndlp,
  824. struct lpfc_iocbq *cmdiocb)
  825. {
  826. struct ls_rjt stat;
  827. uint32_t *payload;
  828. uint32_t cmd;
  829. payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
  830. cmd = *payload;
  831. if (vport->phba->nvmet_support) {
  832. /* Must be a NVME PRLI */
  833. if (cmd == ELS_CMD_PRLI)
  834. goto out;
  835. } else {
  836. /* Initiator mode. */
  837. if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
  838. goto out;
  839. }
  840. return 1;
  841. out:
  842. lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
  843. "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
  844. "state x%x flags x%x\n",
  845. cmd, ndlp->nlp_rpi, ndlp->nlp_state,
  846. ndlp->nlp_flag);
  847. memset(&stat, 0, sizeof(struct ls_rjt));
  848. stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
  849. stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
  850. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
  851. ndlp, NULL);
  852. return 0;
  853. }
/* Parse a received PRLI and record the remote port's FC-4 capabilities
 * (FCP and/or NVME; initiator and/or target roles) on the ndlp, then
 * propagate any resulting FCP role change to the SCSI FC transport
 * rport, if one is attached.
 */
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	/* PRLI service parameter page follows the 4-byte ELS command word */
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	if ((npr->prliType == PRLI_FCP_TYPE) ||
	    (npr->prliType == PRLI_NVME_TYPE)) {
		if (npr->initiatorFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_INITIATOR;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_INITIATOR;
		}
		if (npr->targetFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_TARGET;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_TARGET;
			/* writeXferRdyDis in the PRLI enables first-burst */
			if (npr->writeXferRdyDis)
				ndlp->nlp_flag |= NLP_FIRSTBURST;
		}
		/* Retry bit on an FCP node marks it as an FCP-2 device */
		if (npr->Retry && ndlp->nlp_type &
					(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

		/* Retry bit on an NVME node enables SLER when the HBA
		 * supports it (phba->nsler).
		 */
		if (npr->Retry && phba->nsler &&
		    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;

		/* If this driver is in nvme target mode, set the ndlp's fc4
		 * type to NVME provided the PRLI response claims NVME FC4
		 * type.  Target mode does not issue gft_id so doesn't get
		 * the fc4 type set until now.
		 */
		if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
			ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		}
		if (npr->prliType == PRLI_FCP_TYPE)
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg: role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		/* NVME-only configuration leaves SCSI rport roles alone */
		if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
			fc_remote_port_rolechg(rport, roles);
	}
}
  915. static uint32_t
  916. lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  917. {
  918. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  919. if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
  920. spin_lock_irq(shost->host_lock);
  921. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  922. spin_unlock_irq(shost->host_lock);
  923. return 0;
  924. }
  925. if (!(vport->fc_flag & FC_PT2PT)) {
  926. /* Check config parameter use-adisc or FCP-2 */
  927. if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
  928. ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
  929. (ndlp->nlp_type & NLP_FCP_TARGET)))) {
  930. spin_lock_irq(shost->host_lock);
  931. ndlp->nlp_flag |= NLP_NPR_ADISC;
  932. spin_unlock_irq(shost->host_lock);
  933. return 1;
  934. }
  935. }
  936. spin_lock_irq(shost->host_lock);
  937. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  938. spin_unlock_irq(shost->host_lock);
  939. lpfc_unreg_rpi(vport, ndlp);
  940. return 0;
  941. }
  942. /**
  943. * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
  944. * @phba : Pointer to lpfc_hba structure.
  945. * @vport: Pointer to lpfc_vport structure.
  946. * @rpi : rpi to be release.
  947. *
  948. * This function will send a unreg_login mailbox command to the firmware
  949. * to release a rpi.
  950. **/
  951. static void
  952. lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
  953. struct lpfc_nodelist *ndlp, uint16_t rpi)
  954. {
  955. LPFC_MBOXQ_t *pmb;
  956. int rc;
  957. /* If there is already an UNREG in progress for this ndlp,
  958. * no need to queue up another one.
  959. */
  960. if (ndlp->nlp_flag & NLP_UNREG_INP) {
  961. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  962. "1435 release_rpi SKIP UNREG x%x on "
  963. "NPort x%x deferred x%x flg x%x "
  964. "Data: x%px\n",
  965. ndlp->nlp_rpi, ndlp->nlp_DID,
  966. ndlp->nlp_defer_did,
  967. ndlp->nlp_flag, ndlp);
  968. return;
  969. }
  970. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
  971. GFP_KERNEL);
  972. if (!pmb)
  973. lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
  974. "2796 mailbox memory allocation failed \n");
  975. else {
  976. lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
  977. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  978. pmb->vport = vport;
  979. pmb->ctx_ndlp = ndlp;
  980. if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
  981. (!(vport->fc_flag & FC_OFFLINE_MODE)))
  982. ndlp->nlp_flag |= NLP_UNREG_INP;
  983. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  984. "1437 release_rpi UNREG x%x "
  985. "on NPort x%x flg x%x\n",
  986. ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
  987. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  988. if (rc == MBX_NOT_FINISHED)
  989. mempool_free(pmb, phba->mbox_mem_pool);
  990. }
  991. }
  992. static uint32_t
  993. lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  994. void *arg, uint32_t evt)
  995. {
  996. struct lpfc_hba *phba;
  997. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  998. uint16_t rpi;
  999. phba = vport->phba;
  1000. /* Release the RPI if reglogin completing */
  1001. if (!(phba->pport->load_flag & FC_UNLOADING) &&
  1002. (evt == NLP_EVT_CMPL_REG_LOGIN) &&
  1003. (!pmb->u.mb.mbxStatus)) {
  1004. rpi = pmb->u.mb.un.varWords[0];
  1005. lpfc_release_rpi(phba, vport, ndlp, rpi);
  1006. }
  1007. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  1008. "0271 Illegal State Transition: node x%x "
  1009. "event x%x, state x%x Data: x%x x%x\n",
  1010. ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
  1011. ndlp->nlp_flag);
  1012. return ndlp->nlp_state;
  1013. }
  1014. static uint32_t
  1015. lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1016. void *arg, uint32_t evt)
  1017. {
  1018. /* This transition is only legal if we previously
  1019. * rcv'ed a PLOGI. Since we don't want 2 discovery threads
  1020. * working on the same NPortID, do nothing for this thread
  1021. * to stop it.
  1022. */
  1023. if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
  1024. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  1025. "0272 Illegal State Transition: node x%x "
  1026. "event x%x, state x%x Data: x%x x%x\n",
  1027. ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
  1028. ndlp->nlp_flag);
  1029. }
  1030. return ndlp->nlp_state;
  1031. }
  1032. /* Start of Discovery State Machine routines */
  1033. static uint32_t
  1034. lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1035. void *arg, uint32_t evt)
  1036. {
  1037. struct lpfc_iocbq *cmdiocb;
  1038. cmdiocb = (struct lpfc_iocbq *) arg;
  1039. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
  1040. return ndlp->nlp_state;
  1041. }
  1042. return NLP_STE_FREED_NODE;
  1043. }
/* Any other ELS received while the node is UNUSED: there is no valid
 * login, so send a LOGO to make the remote port start over.  State is
 * unchanged.
 */
static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}
/* LOGO received on an UNUSED node: set NLP_LOGO_ACC (under host_lock)
 * before sending the ACC so the completion path knows this ACC answers
 * a LOGO.  State is unchanged.
 */
static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}
/* Our LOGO to an UNUSED node completed; nothing holds the node, so
 * report it freed to the state machine.
 */
static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}
/* Device-remove request on an UNUSED node: nothing to tear down, just
 * report the node freed.
 */
static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}
/* Device-recovery event on an UNUSED node is a no-op; state unchanged. */
static uint32_t
lpfc_device_recov_unused_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}
/* PLOGI collision: a PLOGI arrived while our own PLOGI to the same
 * NPortID is outstanding.  Resolve by portname comparison: if our
 * portname is greater-or-equal, reject the incoming PLOGI (the remote
 * side will accept ours); if less, accept it and continue discovery.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	/* Service parameters follow the 4-byte ELS command word */
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			/* Node no longer needs its own discovery PLOGI */
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				/* Discovery complete: clear active flag,
				 * stop the discovery timer and end RSCN
				 * processing.
				 */
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}
  1128. static uint32_t
  1129. lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1130. void *arg, uint32_t evt)
  1131. {
  1132. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1133. struct ls_rjt stat;
  1134. memset(&stat, 0, sizeof (struct ls_rjt));
  1135. stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
  1136. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  1137. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  1138. return ndlp->nlp_state;
  1139. }
/* LOGO received while our PLOGI is outstanding: abort the PLOGI and
 * process the LOGO via the common handler.  State is unchanged here;
 * lpfc_rcv_logo drives any further transition.
 */
static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
	if (vport->phba->sli_rev == LPFC_SLI_REV3)
		ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}
/* A LOGO-class ELS arrived while our PLOGI is outstanding: abort the
 * PLOGI, acknowledge (ACC for a received LOGO, otherwise issue our own
 * LOGO), then park the node in NPR state with a 1-second delayed-PLOGI
 * retry.
 */
static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in npr state set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return ndlp->nlp_state;
}
/* Completion handler for a PLOGI issued from PLOGI_ISSUE state.
 *
 * On success: validates the returned service parameters (WWNs, sparm
 * check), records class-of-service support and max frame size, updates
 * pt2pt E_D_TOV/R_A_TOV state when applicable, then issues REG_LOGIN
 * and transitions to REG_LOGIN_ISSUE.
 *
 * On any failure path (bad status, bad sparms, allocation or mailbox
 * failure): parks the node in NPR, flags NLP_DEFER_RM so stale state is
 * not processed, and reports NLP_STE_FREED_NODE.
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	uint32_t vid, flag;
	IOCB_t *irsp;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *mbox;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	lp = (uint32_t *) prsp->virt;
	/* Service parameters follow the 4-byte ELS response code */
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);

	/* Record which class of service this node will use for FCP */
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	/* Record all classes the remote port claims to support */
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;

	/* Max receive frame size from the BB credit fields */
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	if ((vport->fc_flag & FC_PT2PT) &&
	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds */
			/* NOTE(review): this converts phba->fc_edtov, not the
			 * just-parsed ed_tov — looks suspicious; confirm
			 * against the equivalent logic in lpfc_cmpl_els_flogi.
			 */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
		/* Suppress-response only honored for Emulex peers that
		 * advertise it in the vendor version fields.
		 */
		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
		    sp->cmn.valid_vendor_ver_level) {
			vid = be32_to_cpu(sp->un.vv.vid);
			flag = be32_to_cpu(sp->un.vv.flags);
			if ((vid == LPFC_VV_EMLX_ID) &&
			    (flag & LPFC_VV_SUPPRESS_RSP))
				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
		}

		/*
		 * Use the larger EDTOV
		 * RATOV = 2 * EDTOV for pt-to-pt
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_issue_reg_vfi(vport);
		} else {
			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!mbox) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
						 "0133 PLOGI: no memory "
						 "for config_link "
						 "Data: x%x x%x x%x x%x\n",
						 ndlp->nlp_DID, ndlp->nlp_state,
						 ndlp->nlp_flag, ndlp->nlp_rpi);
				goto out;
			}

			lpfc_config_link(phba, mbox);

			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				goto out;
			}
		}
	}

	/* Drop any stale registration before re-registering the login */
	lpfc_unreg_rpi(vport, ndlp);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0018 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
		/* Well-known DIDs get dedicated completion handlers */
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}

		/* Hold a node reference for the mailbox completion */
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}

out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	/*
	** In case the node reference counter does not go to zero, ensure that
	** the stale state for the node is not processed.
	*/

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}
/* LOGO completion while in PLOGI_ISSUE state: no action, no state
 * change.
 */
static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}
  1364. static uint32_t
  1365. lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
  1366. struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
  1367. {
  1368. struct lpfc_hba *phba;
  1369. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  1370. MAILBOX_t *mb = &pmb->u.mb;
  1371. uint16_t rpi;
  1372. phba = vport->phba;
  1373. /* Release the RPI */
  1374. if (!(phba->pport->load_flag & FC_UNLOADING) &&
  1375. !mb->mbxStatus) {
  1376. rpi = pmb->u.mb.un.varWords[0];
  1377. lpfc_release_rpi(phba, vport, ndlp, rpi);
  1378. }
  1379. return ndlp->nlp_state;
  1380. }
  1381. static uint32_t
  1382. lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1383. void *arg, uint32_t evt)
  1384. {
  1385. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1386. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1387. spin_lock_irq(shost->host_lock);
  1388. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1389. spin_unlock_irq(shost->host_lock);
  1390. return ndlp->nlp_state;
  1391. } else {
  1392. /* software abort outstanding PLOGI */
  1393. lpfc_els_abort(vport->phba, ndlp);
  1394. lpfc_drop_node(vport, ndlp);
  1395. return NLP_STE_FREED_NODE;
  1396. }
  1397. }
/* Device-recovery while our PLOGI is outstanding: unless an RSCN is
 * deferred (in which case do nothing so its processing is not
 * disturbed), abort the PLOGI and park the node in NPR state.
 */
static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}
/* PLOGI received while our ADISC is outstanding: abort the ADISC and
 * process the PLOGI.  If it is handled and the node was awaiting
 * discovery, clear that flag and kick off the next ADISC.  If it could
 * not be handled, fall back to issuing our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	/* PLOGI not handled: retry the login ourselves */
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}
  1445. static uint32_t
  1446. lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1447. void *arg, uint32_t evt)
  1448. {
  1449. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1450. if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  1451. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1452. return ndlp->nlp_state;
  1453. }
  1454. static uint32_t
  1455. lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1456. void *arg, uint32_t evt)
  1457. {
  1458. struct lpfc_hba *phba = vport->phba;
  1459. struct lpfc_iocbq *cmdiocb;
  1460. cmdiocb = (struct lpfc_iocbq *) arg;
  1461. /* software abort outstanding ADISC */
  1462. lpfc_els_abort(phba, ndlp);
  1463. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1464. return ndlp->nlp_state;
  1465. }
  1466. static uint32_t
  1467. lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
  1468. struct lpfc_nodelist *ndlp,
  1469. void *arg, uint32_t evt)
  1470. {
  1471. struct lpfc_iocbq *cmdiocb;
  1472. cmdiocb = (struct lpfc_iocbq *) arg;
  1473. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1474. return ndlp->nlp_state;
  1475. }
  1476. static uint32_t
  1477. lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1478. void *arg, uint32_t evt)
  1479. {
  1480. struct lpfc_iocbq *cmdiocb;
  1481. cmdiocb = (struct lpfc_iocbq *) arg;
  1482. /* Treat like rcv logo */
  1483. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
  1484. return ndlp->nlp_state;
  1485. }
  1486. static uint32_t
  1487. lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
  1488. struct lpfc_nodelist *ndlp,
  1489. void *arg, uint32_t evt)
  1490. {
  1491. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1492. struct lpfc_hba *phba = vport->phba;
  1493. struct lpfc_iocbq *cmdiocb, *rspiocb;
  1494. IOCB_t *irsp;
  1495. ADISC *ap;
  1496. int rc;
  1497. cmdiocb = (struct lpfc_iocbq *) arg;
  1498. rspiocb = cmdiocb->context_un.rsp_iocb;
  1499. ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
  1500. irsp = &rspiocb->iocb;
  1501. if ((irsp->ulpStatus