
/drivers/scsi/lpfc/lpfc_nportdisc.c

http://github.com/mirrors/linux
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"


/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct lpfc_name *nn, struct lpfc_name *pn)
{
	/* First, we MUST have a RPI registered */
	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
		return 0;

	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
		return 0;

	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
		return 0;

	/* we match, return success */
	return 1;
}
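
/* Validate the service parameters in a PLOGI/FLOGI payload against our
 * own, clamping the per-class receive data field sizes and the common
 * BB receive size to what this port advertised.  On success the remote
 * WWNN/WWPN are cached in the ndlp and 1 is returned; a return of 0
 * means the device sent invalid service parameters.
 */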
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm *sp, uint32_t class, int flogi)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
				     hsp->cls1.rcvDataSizeLsb);
			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
				     sp->cls1.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls1.rcvDataSizeLsb =
					hsp->cls1.rcvDataSizeLsb;
				sp->cls1.rcvDataSizeMsb =
					hsp->cls1.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS1)
		goto bad_service_param;
	if (sp->cls2.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
				     hsp->cls2.rcvDataSizeLsb);
			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
				     sp->cls2.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls2.rcvDataSizeLsb =
					hsp->cls2.rcvDataSizeLsb;
				sp->cls2.rcvDataSizeMsb =
					hsp->cls2.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS2)
		goto bad_service_param;
	if (sp->cls3.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
				     hsp->cls3.rcvDataSizeLsb);
			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
				     sp->cls3.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls3.rcvDataSizeLsb =
					hsp->cls3.rcvDataSizeLsb;
				sp->cls3.rcvDataSizeMsb =
					hsp->cls3.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS3)
		goto bad_service_param;

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}
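
/* Return a pointer to the ELS rsp payload that completed cmdiocb, or
 * NULL if the cmd buffer was already released (e.g. by lpfc_els_abort);
 * in the NULL case ulpStatus is forced to an error so callers treat
 * the exchange as failed.
 */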
static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void *ptr = NULL;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* For lpfc_els_abort, context2 could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulpStatus error since we are returning NULL ptr */
		if (!(irsp->ulpStatus)) {
			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
		}
		ptr = NULL;
	}
	return ptr;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry. This
 * routine effectively results in a "software abort".
 */
void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* In case of error recovery path, we might have a NULL pring here */
	if (unlikely(!pring))
		return;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	/* Clean up all fabric IOs first. */
	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
	 * txcmplq so that the abort operation completes them successfully.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	INIT_LIST_HEAD(&abort_list);

	/* Now process the txq */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			list_del_init(&iocb->list);
			list_add_tail(&iocb->list, &abort_list);
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}

/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
 * @phba: pointer to lpfc hba data structure.
 * @link_mbox: pointer to CONFIG_LINK mailbox object
 *
 * This routine is only called if we are SLI3, direct connect pt2pt
 * mode and the remote NPort issues the PLOGI after link up.
 */
static void
lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
{
	LPFC_MBOXQ_t *login_mbox;
	MAILBOX_t *mb = &link_mbox->u.mb;
	struct lpfc_iocbq *save_iocb;
	struct lpfc_nodelist *ndlp;
	int rc;

	ndlp = link_mbox->ctx_ndlp;
	login_mbox = link_mbox->context3;
	save_iocb = login_mbox->context3;
	link_mbox->context3 = NULL;
	login_mbox->context3 = NULL;

	/* Check for CONFIG_LINK error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
				mb->mbxStatus);
		mempool_free(login_mbox, phba->mbox_mem_pool);
		mempool_free(link_mbox, phba->mbox_mem_pool);
		kfree(save_iocb);
		return;
	}

	/* Now that CONFIG_LINK completed, and our SID is configured,
	 * we can now proceed with sending the PLOGI ACC.
	 */
	rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
			      save_iocb, ndlp, login_mbox);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4576 PLOGI ACC fails pt2pt discovery: %x\n",
				rc);
		mempool_free(login_mbox, phba->mbox_mem_pool);
	}

	mempool_free(link_mbox, phba->mbox_mem_pool);
	kfree(save_iocb);
}

/**
 * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function provides the unreg rpi mailbox completion handler for a tgt.
 * The routine frees the memory resources associated with the completed
 * mailbox command and transmits the ELS ACC.
 *
 * This routine is only called if we are SLI4, acting in target
 * mode and the remote NPort issues the PLOGI after link up.
 **/
static void
lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
	LPFC_MBOXQ_t *mbox = pmb->context3;
	struct lpfc_iocbq *piocb = NULL;
	int rc;

	if (mbox) {
		pmb->context3 = NULL;
		piocb = mbox->context3;
		mbox->context3 = NULL;
	}

	/*
	 * Complete the unreg rpi mbx request, and update flags.
	 * This will also restart any deferred events.
	 */
	lpfc_nlp_get(ndlp);
	lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);

	if (!piocb) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
				 "4578 PLOGI ACC fail\n");
		if (mbox)
			mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
				 "4579 PLOGI ACC fail %x\n", rc);
		if (mbox)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
	kfree(piocb);
out:
	lpfc_nlp_put(ndlp);
}
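
/* Process a rcv'ed PLOGI: validate the service parameters, handle the
 * pt2pt and NPIV restricted-login special cases, and queue the
 * REG_LOGIN needed before the ACC (or a deferred ACC for SLI3 pt2pt /
 * SLI4 target) can be sent.  Returns 1 if the PLOGI was accepted or
 * deferred, 0 if it was rejected.
 */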
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint64_t nlp_portwwn = 0;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *link_mbox;
	LPFC_MBOXQ_t *login_mbox;
	struct lpfc_iocbq *save_iocb;
	struct ls_rjt stat;
	uint32_t vid, flag;
	u16 rpi;
	int rc, defer_acc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}
	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}

	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
		/* Reject this request because invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
			 "x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			 ndlp->nlp_rpi, vport->port_state,
			 vport->fc_flag);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	defer_acc = 0;
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
	/* if already logged in, do implicit logout */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
		/* fall through */
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		/* For initiators, lpfc_plogi_confirm_nport skips fabric did.
		 * For target mode, execute implicit logo.
		 * Fabric nodes go into NPR.
		 */
		if (!(ndlp->nlp_type & NLP_FABRIC) &&
		    !(phba->nvmet_support)) {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
					 ndlp, NULL);
			return 1;
		}
		if (nlp_portwwn != 0 &&
		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0143 PLOGI recv'd from DID: x%x "
					 "WWPN changed: old %llx new %llx\n",
					 ndlp->nlp_DID,
					 (unsigned long long)nlp_portwwn,
					 (unsigned long long)
					 wwn_to_u64(sp->portName.u.wwn));

		ndlp->nlp_prev_state = ndlp->nlp_state;
		/* rport needs to be unregistered first */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		break;
	}

	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag &= ~NLP_FIRSTBURST;

	login_mbox = NULL;
	link_mbox = NULL;
	save_iocb = NULL;

	/* Check for Nport to NPort pt2pt protocol */
	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;

		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		/*
		 * For pt-to-pt, use the larger EDTOV
		 * RATOV = 2 * EDTOV
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_issue_reg_vfi(vport);
		else {
			defer_acc = 1;
			link_mbox = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!link_mbox)
				goto out;
			lpfc_config_link(phba, link_mbox);
			link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
			link_mbox->vport = vport;
			link_mbox->ctx_ndlp = ndlp;

			save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
			if (!save_iocb)
				goto out;
			/* Save info from cmd IOCB used in rsp */
			memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
			       sizeof(struct lpfc_iocbq));
		}

		lpfc_can_disctmo(vport);
	}

	ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
	if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
	    sp->cmn.valid_vendor_ver_level) {
		vid = be32_to_cpu(sp->un.vv.vid);
		flag = be32_to_cpu(sp->un.vv.flags);
		if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
	}

	login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!login_mbox)
		goto out;

	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
	if (phba->nvmet_support && !defer_acc) {
		link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!link_mbox)
			goto out;

		/* As unique identifiers such as iotag would be overwritten
		 * with those from the cmdiocb, allocate separate temporary
		 * storage for the copy.
		 */
		save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
		if (!save_iocb)
			goto out;

		/* Unreg RPI is required for SLI4. */
		rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
		lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
		link_mbox->vport = vport;
		link_mbox->ctx_ndlp = ndlp;
		link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;

		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
			ndlp->nlp_flag |= NLP_UNREG_INP;

		/* Save info from cmd IOCB used in rsp */
		memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));

		/* Delay sending ACC till unreg RPI completes. */
		defer_acc = 1;
	} else if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_unreg_rpi(vport, ndlp);

	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
			  (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
	if (rc)
		goto out;

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this login_mbox command to be processed later.
	 */
	login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	login_mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI. If pending plogi
	 * is not canceled here, the plogi will be rejected by
	 * remote port and will be retried. On a configuration with
	 * single discovery thread, this will cause a huge delay in
	 * discovery. Also this will cause multiple state machines
	 * running in parallel for this node.
	 * This only applies to a fabric environment.
	 */
	if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
	    (vport->fc_flag & FC_FABRIC)) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* no deferred ACC */
		kfree(save_iocb);

		/* In order to preserve RPIs, we want to cleanup
		 * the default RPI the firmware created to rcv
		 * this ELS request. The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
			ndlp, login_mbox);
		if (rc)
			mempool_free(login_mbox, phba->mbox_mem_pool);
		return 1;
	}
	if (defer_acc) {
		/* So the order here should be:
		 * SLI3 pt2pt
		 *   Issue CONFIG_LINK mbox
		 *   CONFIG_LINK cmpl
		 * SLI4 tgt
		 *   Issue UNREG RPI mbx
		 *   UNREG RPI cmpl
		 * Issue PLOGI ACC
		 * PLOGI ACC cmpl
		 * Issue REG_LOGIN mbox
		 */

		/* Save the REG_LOGIN mbox and rcv IOCB copy for later use */
		link_mbox->context3 = login_mbox;
		login_mbox->context3 = save_iocb;

		/* Start the ball rolling by issuing CONFIG_LINK here */
		rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out;
		return 1;
	}

	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
	if (rc)
		mempool_free(login_mbox, phba->mbox_mem_pool);
	return 1;
out:
	if (defer_acc)
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"4577 discovery failure: %p %p %p\n",
				save_iocb, link_mbox, login_mbox);
	kfree(save_iocb);
	if (link_mbox)
		mempool_free(link_mbox, phba->mbox_mem_pool);
	if (login_mbox)
		mempool_free(login_mbox, phba->mbox_mem_pool);

	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object
 *
 * This routine is invoked to issue a completion to a rcv'ed
 * ADISC or PDISC after the paused RPI has been resumed.
 **/
static void
lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_nodelist *ndlp;
	uint32_t cmd;

	elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
	ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
	vport = mboxq->vport;
	cmd = elsiocb->drvrTimeout;

	if (cmd == ELS_CMD_ADISC) {
		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
	} else {
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
				 ndlp, NULL);
	}
	kfree(elsiocb);
	mempool_free(mboxq, phba->mbox_mem_pool);
}
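
/* Process a rcv'ed ADISC or PDISC.  If the WWNs match our node table
 * entry, the command is ACC'd (for SLI4 the RPI is resumed first and
 * the ACC is sent from the mailbox completion); otherwise it is
 * rejected and the node is put back in NPR state to retry PLOGI.
 */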
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) &ap->nodeName;
		ppn = (struct lpfc_name *) &ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) &sp->nodeName;
		ppn = (struct lpfc_name *) &sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {

		/*
		 * As soon as we send ACC, the remote NPort can
		 * start sending us data. Thus, for SLI4 we must
		 * resume the RPI before the ACC goes out.
		 */
		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
					  GFP_KERNEL);
			if (elsiocb) {

				/* Save info from cmd IOCB used in rsp */
				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
				       sizeof(struct lpfc_iocbq));

				/* Save the ELS cmd */
				elsiocb->drvrTimeout = cmd;

				lpfc_sli4_resume_rpi(ndlp,
					lpfc_mbx_cmpl_resume_rpi, elsiocb);
				goto out;
			}
		}

		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
					 ndlp, NULL);
		}
out:
		/* If we are authenticated, move to the proper state */
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
		else
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

		return 1;
	}

	/* Reject this request because invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
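
/* Process a rcv'ed LOGO or PRLO.  The ELS is ACC'd and the node is put
 * in NPR state.  A LOGO from the fabric controller triggers link/vport
 * recovery (FDISC re-instantiation or pport rediscovery); for FCP
 * targets and non-initiators a delayed PLOGI retry is scheduled.
 */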
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (ndlp->nlp_DID == Fabric_DID) {
		if (vport->port_state <= LPFC_FDISC)
			goto out;
		lpfc_linkdown_port(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
		spin_unlock_irq(shost->host_lock);
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_LOGO_RCVD)) &&
				    (vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			spin_lock_irq(shost->host_lock);
			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
			lpfc_retry_pport_discovery(phba);
		}
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		    ((ndlp->nlp_type & NLP_FCP_TARGET) ||
		     !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
		   (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO. The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}
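
/* Verify a rcv'ed PRLI type is supported in the current mode: NVMET
 * ports take only NVME PRLIs, and initiators without NVME support
 * reject NVME PRLIs.  Returns 1 if supported, 0 after sending LS_RJT.
 */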
static uint32_t
lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    struct lpfc_iocbq *cmdiocb)
{
	struct ls_rjt stat;
	uint32_t *payload;
	uint32_t cmd;

	payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
	cmd = *payload;
	if (vport->phba->nvmet_support) {
		/* Must be a NVME PRLI */
		if (cmd == ELS_CMD_PRLI)
			goto out;
	} else {
		/* Initiator mode. */
		if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
			goto out;
	}
	return 1;
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
			 "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
			 "state x%x flags x%x\n",
			 cmd, ndlp->nlp_rpi, ndlp->nlp_state,
			 ndlp->nlp_flag);
	memset(&stat, 0, sizeof(struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
			    ndlp, NULL);
	return 0;
}
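
/* Record the FC4 type and initiator/target roles advertised in a
 * rcv'ed PRLI on the ndlp, and propagate any FCP role change to the
 * FC transport rport.
 */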
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	if ((npr->prliType == PRLI_FCP_TYPE) ||
	    (npr->prliType == PRLI_NVME_TYPE)) {
		if (npr->initiatorFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_INITIATOR;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_INITIATOR;
		}
		if (npr->targetFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_TARGET;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_TARGET;
			if (npr->writeXferRdyDis)
				ndlp->nlp_flag |= NLP_FIRSTBURST;
		}
		if (npr->Retry && ndlp->nlp_type &
				  (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

		if (npr->Retry && phba->nsler &&
		    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;

		/* If this driver is in nvme target mode, set the ndlp's fc4
		 * type to NVME provided the PRLI response claims NVME FC4
		 * type.  Target mode does not issue gft_id so doesn't get
		 * the fc4 type set until now.
		 */
		if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
			ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		}
		if (npr->prliType == PRLI_FCP_TYPE)
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg:   role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
			fc_remote_port_rolechg(rport, roles);
	}
}
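
/* Decide whether this node should be authenticated via ADISC on the
 * next discovery cycle.  Returns 1 (and sets NLP_NPR_ADISC) if the RPI
 * is registered and the config/FCP-2 rules allow ADISC; otherwise
 * returns 0, unregistering the RPI if one was registered.
 */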
static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		return 0;
	}

	if (!(vport->fc_flag & FC_PT2PT)) {
		/* Check config parameter use-adisc or FCP-2 */
		if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
		     (ndlp->nlp_type & NLP_FCP_TARGET)))) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			return 1;
		}
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	lpfc_unreg_rpi(vport, ndlp);
	return 0;
}

/**
 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
 * @phba : Pointer to lpfc_hba structure.
 * @vport: Pointer to lpfc_vport structure.
 * @ndlp : Pointer to lpfc_nodelist structure.
 * @rpi  : rpi to be released.
 *
 * This function will send a unreg_login mailbox command to the firmware
 * to release a rpi.
 **/
static void
lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
		 struct lpfc_nodelist *ndlp, uint16_t rpi)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	/* If there is already an UNREG in progress for this ndlp,
	 * no need to queue up another one.
	 */
	if (ndlp->nlp_flag & NLP_UNREG_INP) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1435 release_rpi SKIP UNREG x%x on "
				 "NPort x%x deferred x%x flg x%x "
				 "Data: x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_defer_did,
				 ndlp->nlp_flag, ndlp);
		return;
	}

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					     GFP_KERNEL);
	if (!pmb)
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2796 mailbox memory allocation failed\n");
	else {
		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		pmb->vport = vport;
		pmb->ctx_ndlp = ndlp;

		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
			ndlp->nlp_flag |= NLP_UNREG_INP;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1437 release_rpi UNREG x%x "
				 "on NPort x%x flg x%x\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(pmb, phba->mbox_mem_pool);
	}
}
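
/* Catch-all handler for events that are illegal in a node's current
 * state: log the bogus transition and leave the state unchanged,
 * releasing the RPI if the event was a completed REG_LOGIN.
 */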
static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	uint16_t rpi;

	phba = vport->phba;
	/* Release the RPI if reglogin completing */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    (evt == NLP_EVT_CMPL_REG_LOGIN) &&
	    (!pmb->u.mb.mbxStatus)) {
		rpi = pmb->u.mb.un.varWords[0];
		lpfc_release_rpi(phba, vport, ndlp, rpi);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0271 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * to stop it.
	 */
	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0272 Illegal State Transition: node x%x "
				 "event x%x, state x%x Data: x%x x%x\n",
				 ndlp->nlp_DID, evt, ndlp->nlp_state,
				 ndlp->nlp_rpi, ndlp->nlp_flag);
	}
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */
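/* Each handler below services one (state, event) pair for the node's
 * discovery state machine and returns the node's next state, or
 * NLP_STE_FREED_NODE if the node has been freed and must not be
 * referenced again.
 */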

static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_unused_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}
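
/* PLOGI collision: a PLOGI arrived while our own PLOGI to the same
 * NPort is outstanding.  The port whose portname is lower accepts the
 * incoming PLOGI; otherwise it is rejected with "command in progress"
 * and our own PLOGI is left to complete.
 */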
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_iocbq  *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		 * ours.
		 */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
	} else {
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
	if (vport->phba->sli_rev == LPFC_SLI_REV3)
		ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in npr state set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	return ndlp->nlp_state;
}
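
/* Our PLOGI completed: validate the rsp service parameters, handle the
 * pt2pt TOV update, and issue REG_LOGIN for the RPI.  On any failure
 * the node is parked in NPR state and marked NLP_DEFER_RM.
 */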
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	uint32_t vid, flag;
	IOCB_t *irsp;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *mbox;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	lp = (uint32_t *) prsp->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
	    (wwn_to_u64(sp->portName.u.wwn) == 0 ||
	     wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	if ((vport->fc_flag & FC_PT2PT) &&
	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
		    sp->cmn.valid_vendor_ver_level) {
			vid = be32_to_cpu(sp->un.vv.vid);
			flag = be32_to_cpu(sp->un.vv.flags);
			if ((vid == LPFC_VV_EMLX_ID) &&
			    (flag & LPFC_VV_SUPPRESS_RSP))
				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
		}

		/*
		 * Use the larger EDTOV
		 * RATOV = 2 * EDTOV for pt-to-pt
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_issue_reg_vfi(vport);
		} else {
			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!mbox) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
						 "0133 PLOGI: no memory "
						 "for config_link "
						 "Data: x%x x%x x%x x%x\n",
						 ndlp->nlp_DID, ndlp->nlp_state,
						 ndlp->nlp_flag, ndlp->nlp_rpi);
				goto out;
			}

			lpfc_config_link(phba, mbox);

			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				goto out;
			}
		}
	}

	lpfc_unreg_rpi(vport, ndlp);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0018 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}

out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	/*
	 * In case the node reference counter does not go to zero, ensure that
	 * the stale state for the node is not processed.
	 */
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint16_t rpi;

	phba = vport->phba;
	/* Release the RPI */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    !mb->mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		lpfc_release_rpi(phba, vport, ndlp, rpi);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
  1486. static uint32_t
  1487. lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
  1488. struct lpfc_nodelist *ndlp,
  1489. void *arg, uint32_t evt)
  1490. {
  1491. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1492. struct lpfc_hba *phba = vport->phba;
  1493. struct lpfc_iocbq *cmdiocb, *rspiocb;
  1494. IOCB_t *irsp;
  1495. ADISC *ap;
  1496. int rc;
  1497. cmdiocb = (struct lpfc_iocbq *) arg;
  1498. rspiocb = cmdiocb->context_un.rsp_iocb;
  1499. ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
  1500. irsp = &rspiocb->iocb;
  1501. if ((irsp->ulpStatus) ||
  1502. (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
  1503. /* 1 sec timeout */
  1504. mod_timer(&ndlp->nlp_delayfunc,
  1505. jiffies + msecs_to_jiffies(1000));
  1506. spin_lock_irq(shost->host_lock);
  1507. ndlp->nlp_flag |= NLP_DELAY_TMO;
  1508. spin_unlock_irq(shost->host_lock);
  1509. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  1510. memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
  1511. memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
  1512. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1513. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1514. lpfc_unreg_rpi(vport, ndlp);
  1515. return ndlp->nlp_state;
  1516. }
  1517. if (phba->sli_rev == LPFC_SLI_REV4) {
  1518. rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
  1519. if (rc) {
  1520. /* Stay in state and retry. */
  1521. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1522. return ndlp->nlp_state;
  1523. }
  1524. }
  1525. if (ndlp->nlp_type & NLP_FCP_TARGET) {
  1526. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1527. lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
  1528. } else {
  1529. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1530. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  1531. }
  1532. return ndlp->nlp_state;
  1533. }
  1534. static uint32_t
  1535. lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1536. void *arg, uint32_t evt)
  1537. {
  1538. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1539. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1540. spin_lock_irq(shost->host_lock);
  1541. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1542. spin_unlock_irq(shost->host_lock);
  1543. return ndlp->nlp_state;
  1544. } else {
  1545. /* software abort outstanding ADISC */
  1546. lpfc_els_abort(vport->phba, ndlp);
  1547. lpfc_drop_node(vport, ndlp);
  1548. return NLP_STE_FREED_NODE;
  1549. }
  1550. }
  1551. static uint32_t
  1552. lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
  1553. struct lpfc_nodelist *ndlp,
  1554. void *arg,
  1555. uint32_t evt)
  1556. {
  1557. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1558. struct lpfc_hba *phba = vport->phba;
  1559. /* Don't do anything that will mess up processing of the
  1560. * previous RSCN.
  1561. */
  1562. if (vport->fc_flag & FC_RSCN_DEFERRED)
  1563. return ndlp->nlp_state;
  1564. /* software abort outstanding ADISC */
  1565. lpfc_els_abort(phba, ndlp);
  1566. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1567. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1568. spin_lock_irq(shost->host_lock);
  1569. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  1570. spin_unlock_irq(shost->host_lock);
  1571. lpfc_disc_set_adisc(vport, ndlp);
  1572. return ndlp->nlp_state;
  1573. }
  1574. static uint32_t
  1575. lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
  1576. struct lpfc_nodelist *ndlp,
  1577. void *arg,
  1578. uint32_t evt)
  1579. {
  1580. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1581. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  1582. return ndlp->nlp_state;
  1583. }
  1584. static uint32_t
  1585. lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
  1586. struct lpfc_nodelist *ndlp,
  1587. void *arg,
  1588. uint32_t evt)
  1589. {
  1590. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1591. struct ls_rjt stat;
  1592. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
  1593. return ndlp->nlp_state;
  1594. }
  1595. if (vport->phba->nvmet_support) {
  1596. /* NVME Target mode. Handle and respond to the PRLI and
  1597. * transition to UNMAPPED provided the RPI has completed
  1598. * registration.
  1599. */
  1600. if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
  1601. lpfc_rcv_prli(vport, ndlp, cmdiocb);
  1602. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1603. } else {
  1604. /* RPI registration has not completed. Reject the PRLI
  1605. * to prevent an illegal state transition when the
  1606. * rpi registration does complete.
  1607. */
  1608. memset(&stat, 0, sizeof(struct ls_rjt));
  1609. stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
  1610. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  1611. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
  1612. ndlp, NULL);
  1613. return ndlp->nlp_state;
  1614. }
  1615. } else {
  1616. /* Initiator mode. */
  1617. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1618. }
  1619. return ndlp->nlp_state;
  1620. }
  1621. static uint32_t
  1622. lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
  1623. struct lpfc_nodelist *ndlp,
  1624. void *arg,
  1625. uint32_t evt)
  1626. {
  1627. struct lpfc_hba *phba = vport->phba;
  1628. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1629. LPFC_MBOXQ_t *mb;
  1630. LPFC_MBOXQ_t *nextmb;
  1631. struct lpfc_dmabuf *mp;
  1632. struct lpfc_nodelist *ns_ndlp;
  1634. /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
  1635. if ((mb = phba->sli.mbox_active)) {
  1636. if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
  1637. (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
  1638. ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
  1639. lpfc_nlp_put(ndlp);
  1640. mb->ctx_ndlp = NULL;
  1641. mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  1642. }
  1643. }
  1644. spin_lock_irq(&phba->hbalock);
  1645. list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
  1646. if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
  1647. (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
  1648. mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
  1649. if (mp) {
  1650. __lpfc_mbuf_free(phba, mp->virt, mp->phys);
  1651. kfree(mp);
  1652. }
  1653. ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
  1654. lpfc_nlp_put(ndlp);
  1655. list_del(&mb->list);
  1656. phba->sli.mboxq_cnt--;
  1657. mempool_free(mb, phba->mbox_mem_pool);
  1658. }
  1659. }
  1660. spin_unlock_irq(&phba->hbalock);
  1661. /* software abort if any GID_FT is outstanding */
  1662. if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
  1663. ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
  1664. if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
  1665. lpfc_els_abort(phba, ns_ndlp);
  1666. }
  1667. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1668. return ndlp->nlp_state;
  1669. }
  1670. static uint32_t
  1671. lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
  1672. struct lpfc_nodelist *ndlp,
  1673. void *arg,
  1674. uint32_t evt)
  1675. {
  1676. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1677. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1678. return ndlp->nlp_state;
  1679. }
  1680. static uint32_t
  1681. lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
  1682. struct lpfc_nodelist *ndlp,
  1683. void *arg,
  1684. uint32_t evt)
  1685. {
  1686. struct lpfc_iocbq *cmdiocb;
  1687. cmdiocb = (struct lpfc_iocbq *) arg;
  1688. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
  1689. return ndlp->nlp_state;
  1690. }
  1691. static uint32_t
  1692. lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
  1693. struct lpfc_nodelist *ndlp,
  1694. void *arg,
  1695. uint32_t evt)
  1696. {
  1697. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1698. struct lpfc_hba *phba = vport->phba;
  1699. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  1700. MAILBOX_t *mb = &pmb->u.mb;
  1701. uint32_t did = mb->un.varWords[1];
  1702. if (mb->mbxStatus) {
  1703. /* RegLogin failed */
  1704. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  1705. "0246 RegLogin failed Data: x%x x%x x%x x%x "
  1706. "x%x\n",
  1707. did, mb->mbxStatus, vport->port_state,
  1708. mb->un.varRegLogin.vpi,
  1709. mb->un.varRegLogin.rpi);
  1710. /*
  1711. * If RegLogin failed due to lack of HBA resources do not
  1712. * retry discovery.
  1713. */
  1714. if (mb->mbxStatus == MBXERR_RPI_FULL) {
  1715. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1716. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1717. return ndlp->nlp_state;
  1718. }
1719. /* Put ndlp in NPR state; set PLOGI timer for 1 sec */
  1720. mod_timer(&ndlp->nlp_delayfunc,
  1721. jiffies + msecs_to_jiffies(1000 * 1));
  1722. spin_lock_irq(shost->host_lock);
  1723. ndlp->nlp_flag |= NLP_DELAY_TMO;
  1724. spin_unlock_irq(shost->host_lock);
  1725. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  1726. lpfc_issue_els_logo(vport, ndlp, 0);
  1727. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1728. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1729. return ndlp->nlp_state;
  1730. }
  1731. /* SLI4 ports have preallocated logical rpis. */
  1732. if (phba->sli_rev < LPFC_SLI_REV4)
  1733. ndlp->nlp_rpi = mb->un.varWords[0];
  1734. ndlp->nlp_flag |= NLP_RPI_REGISTERED;
  1735. /* Only if we are not a fabric nport do we issue PRLI */
  1736. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  1737. "3066 RegLogin Complete on x%x x%x x%x\n",
  1738. did, ndlp->nlp_type, ndlp->nlp_fc4_type);
  1739. if (!(ndlp->nlp_type & NLP_FABRIC) &&
  1740. (phba->nvmet_support == 0)) {
  1741. /* The driver supports FCP and NVME concurrently. If the
  1742. * ndlp's nlp_fc4_type is still zero, the driver doesn't
  1743. * know what PRLI to send yet. Figure that out now and
  1744. * call PRLI depending on the outcome.
  1745. */
  1746. if (vport->fc_flag & FC_PT2PT) {
  1747. /* If we are pt2pt, there is no Fabric to determine
  1748. * the FC4 type of the remote nport. So if NVME
1749. * is configured, try it.
  1750. */
  1751. ndlp->nlp_fc4_type |= NLP_FC4_FCP;
  1752. if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
  1753. (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
  1754. ndlp->nlp_fc4_type |= NLP_FC4_NVME;
  1755. /* We need to update the localport also */
  1756. lpfc_nvme_update_localport(vport);
  1757. }
  1758. } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
  1759. ndlp->nlp_fc4_type |= NLP_FC4_FCP;
  1760. } else if (ndlp->nlp_fc4_type == 0) {
  1761. /* If we are only configured for FCP, the driver
  1762. * should just issue PRLI for FCP. Otherwise issue
  1763. * GFT_ID to determine if remote port supports NVME.
  1764. */
  1765. if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
  1766. lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
  1767. ndlp->nlp_DID);
  1768. return ndlp->nlp_state;
  1769. }
  1770. ndlp->nlp_fc4_type = NLP_FC4_FCP;
  1771. }
  1772. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1773. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
  1774. if (lpfc_issue_els_prli(vport, ndlp, 0)) {
  1775. lpfc_issue_els_logo(vport, ndlp, 0);
  1776. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1777. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1778. }
  1779. } else {
  1780. if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
  1781. phba->targetport->port_id = vport->fc_myDID;
  1782. /* Only Fabric ports should transition. NVME target
  1783. * must complete PRLI.
  1784. */
  1785. if (ndlp->nlp_type & NLP_FABRIC) {
  1786. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1787. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  1788. }
  1789. }
  1790. return ndlp->nlp_state;
  1791. }
  1792. static uint32_t
  1793. lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
  1794. struct lpfc_nodelist *ndlp,
  1795. void *arg,
  1796. uint32_t evt)
  1797. {
  1798. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1799. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1800. spin_lock_irq(shost->host_lock);
  1801. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1802. spin_unlock_irq(shost->host_lock);
  1803. return ndlp->nlp_state;
  1804. } else {
  1805. lpfc_drop_node(vport, ndlp);
  1806. return NLP_STE_FREED_NODE;
  1807. }
  1808. }
  1809. static uint32_t
  1810. lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
  1811. struct lpfc_nodelist *ndlp,
  1812. void *arg,
  1813. uint32_t evt)
  1814. {
  1815. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1816. /* Don't do anything that will mess up processing of the
  1817. * previous RSCN.
  1818. */
  1819. if (vport->fc_flag & FC_RSCN_DEFERRED)
  1820. return ndlp->nlp_state;
  1821. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1822. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1823. spin_lock_irq(shost->host_lock);
  1824. /* If we are a target we won't immediately transition into PRLI,
  1825. * so if REG_LOGIN already completed we don't need to ignore it.
  1826. */
  1827. if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
  1828. !vport->phba->nvmet_support)
  1829. ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
  1830. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  1831. spin_unlock_irq(shost->host_lock);
  1832. lpfc_disc_set_adisc(vport, ndlp);
  1833. return ndlp->nlp_state;
  1834. }
  1835. static uint32_t
  1836. lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1837. void *arg, uint32_t evt)
  1838. {
  1839. struct lpfc_iocbq *cmdiocb;
  1840. cmdiocb = (struct lpfc_iocbq *) arg;
  1841. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  1842. return ndlp->nlp_state;
  1843. }
  1844. static uint32_t
  1845. lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1846. void *arg, uint32_t evt)
  1847. {
  1848. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1849. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  1850. return ndlp->nlp_state;
  1851. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1852. return ndlp->nlp_state;
  1853. }
  1854. static uint32_t
  1855. lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1856. void *arg, uint32_t evt)
  1857. {
  1858. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1859. /* Software abort outstanding PRLI before sending acc */
  1860. lpfc_els_abort(vport->phba, ndlp);
  1861. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1862. return ndlp->nlp_state;
  1863. }
  1864. static uint32_t
  1865. lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1866. void *arg, uint32_t evt)
  1867. {
  1868. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1869. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1870. return ndlp->nlp_state;
  1871. }
1872. /* This routine is invoked when we receive a PRLO request from an nport
  1873. * we are logged into. We should send back a PRLO rsp setting the
  1874. * appropriate bits.
  1875. * NEXT STATE = PRLI_ISSUE
  1876. */
  1877. static uint32_t
  1878. lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1879. void *arg, uint32_t evt)
  1880. {
  1881. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1882. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
  1883. return ndlp->nlp_state;
  1884. }
  1885. static uint32_t
  1886. lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1887. void *arg, uint32_t evt)
  1888. {
  1889. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1890. struct lpfc_iocbq *cmdiocb, *rspiocb;
  1891. struct lpfc_hba *phba = vport->phba;
  1892. IOCB_t *irsp;
  1893. PRLI *npr;
  1894. struct lpfc_nvme_prli *nvpr;
  1895. void *temp_ptr;
  1896. cmdiocb = (struct lpfc_iocbq *) arg;
  1897. rspiocb = cmdiocb->context_un.rsp_iocb;
1898. /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
1899. * formats differ, so NULL both PRLI pointers so that the
1900. * driver picks up the correct context.
1901. */
  1902. npr = NULL;
  1903. nvpr = NULL;
  1904. temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
  1905. if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
  1906. npr = (PRLI *) temp_ptr;
  1907. else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
  1908. nvpr = (struct lpfc_nvme_prli *) temp_ptr;
  1909. irsp = &rspiocb->iocb;
  1910. if (irsp->ulpStatus) {
  1911. if ((vport->port_type == LPFC_NPIV_PORT) &&
  1912. vport->cfg_restrict_login) {
  1913. goto out;
  1914. }
  1915. /* Adjust the nlp_type accordingly if the PRLI failed */
  1916. if (npr)
  1917. ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
  1918. if (nvpr)
  1919. ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
  1920. /* We can't set the DSM state till BOTH PRLIs complete */
  1921. goto out_err;
  1922. }
  1923. if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
  1924. (npr->prliType == PRLI_FCP_TYPE)) {
  1925. lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
  1926. "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
  1927. npr->initiatorFunc,
  1928. npr->targetFunc);
  1929. if (npr->initiatorFunc)
  1930. ndlp->nlp_type |= NLP_FCP_INITIATOR;
  1931. if (npr->targetFunc) {
  1932. ndlp->nlp_type |= NLP_FCP_TARGET;
  1933. if (npr->writeXferRdyDis)
  1934. ndlp->nlp_flag |= NLP_FIRSTBURST;
  1935. }
  1936. if (npr->Retry)
  1937. ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
  1938. } else if (nvpr &&
  1939. (bf_get_be32(prli_acc_rsp_code, nvpr) ==
  1940. PRLI_REQ_EXECUTED) &&
  1941. (bf_get_be32(prli_type_code, nvpr) ==
  1942. PRLI_NVME_TYPE)) {
  1943. /* Complete setting up the remote ndlp personality. */
  1944. if (bf_get_be32(prli_init, nvpr))
  1945. ndlp->nlp_type |= NLP_NVME_INITIATOR;
  1946. if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
  1947. bf_get_be32(prli_conf, nvpr))
  1948. ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
  1949. else
  1950. ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
  1951. /* Target driver cannot solicit NVME FB. */
  1952. if (bf_get_be32(prli_tgt, nvpr)) {
  1953. /* Complete the nvme target roles. The transport
  1954. * needs to know if the rport is capable of
  1955. * discovery in addition to its role.
  1956. */
  1957. ndlp->nlp_type |= NLP_NVME_TARGET;
  1958. if (bf_get_be32(prli_disc, nvpr))
  1959. ndlp->nlp_type |= NLP_NVME_DISCOVERY;
  1960. /*
  1961. * If prli_fba is set, the Target supports FirstBurst.
  1962. * If prli_fb_sz is 0, the FirstBurst size is unlimited,
  1963. * otherwise it defines the actual size supported by
  1964. * the NVME Target.
  1965. */
  1966. if ((bf_get_be32(prli_fba, nvpr) == 1) &&
  1967. (phba->cfg_nvme_enable_fb) &&
  1968. (!phba->nvmet_support)) {
  1969. /* Both sides support FB. The target's first
  1970. * burst size is a 512 byte encoded value.
  1971. */
  1972. ndlp->nlp_flag |= NLP_FIRSTBURST;
  1973. ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
  1974. nvpr);
  1975. /* Expressed in units of 512 bytes */
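/* For example, an encoded prli_fb_sz of 8 expands to
 * 8 * 512 = 4096 bytes of first burst; an encoded 0 selects
 * the driver maximum, LPFC_NVME_MAX_FB.
 */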
  1976. if (ndlp->nvme_fb_size)
  1977. ndlp->nvme_fb_size <<=
  1978. LPFC_NVME_FB_SHIFT;
  1979. else
  1980. ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
  1981. }
  1982. }
  1983. lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
  1984. "6029 NVME PRLI Cmpl w1 x%08x "
  1985. "w4 x%08x w5 x%08x flag x%x, "
  1986. "fcp_info x%x nlp_type x%x\n",
  1987. be32_to_cpu(nvpr->word1),
  1988. be32_to_cpu(nvpr->word4),
  1989. be32_to_cpu(nvpr->word5),
  1990. ndlp->nlp_flag, ndlp->nlp_fcp_info,
  1991. ndlp->nlp_type);
  1992. }
  1993. if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
  1994. (vport->port_type == LPFC_NPIV_PORT) &&
  1995. vport->cfg_restrict_login) {
  1996. out:
  1997. spin_lock_irq(shost->host_lock);
  1998. ndlp->nlp_flag |= NLP_TARGET_REMOVE;
  1999. spin_unlock_irq(shost->host_lock);
  2000. lpfc_issue_els_logo(vport, ndlp, 0);
  2001. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  2002. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  2003. return ndlp->nlp_state;
  2004. }
  2005. out_err:
  2006. /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
  2007. * are complete.
  2008. */
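/* fc4_prli_sent counts the solicited PRLIs (FCP and/or NVME) still
 * outstanding, so a node that sent both PRLI types transitions only
 * when the last completion drives the count to zero.
 */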
  2009. if (ndlp->fc4_prli_sent == 0) {
  2010. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  2011. if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
  2012. lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
  2013. else if (ndlp->nlp_type &
  2014. (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
  2015. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  2016. } else
  2017. lpfc_printf_vlog(vport,
  2018. KERN_INFO, LOG_ELS,
  2019. "3067 PRLI's still outstanding "
  2020. "on x%06x - count %d, Pend Node Mode "
  2021. "transition...\n",
  2022. ndlp->nlp_DID, ndlp->fc4_prli_sent);
  2023. return ndlp->nlp_state;
  2024. }
  2025. /*! lpfc_device_rm_prli_issue
  2026. *
  2027. * \pre
  2028. * \post
2029. * \param vport
  2030. * \param ndlp
  2031. * \param arg
  2032. * \param evt
  2033. * \return uint32_t
  2034. *
  2035. * \b Description:
2036. * This routine is invoked when we receive a request to remove an nport
2037. * we are in the process of PRLIing. Unless the node is still marked for
2038. * discovery (NLP_NPR_2B_DISC), software abort the outstanding PRLI and
2039. * drop the node, returning NLP_STE_FREED_NODE.
  2040. *
  2041. */
  2042. static uint32_t
  2043. lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2044. void *arg, uint32_t evt)
  2045. {
  2046. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2047. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  2048. spin_lock_irq(shost->host_lock);
  2049. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  2050. spin_unlock_irq(shost->host_lock);
  2051. return ndlp->nlp_state;
  2052. } else {
2053. /* software abort outstanding PRLI */
  2054. lpfc_els_abort(vport->phba, ndlp);
  2055. lpfc_drop_node(vport, ndlp);
  2056. return NLP_STE_FREED_NODE;
  2057. }
  2058. }
  2059. /*! lpfc_device_recov_prli_issue
  2060. *
  2061. * \pre
  2062. * \post
2063. * \param vport
  2064. * \param ndlp
  2065. * \param arg
  2066. * \param evt
  2067. * \return uint32_t
  2068. *
  2069. * \b Description:
2070. * The routine is invoked when the state of a device is unknown, like
2071. * during a link down. We software abort the outstanding PRLI
2072. * command and return the node to NPR state so that it can be
2073. * recovered or removed later.
  2074. */
  2075. static uint32_t
  2076. lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
  2077. struct lpfc_nodelist *ndlp,
  2078. void *arg,
  2079. uint32_t evt)
  2080. {
  2081. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2082. struct lpfc_hba *phba = vport->phba;
  2083. /* Don't do anything that will mess up processing of the
  2084. * previous RSCN.
  2085. */
  2086. if (vport->fc_flag & FC_RSCN_DEFERRED)
  2087. return ndlp->nlp_state;
  2088. /* software abort outstanding PRLI */
  2089. lpfc_els_abort(phba, ndlp);
  2090. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  2091. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  2092. spin_lock_irq(shost->host_lock);
  2093. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  2094. spin_unlock_irq(shost->host_lock);
  2095. lpfc_disc_set_adisc(vport, ndlp);
  2096. return ndlp->nlp_state;
  2097. }
  2098. static uint32_t
  2099. lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2100. void *arg, uint32_t evt)
  2101. {
  2102. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2103. struct ls_rjt stat;
  2104. memset(&stat, 0, sizeof(struct ls_rjt));
  2105. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2106. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2107. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2108. return ndlp->nlp_state;
  2109. }
  2110. static uint32_t
  2111. lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2112. void *arg, uint32_t evt)
  2113. {
  2114. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2115. struct ls_rjt stat;
  2116. memset(&stat, 0, sizeof(struct ls_rjt));
  2117. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2118. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2119. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2120. return ndlp->nlp_state;
  2121. }
  2122. static uint32_t
  2123. lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2124. void *arg, uint32_t evt)
  2125. {
  2126. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2127. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2128. spin_lock_irq(shost->host_lock);
  2129. ndlp->nlp_flag |= NLP_LOGO_ACC;
  2130. spin_unlock_irq(shost->host_lock);
  2131. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
  2132. return ndlp->nlp_state;
  2133. }
  2134. static uint32_t
  2135. lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2136. void *arg, uint32_t evt)
  2137. {
  2138. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2139. struct ls_rjt stat;
  2140. memset(&stat, 0, sizeof(struct ls_rjt));
  2141. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2142. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2143. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2144. return ndlp->nlp_state;
  2145. }
  2146. static uint32_t
  2147. lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2148. void *arg, uint32_t evt)
  2149. {
  2150. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2151. struct ls_rjt stat;
  2152. memset(&stat, 0, sizeof(struct ls_rjt));
  2153. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2154. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2155. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2156. return ndlp->nlp_state;
  2157. }
  2158. static uint32_t
  2159. lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2160. void *arg, uint32_t evt)
  2161. {
  2162. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2163. ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
  2164. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  2165. spin_lock_irq(shost->host_lock);
  2166. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  2167. spin_unlock_irq(shost->host_lock);
  2168. lpfc_disc_set_adisc(vport, ndlp);
  2169. return ndlp->nlp_state;
  2170. }
  2171. static uint32_t
  2172. lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2173. void *arg, uint32_t evt)
  2174. {
  2175. /*
  2176. * DevLoss has timed out and is calling for Device Remove.
2177. * In this case, abort the LOGO and clean up the ndlp
2178. */
2179. lpfc_unreg_rpi(vport, ndlp);
2180. /* software abort outstanding LOGO */
  2181. lpfc_els_abort(vport->phba, ndlp);
  2182. lpfc_drop_node(vport, ndlp);
  2183. return NLP_STE_FREED_NODE;
  2184. }
  2185. static uint32_t
  2186. lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
  2187. struct lpfc_nodelist *ndlp,
  2188. void *arg, uint32_t evt)
  2189. {
  2190. /*
  2191. * Device Recovery events have no meaning for a node with a LOGO
2192. * outstanding. The LOGO has to complete first; the node is
2193. * handled from that point.
  2194. */
  2195. return ndlp->nlp_state;
  2196. }
  2197. static uint32_t
  2198. lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2199. void *arg, uint32_t evt)
  2200. {
  2201. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2202. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  2203. return ndlp->nlp_state;
  2204. }
  2205. static uint32_t
  2206. lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2207. void *arg, uint32_t evt)
  2208. {
  2209. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2210. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  2211. return ndlp->nlp_state;
  2212. lpfc_rcv_prli(vport, ndlp, cmdiocb);
  2213. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  2214. return ndlp->nlp_state;
  2215. }
  2216. static uint32_t
  2217. lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2218. void *arg, uint32_t evt)
  2219. {
  2220. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2221. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  2222. return ndlp->nlp_state;
  2223. }
  2224. static uint32_t
  2225. lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2226. void *arg, uint32_t evt)
  2227. {
  2228. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2229. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  2230. return ndlp->nlp_state;
  2231. }
  2232. static uint32_t
  2233. lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2234. void *arg, uint32_t evt)
  2235. {
  2236. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2237. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
  2238. return ndlp->nlp_state;
  2239. }
  2240. static uint32_t
  2241. lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
  2242. struct lpfc_nodelist *ndlp,
  2243. void *arg,
  2244. uint32_t evt)
  2245. {
  2246. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2247. ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
  2248. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  2249. spin_lock_irq(shost->host_lock);
  2250. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  2251. ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
  2252. spin_unlock_irq(shost->host_lock);
  2253. lpfc_disc_set_adisc(vport, ndlp);
  2254. return ndlp->nlp_state;
  2255. }
  2256. static uint32_t
  2257. lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2258. void *arg, uint32_t evt)
  2259. {
  2260. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2261. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  2262. return ndlp->nlp_state;
  2263. }
  2264. static uint32_t
  2265. lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2266. void *arg, uint32_t evt)
  2267. {
  2268. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2269. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  2270. return ndlp->nlp_state;
  2271. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  2272. return ndlp->nlp_state;
  2273. }
  2274. static uint32_t
  2275. lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2276. void *arg, uint32_t evt)
  2277. {
  2278. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2279. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  2280. return ndlp->nlp_state;
  2281. }
  2282. static uint32_t
  2283. lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
  2284. struct lpfc_nodelist *ndlp,
  2285. void *arg, uint32_t evt)
  2286. {
  2287. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2288. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  2289. return ndlp->nlp_state;
  2290. }
  2291. static uint32_t
  2292. lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2293. void *arg, uint32_t evt)
  2294. {
  2295. struct lpfc_hba *phba = vport->phba;
  2296. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2297. /* flush the target */
  2298. lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
  2299. ndlp->nlp_sid, 0, LPFC_CTX_TGT);
  2300. /* Treat like rcv logo */
  2301. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
  2302. return ndlp->nlp_state;
  2303. }
  2304. static uint32_t
  2305. lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
  2306. struct lpfc_nodelist *ndlp,
  2307. void *arg,
  2308. uint32_t evt)
  2309. {
  2310. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2311. ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
  2312. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  2313. spin_lock_irq(shost->host_lock);
  2314. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  2315. ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
  2316. spin_unlock_irq(shost->host_lock);
  2317. lpfc_disc_set_adisc(vport, ndlp);
  2318. return ndlp->nlp_state;
  2319. }
  2320. static uint32_t
  2321. lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2322. void *arg, uint32_t evt)
  2323. {
  2324. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2325. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2326. /* Ignore PLOGI if we have an outstanding LOGO */
  2327. if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
  2328. return ndlp->nlp_state;
  2329. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
  2330. lpfc_cancel_retry_delay_tmo(vport, ndlp);
  2331. spin_lock_irq(shost->host_lock);
  2332. ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
  2333. spin_unlock_irq(shost->host_lock);
  2334. } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
  2335. /* send PLOGI immediately, move to PLOGI issue state */
  2336. if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
  2337. ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
  2338. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
  2339. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  2340. }
  2341. }
  2342. return ndlp->nlp_state;
  2343. }
  2344. static uint32_t
  2345. lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2346. void *arg, uint32_t evt)
  2347. {
  2348. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2349. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2350. struct ls_rjt stat;
  2351. memset(&stat, 0, sizeof (struct ls_rjt));
  2352. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2353. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2354. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2355. if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
  2356. if (ndlp->nlp_flag & NLP_NPR_ADISC) {
  2357. spin_lock_irq(shost->host_lock);
  2358. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2359. ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
  2360. spin_unlock_irq(shost->host_lock);
  2361. lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
  2362. lpfc_issue_els_adisc(vport, ndlp, 0);
  2363. } else {
  2364. ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
  2365. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
  2366. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  2367. }
  2368. }
  2369. return ndlp->nlp_state;
  2370. }
  2371. static uint32_t
  2372. lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2373. void *arg, uint32_t evt)
  2374. {
  2375. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2376. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  2377. return ndlp->nlp_state;
  2378. }
  2379. static uint32_t
  2380. lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2381. void *arg, uint32_t evt)
  2382. {
  2383. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2384. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  2385. /*
  2386. * Do not start discovery if discovery is about to start
2387. * or discovery is in progress for this node. Starting discovery
  2388. * here will affect the counting of discovery threads.
  2389. */
  2390. if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
  2391. !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
  2392. if (ndlp->nlp_flag & NLP_NPR_ADISC) {
  2393. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2394. ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
  2395. lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
  2396. lpfc_issue_els_adisc(vport, ndlp, 0);
  2397. } else {
  2398. ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
  2399. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
  2400. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  2401. }
  2402. }
  2403. return ndlp->nlp_state;
  2404. }
  2405. static uint32_t
  2406. lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2407. void *arg, uint32_t evt)
  2408. {
  2409. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2410. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2411. spin_lock_irq(shost->host_lock);
  2412. ndlp->nlp_flag |= NLP_LOGO_ACC;
  2413. spin_unlock_irq(shost->host_lock);
  2414. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
  2415. if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
  2416. mod_timer(&ndlp->nlp_delayfunc,
  2417. jiffies + msecs_to_jiffies(1000 * 1));
  2418. spin_lock_irq(shost->host_lock);
  2419. ndlp->nlp_flag |= NLP_DELAY_TMO;
  2420. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2421. spin_unlock_irq(shost->host_lock);
  2422. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  2423. } else {
  2424. spin_lock_irq(shost->host_lock);
  2425. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2426. spin_unlock_irq(shost->host_lock);
  2427. }
  2428. return ndlp->nlp_state;
  2429. }
  2430. static uint32_t
  2431. lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2432. void *arg, uint32_t evt)
  2433. {
  2434. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2435. IOCB_t *irsp;
  2436. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2437. cmdiocb = (struct lpfc_iocbq *) arg;
  2438. rspiocb = cmdiocb->context_un.rsp_iocb;
  2439. irsp = &rspiocb->iocb;
  2440. if (irsp->ulpStatus) {
  2441. spin_lock_irq(shost->host_lock);
  2442. ndlp->nlp_flag |= NLP_DEFER_RM;
  2443. spin_unlock_irq(shost->host_lock);
  2444. return NLP_STE_FREED_NODE;
  2445. }
  2446. return ndlp->nlp_state;
  2447. }
  2448. static uint32_t
  2449. lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2450. void *arg, uint32_t evt)
  2451. {
  2452. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2453. IOCB_t *irsp;
  2454. cmdiocb = (struct lpfc_iocbq *) arg;
  2455. rspiocb = cmdiocb->context_un.rsp_iocb;
  2456. irsp = &rspiocb->iocb;
  2457. if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
  2458. lpfc_drop_node(vport, ndlp);
  2459. return NLP_STE_FREED_NODE;
  2460. }
  2461. return ndlp->nlp_state;
  2462. }
  2463. static uint32_t
  2464. lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2465. void *arg, uint32_t evt)
  2466. {
  2467. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2468. /* For the fabric port just clear the fc flags. */
  2469. if (ndlp->nlp_DID == Fabric_DID) {
  2470. spin_lock_irq(shost->host_lock);
  2471. vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
  2472. spin_unlock_irq(shost->host_lock);
  2473. }
  2474. lpfc_unreg_rpi(vport, ndlp);
  2475. return ndlp->nlp_state;
  2476. }
  2477. static uint32_t
  2478. lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2479. void *arg, uint32_t evt)
  2480. {
  2481. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2482. IOCB_t *irsp;
  2483. cmdiocb = (struct lpfc_iocbq *) arg;
  2484. rspiocb = cmdiocb->context_un.rsp_iocb;
  2485. irsp = &rspiocb->iocb;
  2486. if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
  2487. lpfc_drop_node(vport, ndlp);
  2488. return NLP_STE_FREED_NODE;
  2489. }
  2490. return ndlp->nlp_state;
  2491. }
  2492. static uint32_t
  2493. lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
  2494. struct lpfc_nodelist *ndlp,
  2495. void *arg, uint32_t evt)
  2496. {
  2497. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  2498. MAILBOX_t *mb = &pmb->u.mb;
  2499. if (!mb->mbxStatus) {
  2500. /* SLI4 ports have preallocated logical rpis. */
  2501. if (vport->phba->sli_rev < LPFC_SLI_REV4)
  2502. ndlp->nlp_rpi = mb->un.varWords[0];
  2503. ndlp->nlp_flag |= NLP_RPI_REGISTERED;
  2504. if (ndlp->nlp_flag & NLP_LOGO_ACC) {
  2505. lpfc_unreg_rpi(vport, ndlp);
  2506. }
  2507. } else {
  2508. if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
  2509. lpfc_drop_node(vport, ndlp);
  2510. return NLP_STE_FREED_NODE;
  2511. }
  2512. }
  2513. return ndlp->nlp_state;
  2514. }
  2515. static uint32_t
  2516. lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2517. void *arg, uint32_t evt)
  2518. {
  2519. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2520. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  2521. spin_lock_irq(shost->host_lock);
  2522. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  2523. spin_unlock_irq(shost->host_lock);
  2524. return ndlp->nlp_state;
  2525. }
  2526. lpfc_drop_node(vport, ndlp);
  2527. return NLP_STE_FREED_NODE;
  2528. }
  2529. static uint32_t
  2530. lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2531. void *arg, uint32_t evt)
  2532. {
  2533. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  2534. /* Don't do anything that will mess up processing of the
  2535. * previous RSCN.
  2536. */
  2537. if (vport->fc_flag & FC_RSCN_DEFERRED)
  2538. return ndlp->nlp_state;
  2539. lpfc_cancel_retry_delay_tmo(vport, ndlp);
  2540. spin_lock_irq(shost->host_lock);
  2541. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  2542. ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
  2543. spin_unlock_irq(shost->host_lock);
  2544. return ndlp->nlp_state;
  2545. }
2546. /* This next section defines the NPort Discovery State Machine */
2547. /* There are 4 different doubly linked lists that nodelist entries can reside on.
2548. * The plogi list and adisc list are used when Link Up discovery or RSCN
2549. * processing is needed. Each list holds the nodes that we will send PLOGI
2550. * or ADISC on. These lists will keep track of what nodes will be affected
2551. * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
2552. * The unmapped_list will contain all nodes that we have successfully logged
2553. * into at the Fibre Channel level. The mapped_list will contain all nodes
2554. * that are mapped FCP targets.
2555. */
2556. /*
2557. * The bind list is a list of undiscovered (potentially non-existent) nodes
2558. * that we have saved binding information on. This information is used when
2559. * nodes transition from the unmapped to the mapped list.
2560. */
2561. /* For UNUSED_NODE state, the node has just been allocated.
2562. * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2563. * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2564. * and put on the unmapped list. For ADISC processing, the node is taken off
2565. * the ADISC list and placed on either the mapped or unmapped list (depending
2566. * on its previous state). Once on the unmapped list, a PRLI is issued and the
2567. * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2568. * changed to UNMAPPED_NODE. If the completion indicates a mapped
2569. * node, the node is taken off the unmapped list. The binding list is checked
2570. * for a valid binding, or a binding is automatically assigned. If binding
2571. * assignment is unsuccessful, the node is left on the unmapped list. If
2572. * binding assignment is successful, the associated binding list entry (if
2573. * any) is removed, and the node is placed on the mapped list.
2574. */
2575. /*
2576. * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2577. * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2578. * expire, all affected nodes will receive a DEVICE_RM event.
2579. */
2580. /*
2581. * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2582. * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
2583. * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2584. * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2585. * we will first process the ADISC list. 32 entries are processed initially and
2586. * ADISC is initiated for each one. Completions / Events for each node are
2587. * funneled through the state machine. As each node finishes ADISC processing, it
2588. * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2589. * waiting, and the ADISC list count is identically 0, then we are done. For
2590. * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2591. * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2592. * list. 32 entries are processed initially and PLOGI is initiated for each one.
2593. * Completions / Events for each node are funneled through the state machine. As
2594. * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2595. * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2596. * identically 0, then we are done. We have now completed discovery / RSCN
2597. * handling. Upon completion, ALL nodes should be on either the mapped or
2598. * unmapped lists.
2599. */
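/* The table below is indexed as (current state * NLP_EVT_MAX_EVENT) + event,
 * so each state owns one row of NLP_EVT_MAX_EVENT action routines, one per
 * event. For example, a CMPL_PLOGI completion on a node in PLOGI_ISSUE state
 * dispatches to lpfc_cmpl_plogi_plogi_issue below.
 */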
  2600. static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
  2601. (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
  2602. /* Action routine Event Current State */
  2603. lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
  2604. lpfc_rcv_els_unused_node, /* RCV_PRLI */
  2605. lpfc_rcv_logo_unused_node, /* RCV_LOGO */
  2606. lpfc_rcv_els_unused_node, /* RCV_ADISC */
  2607. lpfc_rcv_els_unused_node, /* RCV_PDISC */
  2608. lpfc_rcv_els_unused_node, /* RCV_PRLO */
  2609. lpfc_disc_illegal, /* CMPL_PLOGI */
  2610. lpfc_disc_illegal, /* CMPL_PRLI */
  2611. lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
  2612. lpfc_disc_illegal, /* CMPL_ADISC */
  2613. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2614. lpfc_device_rm_unused_node, /* DEVICE_RM */
  2615. lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
  2616. lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
  2617. lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
  2618. lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
  2619. lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
  2620. lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
  2621. lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
  2622. lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
  2623. lpfc_disc_illegal, /* CMPL_PRLI */
  2624. lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */
  2625. lpfc_disc_illegal, /* CMPL_ADISC */
  2626. lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */
  2627. lpfc_device_rm_plogi_issue, /* DEVICE_RM */
  2628. lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
  2629. lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
  2630. lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
  2631. lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
  2632. lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
  2633. lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
  2634. lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
  2635. lpfc_disc_illegal, /* CMPL_PLOGI */
  2636. lpfc_disc_illegal, /* CMPL_PRLI */
  2637. lpfc_disc_illegal, /* CMPL_LOGO */
  2638. lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
  2639. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2640. lpfc_device_rm_adisc_issue, /* DEVICE_RM */
  2641. lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
  2642. lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
2643. lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
  2644. lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
  2645. lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
  2646. lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
  2647. lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
  2648. lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
  2649. lpfc_disc_illegal, /* CMPL_PRLI */
  2650. lpfc_disc_illegal, /* CMPL_LOGO */
  2651. lpfc_disc_illegal, /* CMPL_ADISC */
  2652. lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
  2653. lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
  2654. lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
  2655. lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
  2656. lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
  2657. lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
  2658. lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
  2659. lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
  2660. lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
  2661. lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
  2662. lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
  2663. lpfc_disc_illegal, /* CMPL_LOGO */
  2664. lpfc_disc_illegal, /* CMPL_ADISC */
  2665. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2666. lpfc_device_rm_prli_issue, /* DEVICE_RM */
  2667. lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
  2668. lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
  2669. lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
  2670. lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
  2671. lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
  2672. lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
  2673. lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
  2674. lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
  2675. lpfc_disc_illegal, /* CMPL_PRLI */
  2676. lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
  2677. lpfc_disc_illegal, /* CMPL_ADISC */
  2678. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2679. lpfc_device_rm_logo_issue, /* DEVICE_RM */
  2680. lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
  2681. lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
  2682. lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
  2683. lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
  2684. lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
  2685. lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
  2686. lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
  2687. lpfc_disc_illegal, /* CMPL_PLOGI */
  2688. lpfc_disc_illegal, /* CMPL_PRLI */
  2689. lpfc_disc_illegal, /* CMPL_LOGO */
  2690. lpfc_disc_illegal, /* CMPL_ADISC */
  2691. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2692. lpfc_disc_illegal, /* DEVICE_RM */
  2693. lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
  2694. lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
  2695. lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
  2696. lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
  2697. lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
  2698. lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
  2699. lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
  2700. lpfc_disc_illegal, /* CMPL_PLOGI */
  2701. lpfc_disc_illegal, /* CMPL_PRLI */
  2702. lpfc_disc_illegal, /* CMPL_LOGO */
  2703. lpfc_disc_illegal, /* CMPL_ADISC */
  2704. lpfc_disc_illegal, /* CMPL_REG_LOGIN */
  2705. lpfc_disc_illegal, /* DEVICE_RM */
  2706. lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
  2707. lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
  2708. lpfc_rcv_prli_npr_node, /* RCV_PRLI */
  2709. lpfc_rcv_logo_npr_node, /* RCV_LOGO */
  2710. lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
  2711. lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
  2712. lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
  2713. lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */
  2714. lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */
  2715. lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
  2716. lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */
  2717. lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
  2718. lpfc_device_rm_npr_node, /* DEVICE_RM */
  2719. lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
  2720. };
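/* lpfc_disc_state_machine - dispatch a discovery event for a node.
 *
 * Typical usage (sketch; elsiocb stands for the received ELS iocb):
 *
 *	rc = lpfc_disc_state_machine(vport, ndlp, (void *)elsiocb,
 *				     NLP_EVT_RCV_PLOGI);
 *
 * arg is event specific: an lpfc_iocbq for ELS receive and completion
 * events, or an LPFC_MBOXQ_t for CMPL_REG_LOGIN. The return value is
 * the node's resulting state, or NLP_STE_FREED_NODE if the action
 * routine released the node.
 */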
  2721. int
  2722. lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2723. void *arg, uint32_t evt)
  2724. {
  2725. uint32_t cur_state, rc;
  2726. uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
  2727. uint32_t);
  2728. uint32_t got_ndlp = 0;
  2729. uint32_t data1;
  2730. if (lpfc_nlp_get(ndlp))
  2731. got_ndlp = 1;
  2732. cur_state = ndlp->nlp_state;
  2733. data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
  2734. ((uint32_t)ndlp->nlp_type));
  2735. /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
  2736. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  2737. "0211 DSM in event x%x on NPort x%x in "
  2738. "state %d rpi x%x Data: x%x x%x\n",
  2739. evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
  2740. ndlp->nlp_flag, data1);
  2741. lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
  2742. "DSM in: evt:%d ste:%d did:x%x",
  2743. evt, cur_state, ndlp->nlp_DID);
  2744. func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
  2745. rc = (func) (vport, ndlp, arg, evt);
  2746. /* DSM out state <rc> on NPort <nlp_DID> */
  2747. if (got_ndlp) {
  2748. data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
  2749. ((uint32_t)ndlp->nlp_type));
  2750. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  2751. "0212 DSM out state %d on NPort x%x "
  2752. "rpi x%x Data: x%x x%x\n",
  2753. rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
  2754. data1);
  2755. lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
  2756. "DSM out: ste:%d did:x%x flg:x%x",
  2757. rc, ndlp->nlp_DID, ndlp->nlp_flag);
  2758. /* Decrement the ndlp reference count held for this function */
  2759. lpfc_nlp_put(ndlp);
  2760. } else {
  2761. lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
  2762. "0213 DSM out state %d on NPort free\n", rc);
  2763. lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
  2764. "DSM out: ste:%d did:x%x flg:x%x",
  2765. rc, 0, 0);
  2766. }
  2767. return rc;
  2768. }