
/drivers/scsi/lpfc/lpfc_els.c

http://github.com/mirrors/linux
C | 10152 lines | 6613 code | 1025 blank | 2514 comment | 1180 complexity | bff4b2cf65119e8010bb275c58f7b75d MD5
    1/*******************************************************************
    2 * This file is part of the Emulex Linux Device Driver for         *
    3 * Fibre Channel Host Bus Adapters.                                *
    4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
    5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
    6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
    7 * EMULEX and SLI are trademarks of Emulex.                        *
    8 * www.broadcom.com                                                *
    9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
   10 *                                                                 *
   11 * This program is free software; you can redistribute it and/or   *
   12 * modify it under the terms of version 2 of the GNU General       *
   13 * Public License as published by the Free Software Foundation.    *
   14 * This program is distributed in the hope that it will be useful. *
   15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
   16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
   17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
   18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
   19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
   20 * more details, a copy of which can be found in the file COPYING  *
   21 * included with this package.                                     *
   22 *******************************************************************/
   23/* See Fibre Channel protocol T11 FC-LS for details */
   24#include <linux/blkdev.h>
   25#include <linux/pci.h>
   26#include <linux/slab.h>
   27#include <linux/interrupt.h>
   28
   29#include <scsi/scsi.h>
   30#include <scsi/scsi_device.h>
   31#include <scsi/scsi_host.h>
   32#include <scsi/scsi_transport_fc.h>
   33#include <uapi/scsi/fc/fc_fs.h>
   34#include <uapi/scsi/fc/fc_els.h>
   35
   36#include "lpfc_hw4.h"
   37#include "lpfc_hw.h"
   38#include "lpfc_sli.h"
   39#include "lpfc_sli4.h"
   40#include "lpfc_nl.h"
   41#include "lpfc_disc.h"
   42#include "lpfc_scsi.h"
   43#include "lpfc.h"
   44#include "lpfc_logmsg.h"
   45#include "lpfc_crtn.h"
   46#include "lpfc_vport.h"
   47#include "lpfc_debugfs.h"
   48
   49static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
   50			  struct lpfc_iocbq *);
   51static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
   52			struct lpfc_iocbq *);
   53static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
   54static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
   55				struct lpfc_nodelist *ndlp, uint8_t retry);
   56static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
   57				  struct lpfc_iocbq *iocb);
   58
   59static int lpfc_max_els_tries = 3;
   60
   61/**
   62 * lpfc_els_chk_latt - Check host link attention event for a vport
   63 * @vport: pointer to a host virtual N_Port data structure.
   64 *
    65 * This routine checks whether there is an outstanding host link
    66 * attention event during the @vport's discovery process. It is done
    67 * by reading the HBA's Host Attention (HA) register. If there are any host
    68 * link attention events during this @vport's discovery process, the @vport
    69 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
    70 * be issued if the link state is not already in host link cleared state,
    71 * and a return code shall indicate whether the host link attention event
    72 * has happened.
    73 *
    74 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
    75 * state is LPFC_VPORT_READY, the request for checking host link attention
    76 * events will be ignored and a return code shall indicate that no host link
    77 * attention event has happened.
   78 *
   79 * Return codes
   80 *   0 - no host link attention event happened
   81 *   1 - host link attention event happened
   82 **/
   83int
   84lpfc_els_chk_latt(struct lpfc_vport *vport)
   85{
   86	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
   87	struct lpfc_hba  *phba = vport->phba;
   88	uint32_t ha_copy;
   89
   90	if (vport->port_state >= LPFC_VPORT_READY ||
   91	    phba->link_state == LPFC_LINK_DOWN ||
   92	    phba->sli_rev > LPFC_SLI_REV3)
   93		return 0;
   94
   95	/* Read the HBA Host Attention Register */
   96	if (lpfc_readl(phba->HAregaddr, &ha_copy))
   97		return 1;
   98
   99	if (!(ha_copy & HA_LATT))
  100		return 0;
  101
  102	/* Pending Link Event during Discovery */
  103	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  104			 "0237 Pending Link Event during "
  105			 "Discovery: State x%x\n",
  106			 phba->pport->port_state);
  107
  108	/* CLEAR_LA should re-enable link attention events and
  109	 * we should then immediately take a LATT event. The
  110	 * LATT processing should call lpfc_linkdown() which
  111	 * will cleanup any left over in-progress discovery
  112	 * events.
  113	 */
  114	spin_lock_irq(shost->host_lock);
  115	vport->fc_flag |= FC_ABORT_DISCOVERY;
  116	spin_unlock_irq(shost->host_lock);
  117
  118	if (phba->link_state != LPFC_CLEAR_LA)
  119		lpfc_issue_clear_la(phba, vport);
  120
  121	return 1;
  122}
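
/*
 * Illustrative usage (sketch only, not driver code): ELS completion handlers
 * in this file call lpfc_els_chk_latt() first and, when a link attention
 * event is pending, drop their node reference and skip response processing:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		lpfc_nlp_put(ndlp);
 *		goto out;
 *	}
 *
 * See lpfc_cmpl_els_flogi() below for this pattern in context.
 */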
  123
  124/**
  125 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
  126 * @vport: pointer to a host virtual N_Port data structure.
  127 * @expectRsp: flag indicating whether response is expected.
  128 * @cmdSize: size of the ELS command.
  129 * @retry: number of retries to the command IOCB when it fails.
  130 * @ndlp: pointer to a node-list data structure.
  131 * @did: destination identifier.
  132 * @elscmd: the ELS command code.
  133 *
   134 * This routine allocates a lpfc-IOCB data structure from the driver's
   135 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
   136 * into the routine, for the discovery state machine to issue Extended
   137 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
   138 * and preparation routine that is used by all the discovery state machine
   139 * routines; the ELS command-specific fields are set up later by the
   140 * individual discovery state machine routines after this routine has
   141 * allocated and prepared the generic IOCB data structure. It fills in the
  142 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
  143 * payload and response payload (if expected). The reference count on the
  144 * ndlp is incremented by 1 and the reference to the ndlp is put into
  145 * context1 of the IOCB data structure for this IOCB to hold the ndlp
  146 * reference for the command's callback function to access later.
  147 *
  148 * Return code
  149 *   Pointer to the newly allocated/prepared els iocb data structure
  150 *   NULL - when els iocb data structure allocation/preparation failed
  151 **/
  152struct lpfc_iocbq *
  153lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
  154		   uint16_t cmdSize, uint8_t retry,
  155		   struct lpfc_nodelist *ndlp, uint32_t did,
  156		   uint32_t elscmd)
  157{
  158	struct lpfc_hba  *phba = vport->phba;
  159	struct lpfc_iocbq *elsiocb;
  160	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
  161	struct ulp_bde64 *bpl;
  162	IOCB_t *icmd;
  163
  164
  165	if (!lpfc_is_link_up(phba))
  166		return NULL;
  167
  168	/* Allocate buffer for  command iocb */
  169	elsiocb = lpfc_sli_get_iocbq(phba);
  170
  171	if (elsiocb == NULL)
  172		return NULL;
  173
  174	/*
  175	 * If this command is for fabric controller and HBA running
  176	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
  177	 */
  178	if ((did == Fabric_DID) &&
  179		(phba->hba_flag & HBA_FIP_SUPPORT) &&
  180		((elscmd == ELS_CMD_FLOGI) ||
  181		 (elscmd == ELS_CMD_FDISC) ||
  182		 (elscmd == ELS_CMD_LOGO)))
  183		switch (elscmd) {
  184		case ELS_CMD_FLOGI:
  185		elsiocb->iocb_flag |=
  186			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
  187					& LPFC_FIP_ELS_ID_MASK);
  188		break;
  189		case ELS_CMD_FDISC:
  190		elsiocb->iocb_flag |=
  191			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
  192					& LPFC_FIP_ELS_ID_MASK);
  193		break;
  194		case ELS_CMD_LOGO:
  195		elsiocb->iocb_flag |=
  196			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
  197					& LPFC_FIP_ELS_ID_MASK);
  198		break;
  199		}
  200	else
  201		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
  202
  203	icmd = &elsiocb->iocb;
  204
  205	/* fill in BDEs for command */
  206	/* Allocate buffer for command payload */
  207	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  208	if (pcmd)
  209		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
  210	if (!pcmd || !pcmd->virt)
  211		goto els_iocb_free_pcmb_exit;
  212
  213	INIT_LIST_HEAD(&pcmd->list);
  214
  215	/* Allocate buffer for response payload */
  216	if (expectRsp) {
  217		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  218		if (prsp)
  219			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  220						     &prsp->phys);
  221		if (!prsp || !prsp->virt)
  222			goto els_iocb_free_prsp_exit;
  223		INIT_LIST_HEAD(&prsp->list);
  224	} else
  225		prsp = NULL;
  226
  227	/* Allocate buffer for Buffer ptr list */
  228	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  229	if (pbuflist)
  230		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  231						 &pbuflist->phys);
  232	if (!pbuflist || !pbuflist->virt)
  233		goto els_iocb_free_pbuf_exit;
  234
  235	INIT_LIST_HEAD(&pbuflist->list);
  236
  237	if (expectRsp) {
  238		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
  239		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
  240		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  241		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  242
  243		icmd->un.elsreq64.remoteID = did;		/* DID */
  244		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
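		/*
		 * ELS request timeouts: FLOGI uses twice the default R_A_TOV
		 * (FF_DEF_RATOV), LOGO uses the current fabric R_A_TOV, and
		 * all other ELS requests use twice the current fabric R_A_TOV.
		 */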
  245		if (elscmd == ELS_CMD_FLOGI)
  246			icmd->ulpTimeout = FF_DEF_RATOV * 2;
  247		else if (elscmd == ELS_CMD_LOGO)
  248			icmd->ulpTimeout = phba->fc_ratov;
  249		else
  250			icmd->ulpTimeout = phba->fc_ratov * 2;
  251	} else {
  252		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
  253		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
  254		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  255		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
  256		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
  257		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
  258	}
  259	icmd->ulpBdeCount = 1;
  260	icmd->ulpLe = 1;
  261	icmd->ulpClass = CLASS3;
  262
  263	/*
  264	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
  265	 * For SLI4, since the driver controls VPIs we also want to include
  266	 * all ELS pt2pt protocol traffic as well.
  267	 */
  268	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
  269		((phba->sli_rev == LPFC_SLI_REV4) &&
  270		    (vport->fc_flag & FC_PT2PT))) {
  271
  272		if (expectRsp) {
  273			icmd->un.elsreq64.myID = vport->fc_myDID;
  274
  275			/* For ELS_REQUEST64_CR, use the VPI by default */
  276			icmd->ulpContext = phba->vpi_ids[vport->vpi];
  277		}
  278
  279		icmd->ulpCt_h = 0;
  280		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  281		if (elscmd == ELS_CMD_ECHO)
  282			icmd->ulpCt_l = 0; /* context = invalid RPI */
  283		else
  284			icmd->ulpCt_l = 1; /* context = VPI */
  285	}
  286
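	/*
	 * Build the buffer pointer list: the first BDE describes the command
	 * payload (cmdSize bytes); when a response is expected, a second BDE
	 * describes the response buffer (FCELSSIZE bytes).
	 */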
  287	bpl = (struct ulp_bde64 *) pbuflist->virt;
  288	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
  289	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
  290	bpl->tus.f.bdeSize = cmdSize;
  291	bpl->tus.f.bdeFlags = 0;
  292	bpl->tus.w = le32_to_cpu(bpl->tus.w);
  293
  294	if (expectRsp) {
  295		bpl++;
  296		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
  297		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
  298		bpl->tus.f.bdeSize = FCELSSIZE;
  299		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  300		bpl->tus.w = le32_to_cpu(bpl->tus.w);
  301	}
  302
  303	/* prevent preparing iocb with NULL ndlp reference */
  304	elsiocb->context1 = lpfc_nlp_get(ndlp);
  305	if (!elsiocb->context1)
  306		goto els_iocb_free_pbuf_exit;
  307	elsiocb->context2 = pcmd;
  308	elsiocb->context3 = pbuflist;
  309	elsiocb->retry = retry;
  310	elsiocb->vport = vport;
  311	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
  312
  313	if (prsp) {
  314		list_add(&prsp->list, &pcmd->list);
  315	}
  316	if (expectRsp) {
  317		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
  318		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  319				 "0116 Xmit ELS command x%x to remote "
  320				 "NPORT x%x I/O tag: x%x, port state:x%x "
  321				 "rpi x%x fc_flag:x%x\n",
  322				 elscmd, did, elsiocb->iotag,
  323				 vport->port_state, ndlp->nlp_rpi,
  324				 vport->fc_flag);
  325	} else {
  326		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
  327		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  328				 "0117 Xmit ELS response x%x to remote "
  329				 "NPORT x%x I/O tag: x%x, size: x%x "
  330				 "port_state x%x  rpi x%x fc_flag x%x\n",
  331				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
  332				 cmdSize, vport->port_state,
  333				 ndlp->nlp_rpi, vport->fc_flag);
  334	}
  335	return elsiocb;
  336
  337els_iocb_free_pbuf_exit:
  338	if (expectRsp)
  339		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
  340	kfree(pbuflist);
  341
  342els_iocb_free_prsp_exit:
  343	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  344	kfree(prsp);
  345
  346els_iocb_free_pcmb_exit:
  347	kfree(pcmd);
  348	lpfc_sli_release_iocbq(phba, elsiocb);
  349	return NULL;
  350}
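
/*
 * Illustrative caller pattern (sketch only, not driver code): the ELS issue
 * routines in this file use lpfc_prep_els_iocb() roughly as follows:
 * prepare the IOCB, write the ELS command word and payload into the command
 * buffer (context2), set the completion handler, and issue the IOCB.
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	*((uint32_t *)pcmd) = ELS_CMD_FLOGI;
 *	pcmd += sizeof(uint32_t);
 *	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *
 * Compare with lpfc_issue_els_flogi() below, which is the real implementation.
 */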
  351
  352/**
  353 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
  354 * @vport: pointer to a host virtual N_Port data structure.
  355 *
  356 * This routine issues a fabric registration login for a @vport. An
  357 * active ndlp node with Fabric_DID must already exist for this @vport.
  358 * The routine invokes two mailbox commands to carry out fabric registration
  359 * login through the HBA firmware: the first mailbox command requests the
  360 * HBA to perform link configuration for the @vport; and the second mailbox
  361 * command requests the HBA to perform the actual fabric registration login
  362 * with the @vport.
  363 *
  364 * Return code
  365 *   0 - successfully issued fabric registration login for @vport
  366 *   -ENXIO -- failed to issue fabric registration login for @vport
  367 **/
  368int
  369lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
  370{
  371	struct lpfc_hba  *phba = vport->phba;
  372	LPFC_MBOXQ_t *mbox;
  373	struct lpfc_dmabuf *mp;
  374	struct lpfc_nodelist *ndlp;
  375	struct serv_parm *sp;
  376	int rc;
  377	int err = 0;
  378
  379	sp = &phba->fc_fabparam;
  380	ndlp = lpfc_findnode_did(vport, Fabric_DID);
  381	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  382		err = 1;
  383		goto fail;
  384	}
  385
  386	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  387	if (!mbox) {
  388		err = 2;
  389		goto fail;
  390	}
  391
  392	vport->port_state = LPFC_FABRIC_CFG_LINK;
  393	lpfc_config_link(phba, mbox);
  394	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  395	mbox->vport = vport;
  396
  397	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  398	if (rc == MBX_NOT_FINISHED) {
  399		err = 3;
  400		goto fail_free_mbox;
  401	}
  402
  403	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  404	if (!mbox) {
  405		err = 4;
  406		goto fail;
  407	}
  408	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
  409			  ndlp->nlp_rpi);
  410	if (rc) {
  411		err = 5;
  412		goto fail_free_mbox;
  413	}
  414
  415	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
  416	mbox->vport = vport;
  417	/* increment the reference count on ndlp to hold reference
  418	 * for the callback routine.
  419	 */
  420	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
  421
  422	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  423	if (rc == MBX_NOT_FINISHED) {
  424		err = 6;
  425		goto fail_issue_reg_login;
  426	}
  427
  428	return 0;
  429
  430fail_issue_reg_login:
  431	/* decrement the reference count on ndlp just incremented
  432	 * for the failed mbox command.
  433	 */
  434	lpfc_nlp_put(ndlp);
  435	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
  436	lpfc_mbuf_free(phba, mp->virt, mp->phys);
  437	kfree(mp);
  438fail_free_mbox:
  439	mempool_free(mbox, phba->mbox_mem_pool);
  440
  441fail:
  442	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
  443	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
  444		"0249 Cannot issue Register Fabric login: Err %d\n", err);
  445	return -ENXIO;
  446}
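
/*
 * Note on the error paths above: the "Err %d" value logged in message 0249
 * identifies the failure point (1: no active Fabric_DID ndlp, 2: CONFIG_LINK
 * mailbox allocation failed, 3: CONFIG_LINK issue failed, 4: REG_LOGIN
 * mailbox allocation failed, 5: lpfc_reg_rpi() failed, 6: REG_LOGIN issue
 * failed).
 */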
  447
  448/**
  449 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
  450 * @vport: pointer to a host virtual N_Port data structure.
  451 *
  452 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
  453 * the @vport. This mailbox command is necessary for SLI4 port only.
  454 *
  455 * Return code
  456 *   0 - successfully issued REG_VFI for @vport
  457 *   A failure code otherwise.
  458 **/
  459int
  460lpfc_issue_reg_vfi(struct lpfc_vport *vport)
  461{
  462	struct lpfc_hba  *phba = vport->phba;
  463	LPFC_MBOXQ_t *mboxq = NULL;
  464	struct lpfc_nodelist *ndlp;
  465	struct lpfc_dmabuf *dmabuf = NULL;
  466	int rc = 0;
  467
  468	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
  469	if ((phba->sli_rev == LPFC_SLI_REV4) &&
  470	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
  471	    !(vport->fc_flag & FC_PT2PT)) {
  472		ndlp = lpfc_findnode_did(vport, Fabric_DID);
  473		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  474			rc = -ENODEV;
  475			goto fail;
  476		}
  477	}
  478
  479	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  480	if (!mboxq) {
  481		rc = -ENOMEM;
  482		goto fail;
  483	}
  484
  485	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
  486	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
  487		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  488		if (!dmabuf) {
  489			rc = -ENOMEM;
  490			goto fail;
  491		}
  492		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
  493		if (!dmabuf->virt) {
  494			rc = -ENOMEM;
  495			goto fail;
  496		}
  497		memcpy(dmabuf->virt, &phba->fc_fabparam,
  498		       sizeof(struct serv_parm));
  499	}
  500
  501	vport->port_state = LPFC_FABRIC_CFG_LINK;
  502	if (dmabuf)
  503		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
  504	else
  505		lpfc_reg_vfi(mboxq, vport, 0);
  506
  507	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
  508	mboxq->vport = vport;
  509	mboxq->ctx_buf = dmabuf;
  510	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  511	if (rc == MBX_NOT_FINISHED) {
  512		rc = -ENXIO;
  513		goto fail;
  514	}
  515	return 0;
  516
  517fail:
  518	if (mboxq)
  519		mempool_free(mboxq, phba->mbox_mem_pool);
  520	if (dmabuf) {
  521		if (dmabuf->virt)
  522			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
  523		kfree(dmabuf);
  524	}
  525
  526	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
  527	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
  528		"0289 Issue Register VFI failed: Err %d\n", rc);
  529	return rc;
  530}
  531
  532/**
  533 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
  534 * @vport: pointer to a host virtual N_Port data structure.
  535 *
  536 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
  537 * the @vport. This mailbox command is necessary for SLI4 port only.
  538 *
  539 * Return code
   540 *   0 - successfully issued UNREG_VFI for @vport
  541 *   A failure code otherwise.
  542 **/
  543int
  544lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
  545{
  546	struct lpfc_hba *phba = vport->phba;
  547	struct Scsi_Host *shost;
  548	LPFC_MBOXQ_t *mboxq;
  549	int rc;
  550
  551	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  552	if (!mboxq) {
  553		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
   554				"2556 UNREG_VFI mbox allocation failed "
  555				"HBA state x%x\n", phba->pport->port_state);
  556		return -ENOMEM;
  557	}
  558
  559	lpfc_unreg_vfi(mboxq, vport);
  560	mboxq->vport = vport;
  561	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
  562
  563	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  564	if (rc == MBX_NOT_FINISHED) {
  565		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
  566				"2557 UNREG_VFI issue mbox failed rc x%x "
  567				"HBA state x%x\n",
  568				rc, phba->pport->port_state);
  569		mempool_free(mboxq, phba->mbox_mem_pool);
  570		return -EIO;
  571	}
  572
  573	shost = lpfc_shost_from_vport(vport);
  574	spin_lock_irq(shost->host_lock);
  575	vport->fc_flag &= ~FC_VFI_REGISTERED;
  576	spin_unlock_irq(shost->host_lock);
  577	return 0;
  578}
  579
  580/**
  581 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
  582 * @vport: pointer to a host virtual N_Port data structure.
  583 * @sp: pointer to service parameter data structure.
  584 *
  585 * This routine is called from FLOGI/FDISC completion handler functions.
   586 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
   587 * Fabric node nodename is changed in the completion service parameters;
   588 * otherwise it returns 0. This function also sets a flag in the vport data
   589 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
   590 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
   591 * FCID, Fabric portname, or Fabric node nodename is changed.
   592 *
   593 * Return code
   594 *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
   595 *   1 - FCID, Fabric Nodename, or Fabric portname is changed.
  596 *
  597 **/
  598static uint8_t
  599lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
  600		struct serv_parm *sp)
  601{
  602	struct lpfc_hba *phba = vport->phba;
  603	uint8_t fabric_param_changed = 0;
  604	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  605
  606	if ((vport->fc_prevDID != vport->fc_myDID) ||
  607		memcmp(&vport->fabric_portname, &sp->portName,
  608			sizeof(struct lpfc_name)) ||
  609		memcmp(&vport->fabric_nodename, &sp->nodeName,
  610			sizeof(struct lpfc_name)) ||
  611		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
  612		fabric_param_changed = 1;
  613		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
  614	}
  615	/*
  616	 * Word 1 Bit 31 in common service parameter is overloaded.
  617	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
  618	 * Word 1 Bit 31 in FLOGI response is clean address bit
  619	 *
  620	 * If fabric parameter is changed and clean address bit is
  621	 * cleared delay nport discovery if
  622	 * - vport->fc_prevDID != 0 (not initial discovery) OR
  623	 * - lpfc_delay_discovery module parameter is set.
  624	 */
  625	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
  626	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
  627		spin_lock_irq(shost->host_lock);
  628		vport->fc_flag |= FC_DISC_DELAYED;
  629		spin_unlock_irq(shost->host_lock);
  630	}
  631
  632	return fabric_param_changed;
  633}
  634
  635
  636/**
  637 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  638 * @vport: pointer to a host virtual N_Port data structure.
  639 * @ndlp: pointer to a node-list data structure.
  640 * @sp: pointer to service parameter data structure.
  641 * @irsp: pointer to the IOCB within the lpfc response IOCB.
  642 *
  643 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
  644 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
  645 * port in a fabric topology. It properly sets up the parameters to the @ndlp
   646 * from the IOCB response. It also checks the newly assigned N_Port ID for the
  647 * @vport against the previously assigned N_Port ID. If it is different from
  648 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
  649 * is invoked on all the remaining nodes with the @vport to unregister the
  650 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
  651 * is invoked to register login to the fabric.
  652 *
  653 * Return code
  654 *   0 - Success (currently, always return 0)
  655 **/
  656static int
  657lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  658			   struct serv_parm *sp, IOCB_t *irsp)
  659{
  660	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  661	struct lpfc_hba  *phba = vport->phba;
  662	struct lpfc_nodelist *np;
  663	struct lpfc_nodelist *next_np;
  664	uint8_t fabric_param_changed;
  665
  666	spin_lock_irq(shost->host_lock);
  667	vport->fc_flag |= FC_FABRIC;
  668	spin_unlock_irq(shost->host_lock);
  669
  670	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
  671	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
  672		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
  673
  674	phba->fc_edtovResol = sp->cmn.edtovResolution;
  675	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
  676
  677	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
  678		spin_lock_irq(shost->host_lock);
  679		vport->fc_flag |= FC_PUBLIC_LOOP;
  680		spin_unlock_irq(shost->host_lock);
  681	}
  682
  683	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
  684	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
  685	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
  686	ndlp->nlp_class_sup = 0;
  687	if (sp->cls1.classValid)
  688		ndlp->nlp_class_sup |= FC_COS_CLASS1;
  689	if (sp->cls2.classValid)
  690		ndlp->nlp_class_sup |= FC_COS_CLASS2;
  691	if (sp->cls3.classValid)
  692		ndlp->nlp_class_sup |= FC_COS_CLASS3;
  693	if (sp->cls4.classValid)
  694		ndlp->nlp_class_sup |= FC_COS_CLASS4;
  695	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
  696				sp->cmn.bbRcvSizeLsb;
  697
  698	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
  699	if (fabric_param_changed) {
  700		/* Reset FDMI attribute masks based on config parameter */
  701		if (phba->cfg_enable_SmartSAN ||
  702		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
  703			/* Setup appropriate attribute masks */
  704			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
  705			if (phba->cfg_enable_SmartSAN)
  706				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
  707			else
  708				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
  709		} else {
  710			vport->fdmi_hba_mask = 0;
  711			vport->fdmi_port_mask = 0;
  712		}
  713
  714	}
  715	memcpy(&vport->fabric_portname, &sp->portName,
  716			sizeof(struct lpfc_name));
  717	memcpy(&vport->fabric_nodename, &sp->nodeName,
  718			sizeof(struct lpfc_name));
  719	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
  720
  721	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
  722		if (sp->cmn.response_multiple_NPort) {
  723			lpfc_printf_vlog(vport, KERN_WARNING,
  724					 LOG_ELS | LOG_VPORT,
  725					 "1816 FLOGI NPIV supported, "
  726					 "response data 0x%x\n",
  727					 sp->cmn.response_multiple_NPort);
  728			spin_lock_irq(&phba->hbalock);
  729			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
  730			spin_unlock_irq(&phba->hbalock);
  731		} else {
  732			/* Because we asked f/w for NPIV it still expects us
   733			to call reg_vnpid at least for the physical host */
  734			lpfc_printf_vlog(vport, KERN_WARNING,
  735					 LOG_ELS | LOG_VPORT,
  736					 "1817 Fabric does not support NPIV "
  737					 "- configuring single port mode.\n");
  738			spin_lock_irq(&phba->hbalock);
  739			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
  740			spin_unlock_irq(&phba->hbalock);
  741		}
  742	}
  743
  744	/*
  745	 * For FC we need to do some special processing because of the SLI
  746	 * Port's default settings of the Common Service Parameters.
  747	 */
  748	if ((phba->sli_rev == LPFC_SLI_REV4) &&
  749	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
  750		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
  751		if (fabric_param_changed)
  752			lpfc_unregister_fcf_prep(phba);
  753
  754		/* This should just update the VFI CSPs*/
  755		if (vport->fc_flag & FC_VFI_REGISTERED)
  756			lpfc_issue_reg_vfi(vport);
  757	}
  758
  759	if (fabric_param_changed &&
  760		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
  761
  762		/* If our NportID changed, we need to ensure all
  763		 * remaining NPORTs get unreg_login'ed.
  764		 */
  765		list_for_each_entry_safe(np, next_np,
  766					&vport->fc_nodes, nlp_listp) {
  767			if (!NLP_CHK_NODE_ACT(np))
  768				continue;
  769			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
  770				   !(np->nlp_flag & NLP_NPR_ADISC))
  771				continue;
  772			spin_lock_irq(shost->host_lock);
  773			np->nlp_flag &= ~NLP_NPR_ADISC;
  774			spin_unlock_irq(shost->host_lock);
  775			lpfc_unreg_rpi(vport, np);
  776		}
  777		lpfc_cleanup_pending_mbox(vport);
  778
  779		if (phba->sli_rev == LPFC_SLI_REV4) {
  780			lpfc_sli4_unreg_all_rpis(vport);
  781			lpfc_mbx_unreg_vpi(vport);
  782			spin_lock_irq(shost->host_lock);
  783			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
  784			spin_unlock_irq(shost->host_lock);
  785		}
  786
  787		/*
  788		 * For SLI3 and SLI4, the VPI needs to be reregistered in
  789		 * response to this fabric parameter change event.
  790		 */
  791		spin_lock_irq(shost->host_lock);
  792		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  793		spin_unlock_irq(shost->host_lock);
  794	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
  795		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
  796			/*
  797			 * Driver needs to re-reg VPI in order for f/w
  798			 * to update the MAC address.
  799			 */
  800			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  801			lpfc_register_new_vport(phba, vport, ndlp);
  802			return 0;
  803	}
  804
  805	if (phba->sli_rev < LPFC_SLI_REV4) {
  806		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
  807		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
  808		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
  809			lpfc_register_new_vport(phba, vport, ndlp);
  810		else
  811			lpfc_issue_fabric_reglogin(vport);
  812	} else {
  813		ndlp->nlp_type |= NLP_FABRIC;
  814		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  815		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
  816			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
  817			lpfc_start_fdiscs(phba);
  818			lpfc_do_scr_ns_plogi(phba, vport);
  819		} else if (vport->fc_flag & FC_VFI_REGISTERED)
  820			lpfc_issue_init_vpi(vport);
  821		else {
  822			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  823					"3135 Need register VFI: (x%x/%x)\n",
  824					vport->fc_prevDID, vport->fc_myDID);
  825			lpfc_issue_reg_vfi(vport);
  826		}
  827	}
  828	return 0;
  829}
  830
  831/**
  832 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  833 * @vport: pointer to a host virtual N_Port data structure.
  834 * @ndlp: pointer to a node-list data structure.
  835 * @sp: pointer to service parameter data structure.
  836 *
  837 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
  838 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
  839 * in a point-to-point topology. First, the @vport's N_Port Name is compared
  840 * with the received N_Port Name: if the @vport's N_Port Name is greater than
  841 * the received N_Port Name lexicographically, this node shall assign local
  842 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
  843 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
  844 * this node shall just wait for the remote node to issue PLOGI and assign
  845 * N_Port IDs.
  846 *
  847 * Return code
  848 *   0 - Success
  849 *   -ENXIO - Fail
  850 **/
  851static int
  852lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  853			  struct serv_parm *sp)
  854{
  855	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  856	struct lpfc_hba  *phba = vport->phba;
  857	LPFC_MBOXQ_t *mbox;
  858	int rc;
  859
  860	spin_lock_irq(shost->host_lock);
  861	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
  862	vport->fc_flag |= FC_PT2PT;
  863	spin_unlock_irq(shost->host_lock);
  864
  865	/* If we are pt2pt with another NPort, force NPIV off! */
  866	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
  867
  868	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
  869	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
  870		lpfc_unregister_fcf_prep(phba);
  871
  872		spin_lock_irq(shost->host_lock);
  873		vport->fc_flag &= ~FC_VFI_REGISTERED;
  874		spin_unlock_irq(shost->host_lock);
  875		phba->fc_topology_changed = 0;
  876	}
  877
  878	rc = memcmp(&vport->fc_portname, &sp->portName,
  879		    sizeof(vport->fc_portname));
  880
  881	if (rc >= 0) {
  882		/* This side will initiate the PLOGI */
  883		spin_lock_irq(shost->host_lock);
  884		vport->fc_flag |= FC_PT2PT_PLOGI;
  885		spin_unlock_irq(shost->host_lock);
  886
  887		/*
  888		 * N_Port ID cannot be 0, set our Id to LocalID
  889		 * the other side will be RemoteID.
  890		 */
  891
  892		/* not equal */
  893		if (rc)
  894			vport->fc_myDID = PT2PT_LocalID;
  895
  896		/* Decrement ndlp reference count indicating that ndlp can be
  897		 * safely released when other references to it are done.
  898		 */
  899		lpfc_nlp_put(ndlp);
  900
  901		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
  902		if (!ndlp) {
  903			/*
  904			 * Cannot find existing Fabric ndlp, so allocate a
  905			 * new one
  906			 */
  907			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
  908			if (!ndlp)
  909				goto fail;
  910		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
  911			ndlp = lpfc_enable_node(vport, ndlp,
  912						NLP_STE_UNUSED_NODE);
   913			if (!ndlp)
  914				goto fail;
  915		}
  916
  917		memcpy(&ndlp->nlp_portname, &sp->portName,
  918		       sizeof(struct lpfc_name));
  919		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
  920		       sizeof(struct lpfc_name));
  921		/* Set state will put ndlp onto node list if not already done */
  922		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  923		spin_lock_irq(shost->host_lock);
  924		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
  925		spin_unlock_irq(shost->host_lock);
  926
  927		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  928		if (!mbox)
  929			goto fail;
  930
  931		lpfc_config_link(phba, mbox);
  932
  933		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
  934		mbox->vport = vport;
  935		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  936		if (rc == MBX_NOT_FINISHED) {
  937			mempool_free(mbox, phba->mbox_mem_pool);
  938			goto fail;
  939		}
  940	} else {
  941		/* This side will wait for the PLOGI, decrement ndlp reference
  942		 * count indicating that ndlp can be released when other
  943		 * references to it are done.
  944		 */
  945		lpfc_nlp_put(ndlp);
  946
  947		/* Start discovery - this should just do CLEAR_LA */
  948		lpfc_disc_start(vport);
  949	}
  950
  951	return 0;
  952fail:
  953	return -ENXIO;
  954}
  955
  956/**
  957 * lpfc_cmpl_els_flogi - Completion callback function for flogi
  958 * @phba: pointer to lpfc hba data structure.
  959 * @cmdiocb: pointer to lpfc command iocb data structure.
  960 * @rspiocb: pointer to lpfc response iocb data structure.
  961 *
  962 * This routine is the top-level completion callback function for issuing
  963 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
  964 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
  965 * retry has been made (either immediately or delayed with lpfc_els_retry()
  966 * returning 1), the command IOCB will be released and function returned.
   967 * If the retry attempt has been given up (possibly having reached the maximum
   968 * number of retries), one additional decrement of the ndlp reference count is
   969 * made before returning, after releasing the command IOCB. This will
  970 * actually release the remote node (Note, lpfc_els_free_iocb() will also
  971 * invoke one decrement of ndlp reference count). If no error reported in
  972 * the IOCB status, the command Port ID field is used to determine whether
  973 * this is a point-to-point topology or a fabric topology: if the Port ID
  974 * field is assigned, it is a fabric topology; otherwise, it is a
  975 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
  976 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
  977 * specific topology completion conditions.
  978 **/
  979static void
  980lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  981		    struct lpfc_iocbq *rspiocb)
  982{
  983	struct lpfc_vport *vport = cmdiocb->vport;
  984	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
  985	IOCB_t *irsp = &rspiocb->iocb;
  986	struct lpfc_nodelist *ndlp = cmdiocb->context1;
  987	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
  988	struct serv_parm *sp;
  989	uint16_t fcf_index;
  990	int rc;
  991
  992	/* Check to see if link went down during discovery */
  993	if (lpfc_els_chk_latt(vport)) {
  994		/* One additional decrement on node reference count to
  995		 * trigger the release of the node
  996		 */
  997		lpfc_nlp_put(ndlp);
  998		goto out;
  999	}
 1000
 1001	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 1002		"FLOGI cmpl:      status:x%x/x%x state:x%x",
 1003		irsp->ulpStatus, irsp->un.ulpWord[4],
 1004		vport->port_state);
 1005
 1006	if (irsp->ulpStatus) {
 1007		/*
 1008		 * In case of FIP mode, perform roundrobin FCF failover
 1009		 * due to new FCF discovery
 1010		 */
 1011		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
 1012		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
 1013			if (phba->link_state < LPFC_LINK_UP)
 1014				goto stop_rr_fcf_flogi;
 1015			if ((phba->fcoe_cvl_eventtag_attn ==
 1016			     phba->fcoe_cvl_eventtag) &&
 1017			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 1018			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 1019			    IOERR_SLI_ABORTED))
 1020				goto stop_rr_fcf_flogi;
 1021			else
 1022				phba->fcoe_cvl_eventtag_attn =
 1023					phba->fcoe_cvl_eventtag;
 1024			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
 1025					"2611 FLOGI failed on FCF (x%x), "
 1026					"status:x%x/x%x, tmo:x%x, perform "
 1027					"roundrobin FCF failover\n",
 1028					phba->fcf.current_rec.fcf_indx,
 1029					irsp->ulpStatus, irsp->un.ulpWord[4],
 1030					irsp->ulpTimeout);
 1031			lpfc_sli4_set_fcf_flogi_fail(phba,
 1032					phba->fcf.current_rec.fcf_indx);
 1033			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 1034			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
 1035			if (rc)
 1036				goto out;
 1037		}
 1038
 1039stop_rr_fcf_flogi:
 1040		/* FLOGI failure */
 1041		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 1042		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 1043					IOERR_LOOP_OPEN_FAILURE)))
 1044			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 1045					"2858 FLOGI failure Status:x%x/x%x "
 1046					"TMO:x%x Data x%x x%x\n",
 1047					irsp->ulpStatus, irsp->un.ulpWord[4],
 1048					irsp->ulpTimeout, phba->hba_flag,
 1049					phba->fcf.fcf_flag);
 1050
 1051		/* Check for retry */
 1052		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 1053			goto out;
 1054
 1055		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
 1056				 "0150 FLOGI failure Status:x%x/x%x "
 1057				 "xri x%x TMO:x%x\n",
 1058				 irsp->ulpStatus, irsp->un.ulpWord[4],
 1059				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
 1060
 1061		/* If this is not a loop open failure, bail out */
 1062		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 1063		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 1064					IOERR_LOOP_OPEN_FAILURE)))
 1065			goto flogifail;
 1066
 1067		/* FLOGI failed, so there is no fabric */
 1068		spin_lock_irq(shost->host_lock);
 1069		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
 1070		spin_unlock_irq(shost->host_lock);
 1071
 1072		/* If private loop, then allow max outstanding els to be
 1073		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
 1074		 * alpa map would take too long otherwise.
 1075		 */
 1076		if (phba->alpa_map[0] == 0)
 1077			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
 1078		if ((phba->sli_rev == LPFC_SLI_REV4) &&
 1079		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
 1080		     (vport->fc_prevDID != vport->fc_myDID) ||
 1081			phba->fc_topology_changed)) {
 1082			if (vport->fc_flag & FC_VFI_REGISTERED) {
 1083				if (phba->fc_topology_changed) {
 1084					lpfc_unregister_fcf_prep(phba);
 1085					spin_lock_irq(shost->host_lock);
 1086					vport->fc_flag &= ~FC_VFI_REGISTERED;
 1087					spin_unlock_irq(shost->host_lock);
 1088					phba->fc_topology_changed = 0;
 1089				} else {
 1090					lpfc_sli4_unreg_all_rpis(vport);
 1091				}
 1092			}
 1093
 1094			/* Do not register VFI if the driver aborted FLOGI */
 1095			if (!lpfc_error_lost_link(irsp))
 1096				lpfc_issue_reg_vfi(vport);
 1097			lpfc_nlp_put(ndlp);
 1098			goto out;
 1099		}
 1100		goto flogifail;
 1101	}
 1102	spin_lock_irq(shost->host_lock);
 1103	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
 1104	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
 1105	spin_unlock_irq(shost->host_lock);
 1106
 1107	/*
 1108	 * The FLogI succeeded.  Sync the data for the CPU before
 1109	 * accessing it.
 1110	 */
 1111	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
 1112	if (!prsp)
 1113		goto out;
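	/* The first word of the response payload holds the ELS response code;
	 * the service parameters start immediately after it.
	 */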
 1114	sp = prsp->virt + sizeof(uint32_t);
 1115
 1116	/* FLOGI completes successfully */
 1117	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 1118			 "0101 FLOGI completes successfully, I/O tag:x%x, "
 1119			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
 1120			 cmdiocb->iotag, cmdiocb->sli4_xritag,
 1121			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
 1122			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
 1123			 vport->port_state, vport->fc_flag);
 1124
 1125	if (vport->port_state == LPFC_FLOGI) {
 1126		/*
 1127		 * If Common Service Parameters indicate Nport
 1128		 * we are point to point, if Fport we are Fabric.
 1129		 */
 1130		if (sp->cmn.fPort)
 1131			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
 1132		else if (!(phba->hba_flag & HBA_FCOE_MODE))
 1133			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 1134		else {
 1135			lpfc_printf_vlog(vport, KERN_ERR,
 1136				LOG_FIP | LOG_ELS,
 1137				"2831 FLOGI response with cleared Fabric "
 1138				"bit fcf_index 0x%x "
 1139				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
 1140				"Fabric Name "
 1141				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
 1142				phba->fcf.current_rec.fcf_indx,
 1143				phba->fcf.current_rec.switch_name[0],
 1144				phba->fcf.current_rec.switch_name[1],
 1145				phba->fcf.current_rec.switch_name[2],
 1146				phba->fcf.current_rec.switch_name[3],
 1147				phba->fcf.current_rec.switch_name[4],
 1148				phba->fcf.current_rec.switch_name[5],
 1149				phba->fcf.current_rec.switch_name[6],
 1150				phba->fcf.current_rec.switch_name[7],
 1151				phba->fcf.current_rec.fabric_name[0],
 1152				phba->fcf.current_rec.fabric_name[1],
 1153				phba->fcf.current_rec.fabric_name[2],
 1154				phba->fcf.current_rec.fabric_name[3],
 1155				phba->fcf.current_rec.fabric_name[4],
 1156				phba->fcf.current_rec.fabric_name[5],
 1157				phba->fcf.current_rec.fabric_name[6],
 1158				phba->fcf.current_rec.fabric_name[7]);
 1159			lpfc_nlp_put(ndlp);
 1160			spin_lock_irq(&phba->hbalock);
 1161			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 1162			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 1163			spin_unlock_irq(&phba->hbalock);
 1164			phba->fcf.fcf_redisc_attempted = 0; /* reset */
 1165			goto out;
 1166		}
 1167		if (!rc) {
 1168			/* Mark the FCF discovery process done */
 1169			if (phba->hba_flag & HBA_FIP_SUPPORT)
 1170				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
 1171						LOG_ELS,
 1172						"2769 FLOGI to FCF (x%x) "
 1173						"completed successfully\n",
 1174						phba->fcf.current_rec.fcf_indx);
 1175			spin_lock_irq(&phba->hbalock);
 1176			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 1177			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 1178			spin_unlock_irq(&phba->hbalock);
 1179			phba->fcf.fcf_redisc_attempted = 0; /* reset */
 1180			goto out;
 1181		}
 1182	}
 1183
 1184flogifail:
 1185	spin_lock_irq(&phba->hbalock);
 1186	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 1187	spin_unlock_irq(&phba->hbalock);
 1188
 1189	lpfc_nlp_put(ndlp);
 1190
 1191	if (!lpfc_error_lost_link(irsp)) {
 1192		/* FLOGI failed, so just use loop map to make discovery list */
 1193		lpfc_disc_list_loopmap(vport);
 1194
 1195		/* Start discovery */
 1196		lpfc_disc_start(vport);
 1197	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
 1198			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
 1199			 IOERR_SLI_ABORTED) &&
 1200			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
 1201			 IOERR_SLI_DOWN))) &&
 1202			(phba->link_state != LPFC_CLEAR_LA)) {
 1203		/* If FLOGI failed enable link interrupt. */
 1204		lpfc_issue_clear_la(phba, vport);
 1205	}
 1206out:
 1207	lpfc_els_free_iocb(phba, cmdiocb);
 1208}
 1209
 1210/**
 1211 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 1212 *                           aborted during a link down
 1213 * @phba: pointer to lpfc hba data structure.
 1214 * @cmdiocb: pointer to lpfc command iocb data structure.
 1215 * @rspiocb: pointer to lpfc response iocb data structure.
 1216 *
 1217 */
 1218static void
 1219lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 1220			struct lpfc_iocbq *rspiocb)
 1221{
 1222	IOCB_t *irsp;
 1223	uint32_t *pcmd;
 1224	uint32_t cmd;
 1225
 1226	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
 1227	cmd = *pcmd;
 1228	irsp = &rspiocb->iocb;
 1229
 1230	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
 1231			"6445 ELS completes after LINK_DOWN: "
 1232			" Status %x/%x cmd x%x flg x%x\n",
 1233			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
 1234			cmdiocb->iocb_flag);
 1235
 1236	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
 1237		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
 1238		atomic_dec(&phba->fabric_iocb_count);
 1239	}
 1240	lpfc_els_free_iocb(phba, cmdiocb);
 1241}
 1242
 1243/**
 1244 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 1245 * @vport: pointer to a host virtual N_Port data structure.
 1246 * @ndlp: pointer to a node-list data structure.
 1247 * @retry: number of retries to the command IOCB.
 1248 *
 1249 * This routine issues a Fabric Login (FLOGI) Request ELS command
 1250 * for a @vport. The initiator service parameters are put into the payload
 1251 * of the FLOGI Request IOCB and the top-level callback function pointer
 1252 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 1253 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 1254 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 1255 *
 1256 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 1257 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 1258 * will be stored into the context1 field of the IOCB for the completion
 1259 * callback function to the FLOGI ELS command.
 1260 *
 1261 * Return code
 1262 *   0 - successfully issued flogi iocb for @vport
 1263 *   1 - failed to issue flogi iocb for @vport
 1264 **/
 1265static int
 1266lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 1267		     uint8_t retry)
 1268{
 1269	struct lpfc_hba  *phba = vport->phba;
 1270	struct serv_parm *sp;
 1271	IOCB_t *icmd;
 1272	struct lpfc_iocbq *elsiocb;
 1273	struct lpfc_iocbq defer_flogi_acc;
 1274	uint8_t *pcmd;
 1275	uint16_t cmdsize;
 1276	uint32_t tmo, did;
 1277	int rc;
 1278
 1279	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 1280	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 1281				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 1282
 1283	if (!elsiocb)
 1284		return 1;
 1285
 1286	icmd = &elsiocb->iocb;
 1287	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 1288
 1289	/* For FLOGI request, remainder of payload is service parameters */
 1290	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
 1291	pcmd += sizeof(uint32_t);
 1292	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 1293	sp = (struct serv_parm *) pcmd;
 1294
 1295	/* Setup CSPs accordingly for Fabric */
 1296	sp->cmn.e_d_tov = 0;
 1297	sp->cmn.w2.r_a_tov = 0;
 1298	sp->cmn.virtual_fabric_support = 0;
 1299	sp->cls1.classValid = 0;
 1300	if (sp->cmn.fcphLow < FC_PH3)
 1301		sp->cmn.fcphLow = FC_PH3;
 1302	if (sp->cmn.fcphHigh < FC_PH3)
 1303		sp->cmn.fcphHigh = FC_PH3;
 1304
 1305	if  (phba->sli_rev == LPFC_SLI_REV4) {
 1306		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
 1307		    LPFC_SLI_INTF_IF_TYPE_0) {
 1308			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
 1309			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
 1310			/* FLOGI needs to be 3 for WQE FCFI */
 1311			/* Set the fcfi to the fcfi we registered with */
 1312			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
 1313		}
  1314		/* Can't do SLI4 class2 without support for sequence coalescing */
 1315		sp->cls2.classValid = 0;
 1316		sp->cls2.seqDelivery = 0;
 1317	} else {
 1318		/* Historical, setting sequential-delivery bit for SLI3 */
 1319		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
 1320		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
 1321		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 1322			sp->cmn.request_multiple_Nport = 1;
 1323			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
 1324			icmd->ulpCt_h = 1;
 1325			icmd->ulpCt_l = 0;
 1326		} else
 1327			sp->cmn.request_multiple_Nport = 0;
 1328	}
 1329
 1330	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 1331		icmd->un.elsreq64.myID = 0;
 1332		icmd->un.elsreq64.fl = 1;
 1333	}
 1334
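	/*
	 * Save fc_ratov, substitute the FLOGI discovery timeout while arming
	 * the discovery timer via lpfc_set_disctmo(), then restore the saved
	 * R_A_TOV value.
	 */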
 1335	tmo = phba->fc_ratov;
 1336	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
 1337	lpfc_set_disctmo(vport);
 1338	phba->fc_ratov = tmo;
 1339
 1340	phba->fc_stat.elsXmitFLOGI++;
 1341	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 1342
 1343	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 1344		"Issue FLOGI:     opt:x%x",
 1345		phba->sli3_options, 0, 0);
 1346
 1347	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 1348
 1349	phba->hba_flag |= HBA_FLOGI_ISSUED;
 1350
 1351	/* Check for a deferred FLOGI ACC condition */
 1352	if (phba->defer_flogi_acc_flag) {
 1353		did = vport->fc_myDID;
 1354		vport->fc_myDID = Fabric_DID;
 1355
 1356		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
 1357
 1358		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
 1359		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
 1360						phba->defer_flogi_acc_ox_id;
 1361
 1362		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 1363				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
 1364				 " ox_id: x%x, hba_flag x%x\n",
 1365				 phba->defer_flogi_acc_rx_id,
 1366				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
 1367
 1368		/* Send deferred FLOGI ACC */
 1369		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
 1370				 ndlp, NULL);
 1371
 1372		phba->defer_flogi_acc_flag = false;
 1373
 1374		vport->fc_myDID = did;
 1375	}
 1376
 1377	if (rc == IOCB_ERROR) {
 1378		lpfc_els_free_iocb(phba, elsiocb);
 1379		return 1;
 1380	}
 1381	return 0;
 1382}
 1383
 1384/**
 1385 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 1386 * @phba: pointer to lpfc hba data structure.
 1387 *
 1388 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
  1389 * associated with a @phba. This routine walks all the outstanding IOCBs on the
  1390 * txcmplq list and issues an abort IOCB command on each outstanding IOCB that
  1391 * contains an active Fabric_DID ndlp. Note that this function only issues
 1392 * the abort IOCB command on all the outstanding IOCBs, thus when this
 1393 * function returns, it does not guarantee all the IOCBs are actually aborted.
 1394 *
 1395 * Return code
  1396 *   0 - Successfully issued abort iocb on all outstanding flogis; -EIO if the ELS ring is unavailable
 1397 **/
 1398int
 1399lpfc_els_abort_flogi(struct lpfc_hba *phba)
 1400{
 1401	struct lpfc_sli_ring *pring;
 1402	struct lpfc_iocbq *iocb, *next_iocb;
 1403	struct lpfc_nodelist *ndlp;
 1404	IOCB_t *icmd;
 1405
 1406	/* Abort outstanding I/O on NPort <nlp_DID> */
 1407	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
 1408			"0201 Abort outstanding I/O on NPort x%x\n",
 1409			Fabric_DID);
 1410
 1411	pring = lpfc_phba_elsring(phba);
 1412	if (unlikely(!pring))
 1413		return -EIO;
 1414
 1415	/*
 1416	 * Check the txcmplq for an iocb that matches the nport the driver is
 1417	 * searching for.
 1418	 */
 1419	spin_lock_irq(&phba->hbalock);
 1420	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 1421		icmd = &iocb->iocb;
 1422		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
 1423			ndlp = (struct lpfc_nodelist *)(iocb->context1);
 1424			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 1425			    (ndlp->nlp_DID == Fabric_DID))
 1426				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 1427		}
 1428	}
 1429	spin_unlock_irq(&phba->hbalock);
 1430
 1431	return 0;
 1432}
 1433
 1434/**
 1435 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 1436 * @vport: pointer to a host virtual N_Port data structure.
 1437 *
 1438 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 1439 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
  1440 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
  1441 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
 1442 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 1443 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 1444 * @vport.
 1445 *
 1446 * Return code
 1447 *   0 - failed to issue initial flogi for @vport
 1448 *   1 - successfully issued initial flogi for @vport
 1449 **/
 1450int
 1451lpfc_initial_flogi(struct lpfc_vport *vport)
 1452{
 1453	struct lpfc_nodelist *ndlp;
 1454
 1455	vport->port_state = LPFC_FLOGI;
 1456	lpfc_set_disctmo(vport);
 1457
 1458	/* First look for the Fabric ndlp */
 1459	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 1460	if (!ndlp) {
 1461		/* Cannot find existing Fabric ndlp, so allocate a new one */
 1462		ndlp = lpfc_nlp_init(vport, Fabric_DID);
 1463		if (!ndlp)
 1464			return 0;
 1465		/* Set the node type */
 1466		ndlp->nlp_type |= NLP_FABRIC;
 1467		/* Put ndlp onto node list */
 1468		lpfc_enqueue_node(vport, ndlp);
 1469	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 1470		/* re-setup ndlp without removing from node list */
 1471		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 1472		if (!ndlp)
 1473			return 0;
 1474	}
 1475
 1476	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
 1477		/* This decrement of reference count to node shall kick off
 1478		 * the release of the node.
 1479		 */
 1480		lpfc_nlp_put(ndlp);
 1481		return 0;
 1482	}
 1483	return 1;
 1484}
 1485
 1486/**
 1487 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 1488 * @vport: pointer to a host virtual N_Port data structure.
 1489 *
 1490 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 1491 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
  1492 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
  1493 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
 1494 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 1495 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 1496 * @vport.
 1497 *
 1498 * Return code
 1499 *   0 - failed to issue initial fdisc for @vport
 1500 *   1 - successfully issued initial fdisc for @vport
 1501 **/
 1502int
 1503lpfc_initial_fdisc(struct lpfc_vport *vport)
 1504{
 1505	struct lpfc_nodelist *ndlp;
 1506
 1507	/* First look for the Fabric ndlp */
 1508	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 1509	if (!ndlp) {
 1510		/* Cannot find existing Fabric ndlp, so allocate a new one */
 1511		ndlp = lpfc_nlp_init(vport, Fabric_DID);
 1512		if (!ndlp)
 1513			return 0;
 1514		/* Put ndlp onto node list */
 1515		lpfc_enqueue_node(vport, ndlp);
 1516	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 1517		/* re-setup ndlp without removing from node list */
 1518		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 1519		if (!ndlp)
 1520			return 0;
 1521	}
 1522
 1523	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
 1524		/* decrement node reference count to trigger the release of
 1525		 * the node.
 1526		 */
 1527		lpfc_nlp_put(ndlp);
 1528		return 0;
 1529	}
 1530	return 1;
 1531}
 1532
 1533/**
 1534 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 1535 * @vport: pointer to a host virtual N_Port data structure.
 1536 *
 1537 * This routine checks whether there are more remaining Port Logins
 1538 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 1539 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 1540 * to issue ELS PLOGIs up to the configured discover threads with the
 1541 * @vport (@vport->cfg_discovery_threads). The function also decrements
 1542 * the @vport's num_disc_nodes by 1 if it is not already 0.
 1543 **/
 1544void
 1545lpfc_more_plogi(struct lpfc_vport *vport)
 1546{
 1547	if (vport->num_disc_nodes)
 1548		vport->num_disc_nodes--;
 1549
 1550	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
 1551	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 1552			 "0232 Continue discovery with %d PLOGIs to go "
 1553			 "Data: x%x x%x x%x\n",
 1554			 vport->num_disc_nodes, vport->fc_plogi_cnt,
 1555			 vport->fc_flag, vport->port_state);
 1556	/* Check to see if there are more PLOGIs to be sent */
 1557	if (vport->fc_flag & FC_NLP_MORE)
 1558		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
 1559		lpfc_els_disc_plogi(vport);
 1560
 1561	return;
 1562}
 1563
 1564/**
 1565 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches the stored ndlp
 1566 * @phba: pointer to lpfc hba data structure.
 1567 * @prsp: pointer to response IOCB payload.
 1568 * @ndlp: pointer to a node-list data structure.
 1569 *
 1570 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 1571 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 1572 * The following cases are considered N_Port confirmed:
 1573 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and it
 1574 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the vport
 1575 * list but does not have a WWPN assigned either. If the WWPN is confirmed, the
 1576 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 1577 * 1) if there is a node on vport list other than the @ndlp with the same
 1578 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 1579 * on that node to release the RPI associated with the node; 2) if there is
 1580 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 1581 * into, a new node shall be allocated (or activated). In either case, the
 1582 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 1583 * be released and the new_ndlp shall be put on to the vport node list and
 1584 * its pointer returned as the confirmed node.
 1585 *
 1586 * Note that before the @ndlp gets "released", the keepDID from the
 1587 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 1588 * the nlp_DID of the @ndlp. This is because releasing the @ndlp actually puts
 1589 * it into an inactive state on the vport node list, and the vport node list
 1590 * management algorithm does not allow two nodes with the same DID.
 1591 *
 1592 * Return code
 1593 *   pointer to the PLOGI N_Port @ndlp
 1594 **/
 1595static struct lpfc_nodelist *
 1596lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 1597			 struct lpfc_nodelist *ndlp)
 1598{
 1599	struct lpfc_vport *vport = ndlp->vport;
 1600	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 1601	struct lpfc_nodelist *new_ndlp;
 1602	struct lpfc_rport_data *rdata;
 1603	struct fc_rport *rport;
 1604	struct serv_parm *sp;
 1605	uint8_t  name[sizeof(struct lpfc_name)];
 1606	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
 1607	uint32_t keep_new_nlp_flag = 0;
 1608	uint16_t keep_nlp_state;
 1609	u32 keep_nlp_fc4_type = 0;
 1610	struct lpfc_nvme_rport *keep_nrport = NULL;
 1611	int  put_node;
 1612	int  put_rport;
 1613	unsigned long *active_rrqs_xri_bitmap = NULL;
 1614
 1615	/* Fabric nodes can have the same WWPN so we don't bother searching
 1616	 * by WWPN.  Just return the ndlp that was given to us.
 1617	 */
 1618	if (ndlp->nlp_type & NLP_FABRIC)
 1619		return ndlp;
 1620
 1621	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
 1622	memset(name, 0, sizeof(struct lpfc_name));
 1623
 1624	/* Now we find out if the NPort we are logging into matches the WWPN
 1625	 * we have for that ndlp. If not, we have some work to do.
 1626	 */
 1627	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
 1628
 1629	/* return immediately if the WWPN matches ndlp */
 1630	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 1631		return ndlp;
 1632
 1633	if (phba->sli_rev == LPFC_SLI_REV4) {
 1634		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
 1635						       GFP_KERNEL);
 1636		if (active_rrqs_xri_bitmap)
 1637			memset(active_rrqs_xri_bitmap, 0,
 1638			       phba->cfg_rrq_xri_bitmap_sz);
 1639	}
 1640
 1641	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
 1642			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
 1643			 "new_ndlp x%x x%x x%x\n",
 1644			 ndlp->nlp_DID, ndlp->nlp_flag,  ndlp->nlp_fc4_type,
 1645			 (new_ndlp ? new_ndlp->nlp_DID : 0),
 1646			 (new_ndlp ? new_ndlp->nlp_flag : 0),
 1647			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
 1648
 1649	if (!new_ndlp) {
 1650		rc = memcmp(&ndlp->nlp_portname, name,
 1651			    sizeof(struct lpfc_name));
 1652		if (!rc) {
 1653			if (active_rrqs_xri_bitmap)
 1654				mempool_free(active_rrqs_xri_bitmap,
 1655					     phba->active_rrq_pool);
 1656			return ndlp;
 1657		}
 1658		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
 1659		if (!new_ndlp) {
 1660			if (active_rrqs_xri_bitmap)
 1661				mempool_free(active_rrqs_xri_bitmap,
 1662					     phba->active_rrq_pool);
 1663			return ndlp;
 1664		}
 1665	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
 1666		rc = memcmp(&ndlp->nlp_portname, name,
 1667			    sizeof(struct lpfc_name));
 1668		if (!rc) {
 1669			if (active_rrqs_xri_bitmap)
 1670				mempool_free(active_rrqs_xri_bitmap,
 1671					     phba->active_rrq_pool);
 1672			return ndlp;
 1673		}
 1674		new_ndlp = lpfc_enable_node(vport, new_ndlp,
 1675						NLP_STE_UNUSED_NODE);
 1676		if (!new_ndlp) {
 1677			if (active_rrqs_xri_bitmap)
 1678				mempool_free(active_rrqs_xri_bitmap,
 1679					     phba->active_rrq_pool);
 1680			return ndlp;
 1681		}
 1682		keepDID = new_ndlp->nlp_DID;
 1683		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
 1684			memcpy(active_rrqs_xri_bitmap,
 1685			       new_ndlp->active_rrqs_xri_bitmap,
 1686			       phba->cfg_rrq_xri_bitmap_sz);
 1687	} else {
 1688		keepDID = new_ndlp->nlp_DID;
 1689		if (phba->sli_rev == LPFC_SLI_REV4 &&
 1690		    active_rrqs_xri_bitmap)
 1691			memcpy(active_rrqs_xri_bitmap,
 1692			       new_ndlp->active_rrqs_xri_bitmap,
 1693			       phba->cfg_rrq_xri_bitmap_sz);
 1694	}
 1695
 1696	/* At this point in this routine, we know new_ndlp will be
 1697	 * returned. However, any previous GID_FTs that were done
 1698	 * would have updated nlp_fc4_type in ndlp, so we must ensure
 1699	 * new_ndlp has the right value.
 1700	 */
 1701	if (vport->fc_flag & FC_FABRIC) {
 1702		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
 1703		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
 1704	}
 1705
 1706	lpfc_unreg_rpi(vport, new_ndlp);
 1707	new_ndlp->nlp_DID = ndlp->nlp_DID;
 1708	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
 1709	if (phba->sli_rev == LPFC_SLI_REV4)
 1710		memcpy(new_ndlp->active_rrqs_xri_bitmap,
 1711		       ndlp->active_rrqs_xri_bitmap,
 1712		       phba->cfg_rrq_xri_bitmap_sz);
 1713
 1714	spin_lock_irq(shost->host_lock);
 1715	keep_new_nlp_flag = new_ndlp->nlp_flag;
 1716	keep_nlp_flag = ndlp->nlp_flag;
 1717	new_ndlp->nlp_flag = ndlp->nlp_flag;
 1718
 1719	/* if new_ndlp had NLP_UNREG_INP set, keep it */
 1720	if (keep_new_nlp_flag & NLP_UNREG_INP)
 1721		new_ndlp->nlp_flag |= NLP_UNREG_INP;
 1722	else
 1723		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
 1724
 1725	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
 1726	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
 1727		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 1728	else
 1729		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
 1730
 1731	ndlp->nlp_flag = keep_new_nlp_flag;
 1732
 1733	/* if ndlp had NLP_UNREG_INP set, keep it */
 1734	if (keep_nlp_flag & NLP_UNREG_INP)
 1735		ndlp->nlp_flag |= NLP_UNREG_INP;
 1736	else
 1737		ndlp->nlp_flag &= ~NLP_UNREG_INP;
 1738
 1739	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
 1740	if (keep_nlp_flag & NLP_RPI_REGISTERED)
 1741		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 1742	else
 1743		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
 1744
 1745	spin_unlock_irq(shost->host_lock);
 1746
 1747	/* Set nlp_states accordingly */
 1748	keep_nlp_state = new_ndlp->nlp_state;
 1749	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
 1750
 1751	/* interchange the nvme remoteport structs */
 1752	keep_nrport = new_ndlp->nrport;
 1753	new_ndlp->nrport = ndlp->nrport;
 1754
 1755	/* Move this back to NPR state */
 1756	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
 1757		/* The new_ndlp is replacing ndlp totally, so we need
 1758		 * to put ndlp on UNUSED list and try to free it.
 1759		 */
 1760		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 1761			 "3179 PLOGI confirm NEW: %x %x\n",
 1762			 new_ndlp->nlp_DID, keepDID);
 1763
 1764		/* Fix up the rport accordingly */
 1765		rport =  ndlp->rport;
 1766		if (rport) {
 1767			rdata = rport->dd_data;
 1768			if (rdata->pnode == ndlp) {
 1769				/* break the link before dropping the ref */
 1770				ndlp->rport = NULL;
 1771				lpfc_nlp_put(ndlp);
 1772				rdata->pnode = lpfc_nlp_get(new_ndlp);
 1773				new_ndlp->rport = rport;
 1774			}
 1775			new_ndlp->nlp_type = ndlp->nlp_type;
 1776		}
 1777
 1778		/* Fix up the nvme rport */
 1779		if (ndlp->nrport) {
 1780			ndlp->nrport = NULL;
 1781			lpfc_nlp_put(ndlp);
 1782		}
 1783
 1784		/* We shall actually free the ndlp when both its nlp_DID and
 1785		 * nlp_portname fields are 0, to avoid leaving an ndlp on the
 1786		 * nodelist that can never be used.
 1787		 */
 1788		if (ndlp->nlp_DID == 0) {
 1789			spin_lock_irq(&phba->ndlp_lock);
 1790			NLP_SET_FREE_REQ(ndlp);
 1791			spin_unlock_irq(&phba->ndlp_lock);
 1792		}
 1793
 1794		/* Two ndlps cannot have the same did on the nodelist.
 1795		 * Note: for this case, ndlp has a NULL WWPN so setting
 1796		 * the nlp_fc4_type isn't required.
 1797		 */
 1798		ndlp->nlp_DID = keepDID;
 1799		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
 1800		if (phba->sli_rev == LPFC_SLI_REV4 &&
 1801		    active_rrqs_xri_bitmap)
 1802			memcpy(ndlp->active_rrqs_xri_bitmap,
 1803			       active_rrqs_xri_bitmap,
 1804			       phba->cfg_rrq_xri_bitmap_sz);
 1805
 1806		if (!NLP_CHK_NODE_ACT(ndlp))
 1807			lpfc_drop_node(vport, ndlp);
 1808	} else {
 1810		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 1811			 "3180 PLOGI confirm SWAP: %x %x\n",
 1812			 new_ndlp->nlp_DID, keepDID);
 1813
 1814		lpfc_unreg_rpi(vport, ndlp);
 1815
 1816		/* Two ndlps cannot have the same did and the fc4
 1817		 * type must be transferred because the ndlp is in
 1818		 * flight.
 1819		 */
 1820		ndlp->nlp_DID = keepDID;
 1821		ndlp->nlp_fc4_type = keep_nlp_fc4_type;
 1822
 1823		if (phba->sli_rev == LPFC_SLI_REV4 &&
 1824		    active_rrqs_xri_bitmap)
 1825			memcpy(ndlp->active_rrqs_xri_bitmap,
 1826			       active_rrqs_xri_bitmap,
 1827			       phba->cfg_rrq_xri_bitmap_sz);
 1828
 1829		/* Since we are switching over to the new_ndlp,
 1830		 * reset the old ndlp state
 1831		 */
 1832		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
 1833		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
 1834			keep_nlp_state = NLP_STE_NPR_NODE;
 1835		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
 1836
 1837		/* Previous ndlp no longer active with nvme host transport.
 1838		 * Remove reference from earlier registration unless the
 1839		 * nvme host took care of it.
 1840		 */
 1841		if (ndlp->nrport)
 1842			lpfc_nlp_put(ndlp);
 1843		ndlp->nrport = keep_nrport;
 1844
 1845		/* Fix up the rport accordingly */
 1846		rport = ndlp->rport;
 1847		if (rport) {
 1848			rdata = rport->dd_data;
 1849			put_node = rdata->pnode != NULL;
 1850			put_rport = ndlp->rport != NULL;
 1851			rdata->pnode = NULL;
 1852			ndlp->rport = NULL;
 1853			if (put_node)
 1854				lpfc_nlp_put(ndlp);
 1855			if (put_rport)
 1856				put_device(&rport->dev);
 1857		}
 1858	}
 1859	if (phba->sli_rev == LPFC_SLI_REV4 &&
 1860	    active_rrqs_xri_bitmap)
 1861		mempool_free(active_rrqs_xri_bitmap,
 1862			     phba->active_rrq_pool);
 1863
 1864	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
 1865			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
 1866			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
 1867			 new_ndlp->nlp_fc4_type);
 1868
 1869	return new_ndlp;
 1870}
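
/*
 * Illustrative sketch only (guarded out of the build): the nlp_flag swap rule
 * used by lpfc_plogi_confirm_nport() above, isolated for clarity.  The two
 * nodes exchange their nlp_flag bits, except that NLP_UNREG_INP and
 * NLP_RPI_REGISTERED always remain with the node they were originally set on,
 * because they track per-RPI state that must not migrate with the DID/WWPN
 * swap.  The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_swap_nlp_flags(struct lpfc_nodelist *a,
				   struct lpfc_nodelist *b)
{
	uint32_t keep_a = a->nlp_flag;
	uint32_t keep_b = b->nlp_flag;
	uint32_t sticky = NLP_UNREG_INP | NLP_RPI_REGISTERED;

	/* exchange everything ... */
	a->nlp_flag = keep_b;
	b->nlp_flag = keep_a;

	/* ... then give the per-RPI bits back to their original owners */
	a->nlp_flag = (a->nlp_flag & ~sticky) | (keep_a & sticky);
	b->nlp_flag = (b->nlp_flag & ~sticky) | (keep_b & sticky);
}
#endif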
 1871
 1872/**
 1873 * lpfc_end_rscn - Check and handle more rscn for a vport
 1874 * @vport: pointer to a host virtual N_Port data structure.
 1875 *
 1876 * This routine checks whether more Registration State Change
 1877 * Notifications (RSCNs) came in while the discovery state machine was in
 1878 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 1879 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 1880 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 1881 * handling the RSCNs.
 1882 **/
 1883void
 1884lpfc_end_rscn(struct lpfc_vport *vport)
 1885{
 1886	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 1887
 1888	if (vport->fc_flag & FC_RSCN_MODE) {
 1889		/*
 1890		 * Check to see if more RSCNs came in while we were
 1891		 * processing this one.
 1892		 */
 1893		if (vport->fc_rscn_id_cnt ||
 1894		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
 1895			lpfc_els_handle_rscn(vport);
 1896		else {
 1897			spin_lock_irq(shost->host_lock);
 1898			vport->fc_flag &= ~FC_RSCN_MODE;
 1899			spin_unlock_irq(shost->host_lock);
 1900		}
 1901	}
 1902}
 1903
 1904/**
 1905 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
 1906 * @phba: pointer to lpfc hba data structure.
 1907 * @cmdiocb: pointer to lpfc command iocb data structure.
 1908 * @rspiocb: pointer to lpfc response iocb data structure.
 1909 *
 1910 * This routine will call the clear rrq function to free the rrq and
 1911 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
 1912 * exist then the clear_rrq is still called because the rrq needs to
 1913 * be freed.
 1914 **/
 1916static void
 1917lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 1918		    struct lpfc_iocbq *rspiocb)
 1919{
 1920	struct lpfc_vport *vport = cmdiocb->vport;
 1921	IOCB_t *irsp;
 1922	struct lpfc_nodelist *ndlp;
 1923	struct lpfc_node_rrq *rrq;
 1924
 1925	/* we pass cmdiocb to state machine which needs rspiocb as well */
 1926	rrq = cmdiocb->context_un.rrq;
 1927	cmdiocb->context_un.rsp_iocb = rspiocb;
 1928
 1929	irsp = &rspiocb->iocb;
 1930	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 1931		"RRQ cmpl:      status:x%x/x%x did:x%x",
 1932		irsp->ulpStatus, irsp->un.ulpWord[4],
 1933		irsp->un.elsreq64.remoteID);
 1934
 1935	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
 1936	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
 1937		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 1938				 "2882 RRQ completes to NPort x%x "
 1939				 "with no ndlp. Data: x%x x%x x%x\n",
 1940				 irsp->un.elsreq64.remoteID,
 1941				 irsp->ulpStatus, irsp->un.ulpWord[4],
 1942				 irsp->ulpIoTag);
 1943		goto out;
 1944	}
 1945
 1946	/* rrq completes to NPort <nlp_DID> */
 1947	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 1948			 "2880 RRQ completes to NPort x%x "
 1949			 "Data: x%x x%x x%x x%x x%x\n",
 1950			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
 1951			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
 1952
 1953	if (irsp->ulpStatus) {
 1954		/* Check for retry */
 1955		/* RRQ failed. Don't print the vport to vport rjts */
 1956		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
 1957			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
 1958			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
 1959			(phba)->pport->cfg_log_verbose & LOG_ELS)
 1960			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 1961				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
 1962				 ndlp->nlp_DID, irsp->ulpStatus,
 1963				 irsp->un.ulpWord[4]);
 1964	}
 1965out:
 1966	if (rrq)
 1967		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 1968	lpfc_els_free_iocb(phba, cmdiocb);
 1969	return;
 1970}
 1971/**
 1972 * lpfc_cmpl_els_plogi - Completion callback function for plogi
 1973 * @phba: pointer to lpfc hba data structure.
 1974 * @cmdiocb: pointer to lpfc command iocb data structure.
 1975 * @rspiocb: pointer to lpfc response iocb data structure.
 1976 *
 1977 * This routine is the completion callback function for issuing the Port
 1978 * Login (PLOGI) command. For PLOGI completion, there must be an active
 1979 * ndlp on the vport node list that matches the remote node ID from the
 1980 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
 1981 * ignored and the command IOCB released. The PLOGI response IOCB status is
 1982 * checked for error conditions. If an error status is reported, a PLOGI
 1983 * retry shall be attempted by invoking the lpfc_els_retry() routine.
 1984 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
 1985 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
 1986 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
 1987 * there are additional N_Port nodes with the vport that need to perform
 1988 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue
 1989 * additional PLOGIs.
 1990 **/
 1991static void
 1992lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 1993		    struct lpfc_iocbq *rspiocb)
 1994{
 1995	struct lpfc_vport *vport = cmdiocb->vport;
 1996	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 1997	IOCB_t *irsp;
 1998	struct lpfc_nodelist *ndlp;
 1999	struct lpfc_dmabuf *prsp;
 2000	int disc;
 2001
 2002	/* we pass cmdiocb to state machine which needs rspiocb as well */
 2003	cmdiocb->context_un.rsp_iocb = rspiocb;
 2004
 2005	irsp = &rspiocb->iocb;
 2006	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2007		"PLOGI cmpl:      status:x%x/x%x did:x%x",
 2008		irsp->ulpStatus, irsp->un.ulpWord[4],
 2009		irsp->un.elsreq64.remoteID);
 2010
 2011	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
 2012	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 2013		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 2014				 "0136 PLOGI completes to NPort x%x "
 2015				 "with no ndlp. Data: x%x x%x x%x\n",
 2016				 irsp->un.elsreq64.remoteID,
 2017				 irsp->ulpStatus, irsp->un.ulpWord[4],
 2018				 irsp->ulpIoTag);
 2019		goto out;
 2020	}
 2021
 2022	/* Since ndlp can be freed in the disc state machine, note if this node
 2023	 * is being used during discovery.
 2024	 */
 2025	spin_lock_irq(shost->host_lock);
 2026	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
 2027	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 2028	spin_unlock_irq(shost->host_lock);
 2029
 2030	/* PLOGI completes to NPort <nlp_DID> */
 2031	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 2032			 "0102 PLOGI completes to NPort x%06x "
 2033			 "Data: x%x x%x x%x x%x x%x\n",
 2034			 ndlp->nlp_DID, ndlp->nlp_fc4_type,
 2035			 irsp->ulpStatus, irsp->un.ulpWord[4],
 2036			 disc, vport->num_disc_nodes);
 2037
 2038	/* Check to see if link went down during discovery */
 2039	if (lpfc_els_chk_latt(vport)) {
 2040		spin_lock_irq(shost->host_lock);
 2041		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 2042		spin_unlock_irq(shost->host_lock);
 2043		goto out;
 2044	}
 2045
 2046	if (irsp->ulpStatus) {
 2047		/* Check for retry */
 2048		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 2049			/* ELS command is being retried */
 2050			if (disc) {
 2051				spin_lock_irq(shost->host_lock);
 2052				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 2053				spin_unlock_irq(shost->host_lock);
 2054			}
 2055			goto out;
 2056		}
 2057		/* PLOGI failed. Don't print the vport to vport rjts */
 2058		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
 2059			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
 2060			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
 2061			(phba)->pport->cfg_log_verbose & LOG_ELS)
 2062			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 2063				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
 2064				 ndlp->nlp_DID, irsp->ulpStatus,
 2065				 irsp->un.ulpWord[4]);
 2066		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 2067		if (!lpfc_error_lost_link(irsp))
 2068			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2069						NLP_EVT_CMPL_PLOGI);
 2070	} else {
 2071		/* Good status, call state machine */
 2072		prsp = list_entry(((struct lpfc_dmabuf *)
 2073				   cmdiocb->context2)->list.next,
 2074				  struct lpfc_dmabuf, list);
 2075		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 2076		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2077					     NLP_EVT_CMPL_PLOGI);
 2078	}
 2079
 2080	if (disc && vport->num_disc_nodes) {
 2081		/* Check to see if there are more PLOGIs to be sent */
 2082		lpfc_more_plogi(vport);
 2083
 2084		if (vport->num_disc_nodes == 0) {
 2085			spin_lock_irq(shost->host_lock);
 2086			vport->fc_flag &= ~FC_NDISC_ACTIVE;
 2087			spin_unlock_irq(shost->host_lock);
 2088
 2089			lpfc_can_disctmo(vport);
 2090			lpfc_end_rscn(vport);
 2091		}
 2092	}
 2093
 2094out:
 2095	lpfc_els_free_iocb(phba, cmdiocb);
 2096	return;
 2097}
 2098
 2099/**
 2100 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
 2101 * @vport: pointer to a host virtual N_Port data structure.
 2102 * @did: destination port identifier.
 2103 * @retry: number of retries to the command IOCB.
 2104 *
 2105 * This routine issues a Port Login (PLOGI) command to a remote N_Port
 2106 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
 2107 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
 2108 * This routine constructs the proper fields of the PLOGI IOCB and invokes
 2109 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
 2110 *
 2111 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 2112 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 2113 * will be stored into the context1 field of the IOCB for the completion
 2114 * callback function to the PLOGI ELS command.
 2115 *
 2116 * Return code
 2117 *   0 - Successfully issued a plogi for @vport
 2118 *   1 - failed to issue a plogi for @vport
 2119 **/
 2120int
 2121lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 2122{
 2123	struct lpfc_hba  *phba = vport->phba;
 2124	struct Scsi_Host *shost;
 2125	struct serv_parm *sp;
 2126	struct lpfc_nodelist *ndlp;
 2127	struct lpfc_iocbq *elsiocb;
 2128	uint8_t *pcmd;
 2129	uint16_t cmdsize;
 2130	int ret;
 2131
 2132	ndlp = lpfc_findnode_did(vport, did);
 2133
 2134	if (ndlp) {
 2135		/* Defer the processing of the issue PLOGI until after the
 2136		 * outstanding UNREG_RPI mbox command completes, unless we
 2137		 * are going offline. This logic does not apply for Fabric DIDs
 2138		 */
 2139		if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
 2140		    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
 2141		    !(vport->fc_flag & FC_OFFLINE_MODE)) {
 2142			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 2143					 "4110 Issue PLOGI x%x deferred "
 2144					 "on NPort x%x rpi x%x Data: x%px\n",
 2145					 ndlp->nlp_defer_did, ndlp->nlp_DID,
 2146					 ndlp->nlp_rpi, ndlp);
 2147
 2148			/* We can only defer 1st PLOGI */
 2149			if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
 2150				ndlp->nlp_defer_did = did;
 2151			return 0;
 2152		}
 2153		if (!NLP_CHK_NODE_ACT(ndlp))
 2154			ndlp = NULL;
 2155	}
 2156
 2157	/* If ndlp is not NULL, we will bump the reference count on it */
 2158	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 2159	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
 2160				     ELS_CMD_PLOGI);
 2161	if (!elsiocb)
 2162		return 1;
 2163
 2164	shost = lpfc_shost_from_vport(vport);
 2165	if (ndlp) {
 2166		spin_lock_irq(shost->host_lock);
 2167		ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
 2168		spin_unlock_irq(shost->host_lock);
 2169	}
 2168
 2169	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 2170
 2171	/* For PLOGI request, remainder of payload is service parameters */
 2172	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
 2173	pcmd += sizeof(uint32_t);
 2174	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 2175	sp = (struct serv_parm *) pcmd;
 2176
 2177	/*
 2178	 * If we are an N_Port connected to a Fabric, fix up parameters so logins
 2179	 * to devices on remote loops work.
 2180	 */
 2181	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
 2182		sp->cmn.altBbCredit = 1;
 2183
 2184	if (sp->cmn.fcphLow < FC_PH_4_3)
 2185		sp->cmn.fcphLow = FC_PH_4_3;
 2186
 2187	if (sp->cmn.fcphHigh < FC_PH3)
 2188		sp->cmn.fcphHigh = FC_PH3;
 2189
 2190	sp->cmn.valid_vendor_ver_level = 0;
 2191	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
 2192	sp->cmn.bbRcvSizeMsb &= 0xF;
 2193
 2194	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2195		"Issue PLOGI:     did:x%x",
 2196		did, 0, 0);
 2197
 2198	/* If our firmware supports this feature, convey that
 2199	 * information to the target using the vendor specific field.
 2200	 */
 2201	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
 2202		sp->cmn.valid_vendor_ver_level = 1;
 2203		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
 2204		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
 2205	}
 2206
 2207	phba->fc_stat.elsXmitPLOGI++;
 2208	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
 2209	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 2210
 2211	if (ret == IOCB_ERROR) {
 2212		lpfc_els_free_iocb(phba, elsiocb);
 2213		return 1;
 2214	}
 2215	return 0;
 2216}
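
/*
 * Illustrative sketch only (guarded out of the build): using
 * lpfc_issue_els_plogi().  Note the return convention is the opposite of
 * lpfc_initial_flogi(): here 0 means the PLOGI was issued (or legitimately
 * deferred behind an outstanding UNREG_RPI) and 1 means the IOCB could not
 * be sent.  The wrapper name and the error handling policy are hypothetical.
 */
#if 0	/* example only */
static int example_login_to_nport(struct lpfc_vport *vport, uint32_t did)
{
	if (lpfc_issue_els_plogi(vport, did, 0)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "xxxx example: PLOGI to x%x not issued\n",
				 did);
		return -EIO;	/* the caller decides how to recover */
	}
	return 0;
}
#endif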
 2217
 2218/**
 2219 * lpfc_cmpl_els_prli - Completion callback function for prli
 2220 * @phba: pointer to lpfc hba data structure.
 2221 * @cmdiocb: pointer to lpfc command iocb data structure.
 2222 * @rspiocb: pointer to lpfc response iocb data structure.
 2223 *
 2224 * This routine is the completion callback function for a Process Login
 2225 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
 2226 * status. If there is error status reported, PRLI retry shall be attempted
 2227 * by invoking the lpfc_els_retry() routine. Otherwise, the state
 2228 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
 2229 * ndlp to mark the PRLI completion.
 2230 **/
 2231static void
 2232lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 2233		   struct lpfc_iocbq *rspiocb)
 2234{
 2235	struct lpfc_vport *vport = cmdiocb->vport;
 2236	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 2237	IOCB_t *irsp;
 2238	struct lpfc_nodelist *ndlp;
 2239	char *mode;
 2240
 2241	/* we pass cmdiocb to state machine which needs rspiocb as well */
 2242	cmdiocb->context_un.rsp_iocb = rspiocb;
 2243
 2244	irsp = &(rspiocb->iocb);
 2245	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 2246	spin_lock_irq(shost->host_lock);
 2247	ndlp->nlp_flag &= ~NLP_PRLI_SND;
 2248
 2249	/* Driver supports multiple FC4 types.  Counters matter. */
 2250	vport->fc_prli_sent--;
 2251	ndlp->fc4_prli_sent--;
 2252	spin_unlock_irq(shost->host_lock);
 2253
 2254	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2255		"PRLI cmpl:       status:x%x/x%x did:x%x",
 2256		irsp->ulpStatus, irsp->un.ulpWord[4],
 2257		ndlp->nlp_DID);
 2258
 2259	/* PRLI completes to NPort <nlp_DID> */
 2260	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 2261			 "0103 PRLI completes to NPort x%06x "
 2262			 "Data: x%x x%x x%x x%x\n",
 2263			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
 2264			 vport->num_disc_nodes, ndlp->fc4_prli_sent);
 2265
 2266	/* Check to see if link went down during discovery */
 2267	if (lpfc_els_chk_latt(vport))
 2268		goto out;
 2269
 2270	if (irsp->ulpStatus) {
 2271		/* Check for retry */
 2272		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 2273			/* ELS command is being retried */
 2274			goto out;
 2275		}
 2276
 2277		/* If we don't send GFT_ID to Fabric, a PRLI error
 2278		 * could be expected.
 2279		 */
 2280		if ((vport->fc_flag & FC_FABRIC) ||
 2281		    (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
 2282			mode = KERN_ERR;
 2283		else
 2284			mode = KERN_INFO;
 2285
 2286		/* PRLI failed */
 2287		lpfc_printf_vlog(vport, mode, LOG_ELS,
 2288				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
 2289				 "data: x%x\n",
 2290				 ndlp->nlp_DID, irsp->ulpStatus,
 2291				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
 2292
 2293		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 2294		if (lpfc_error_lost_link(irsp))
 2295			goto out;
 2296		else
 2297			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2298						NLP_EVT_CMPL_PRLI);
 2299	} else {
 2300		/* Good status, call state machine.  However, if another
 2301		 * PRLI is outstanding, don't call the state machine
 2302		 * because final disposition to Mapped or Unmapped is
 2303		 * completed there.
 2304		 */
 2305		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2306					NLP_EVT_CMPL_PRLI);
 2307	}
 2308
 2309out:
 2310	lpfc_els_free_iocb(phba, cmdiocb);
 2311	return;
 2312}
 2313
 2314/**
 2315 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
 2316 * @vport: pointer to a host virtual N_Port data structure.
 2317 * @ndlp: pointer to a node-list data structure.
 2318 * @retry: number of retries to the command IOCB.
 2319 *
 2320 * This routine issues a Process Login (PRLI) ELS command for the
 2321 * @vport. The PRLI service parameters are set up in the payload of the
 2322 * PRLI Request command and the pointer to the lpfc_cmpl_els_prli() routine
 2323 * is set in the IOCB completion callback function field before invoking
 2324 * the lpfc_sli_issue_iocb() routine to send out the PRLI command.
 2325 *
 2326 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 2327 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 2328 * will be stored into the context1 field of the IOCB for the completion
 2329 * callback function to the PRLI ELS command.
 2330 *
 2331 * Return code
 2332 *   0 - successfully issued prli iocb command for @vport
 2333 *   1 - failed to issue prli iocb command for @vport
 2334 **/
 2335int
 2336lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 2337		    uint8_t retry)
 2338{
 2339	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 2340	struct lpfc_hba *phba = vport->phba;
 2341	PRLI *npr;
 2342	struct lpfc_nvme_prli *npr_nvme;
 2343	struct lpfc_iocbq *elsiocb;
 2344	uint8_t *pcmd;
 2345	uint16_t cmdsize;
 2346	u32 local_nlp_type, elscmd;
 2347
 2348	/*
 2349	 * If we are in RSCN mode, the FC4 types supported from a
 2350	 * previous GFT_ID command may not be accurate. So, if we
 2351	 * are an NVME Initiator, always look for the possibility of
 2352	 * the remote NPort being an NVME Target.
 2353	 */
 2354	if (phba->sli_rev == LPFC_SLI_REV4 &&
 2355	    vport->fc_flag & FC_RSCN_MODE &&
 2356	    vport->nvmei_support)
 2357		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 2358	local_nlp_type = ndlp->nlp_fc4_type;
 2359
 2360	/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
 2361	 * fields here before any of them can complete.
 2362	 */
 2363	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 2364	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
 2365	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
 2366	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
 2367	ndlp->nvme_fb_size = 0;
 2368
 2369 send_next_prli:
 2370	if (local_nlp_type & NLP_FC4_FCP) {
 2371		/* Payload is 4 + 16 = 20 (0x14) bytes. */
 2372		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
 2373		elscmd = ELS_CMD_PRLI;
 2374	} else if (local_nlp_type & NLP_FC4_NVME) {
 2375		/* Payload is 4 + 20 = 24 (0x18) bytes. */
 2376		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
 2377		elscmd = ELS_CMD_NVMEPRLI;
 2378	} else {
 2379		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 2380				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
 2381				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
 2382		return 1;
 2383	}
 2384
 2385	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
 2386	 * FC4 type, implicitly LOGO.
 2387	 */
 2388	if (phba->sli_rev == LPFC_SLI_REV3 &&
 2389	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
 2390		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 2391				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
 2392				 ndlp->nlp_type);
 2393		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 2394		return 1;
 2395	}
 2396
 2397	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 2398				     ndlp->nlp_DID, elscmd);
 2399	if (!elsiocb)
 2400		return 1;
 2401
 2402	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 2403
 2404	/* For PRLI request, remainder of payload is service parameters */
 2405	memset(pcmd, 0, cmdsize);
 2406
 2407	if (local_nlp_type & NLP_FC4_FCP) {
 2408		/* Remainder of payload is FCP PRLI parameter page.
 2409		 * Note: this data structure is defined as
 2410		 * BE/LE in the structure definition so no
 2411		 * byte swap call is made.
 2412		 */
 2413		*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
 2414		pcmd += sizeof(uint32_t);
 2415		npr = (PRLI *)pcmd;
 2416
 2417		/*
 2418		 * If our firmware version is 3.20 or later,
 2419		 * set the following bits for FC-TAPE support.
 2420		 */
 2421		if (phba->vpd.rev.feaLevelHigh >= 0x02) {
 2422			npr->ConfmComplAllowed = 1;
 2423			npr->Retry = 1;
 2424			npr->TaskRetryIdReq = 1;
 2425		}
 2426		npr->estabImagePair = 1;
 2427		npr->readXferRdyDis = 1;
 2428		if (vport->cfg_first_burst_size)
 2429			npr->writeXferRdyDis = 1;
 2430
 2431		/* For FCP support */
 2432		npr->prliType = PRLI_FCP_TYPE;
 2433		npr->initiatorFunc = 1;
 2434		elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
 2435
 2436		/* Remove FCP type - processed. */
 2437		local_nlp_type &= ~NLP_FC4_FCP;
 2438	} else if (local_nlp_type & NLP_FC4_NVME) {
 2439		/* Remainder of payload is NVME PRLI parameter page.
 2440		 * This data structure is the newer definition that
 2441		 * uses bf macros so a byte swap is required.
 2442		 */
 2443		*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
 2444		pcmd += sizeof(uint32_t);
 2445		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
 2446		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
 2447		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
 2448		if (phba->nsler) {
 2449			bf_set(prli_nsler, npr_nvme, 1);
 2450			bf_set(prli_conf, npr_nvme, 1);
 2451		}
 2452
 2453		/* Only initiators request first burst. */
 2454		if ((phba->cfg_nvme_enable_fb) &&
 2455		    !phba->nvmet_support)
 2456			bf_set(prli_fba, npr_nvme, 1);
 2457
 2458		if (phba->nvmet_support) {
 2459			bf_set(prli_tgt, npr_nvme, 1);
 2460			bf_set(prli_disc, npr_nvme, 1);
 2461		} else {
 2462			bf_set(prli_init, npr_nvme, 1);
 2463			bf_set(prli_conf, npr_nvme, 1);
 2464		}
 2465
 2466		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
 2467		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
 2468		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
 2469
 2470		/* Remove NVME type - processed. */
 2471		local_nlp_type &= ~NLP_FC4_NVME;
 2472	}
 2473
 2474	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2475		"Issue PRLI:      did:x%x",
 2476		ndlp->nlp_DID, 0, 0);
 2477
 2478	phba->fc_stat.elsXmitPRLI++;
 2479	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
 2480	spin_lock_irq(shost->host_lock);
 2481	ndlp->nlp_flag |= NLP_PRLI_SND;
 2482
 2483	/* The vport counters are used for lpfc_scan_finished, but
 2484	 * the ndlp is used to track outstanding PRLIs for different
 2485	 * FC4 types.
 2486	 */
 2487	vport->fc_prli_sent++;
 2488	ndlp->fc4_prli_sent++;
 2489	spin_unlock_irq(shost->host_lock);
 2490	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 2491	    IOCB_ERROR) {
 2492		spin_lock_irq(shost->host_lock);
 2493		ndlp->nlp_flag &= ~NLP_PRLI_SND;
 2494		spin_unlock_irq(shost->host_lock);
 2495		lpfc_els_free_iocb(phba, elsiocb);
 2496		return 1;
 2497	}
 2498
 2500	/* The driver supports 2 FC4 types.  Make sure
 2501	 * a PRLI is issued for all types before exiting.
 2502	 */
 2503	if (phba->sli_rev == LPFC_SLI_REV4 &&
 2504	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
 2505		goto send_next_prli;
 2506
 2507	return 0;
 2508}
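
/*
 * Illustrative sketch only (guarded out of the build): the two-pass FC4
 * handling behind the send_next_prli label above, reduced to its core.  A
 * local copy of the node's FC4 mask is consumed one type at a time so that
 * both an FCP PRLI and an NVME PRLI can be built and sent before the routine
 * returns (the real code only loops on SLI4 ports and bails out on errors).
 */
#if 0	/* example only */
static void example_issue_all_prlis(u32 fc4_mask)
{
	while (fc4_mask & (NLP_FC4_FCP | NLP_FC4_NVME)) {
		if (fc4_mask & NLP_FC4_FCP) {
			/* build and send the FCP PRLI here */
			fc4_mask &= ~NLP_FC4_FCP;
		} else if (fc4_mask & NLP_FC4_NVME) {
			/* build and send the NVMEPRLI here */
			fc4_mask &= ~NLP_FC4_NVME;
		}
	}
}
#endif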
 2509
 2510/**
 2511 * lpfc_rscn_disc - Perform rscn discovery for a vport
 2512 * @vport: pointer to a host virtual N_Port data structure.
 2513 *
 2514 * This routine performs Registration State Change Notification (RSCN)
 2515 * discovery for a @vport. If the @vport's node port recovery count is not
 2516 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
 2517 * the nodes that need recovery. If none of the PLOGI were needed through
 2518 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
 2519 * invoked to check and handle possible more RSCN came in during the period
 2520 * of processing the current ones.
 2521 **/
 2522static void
 2523lpfc_rscn_disc(struct lpfc_vport *vport)
 2524{
 2525	lpfc_can_disctmo(vport);
 2526
 2527	/* RSCN discovery */
 2528	/* go thru NPR nodes and issue ELS PLOGIs */
 2529	if (vport->fc_npr_cnt)
 2530		if (lpfc_els_disc_plogi(vport))
 2531			return;
 2532
 2533	lpfc_end_rscn(vport);
 2534}
 2535
 2536/**
 2537 * lpfc_adisc_done - Complete the adisc phase of discovery
 2538 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
 2539 *
 2540 * This function is called when the final ADISC is completed during discovery.
 2541 * This function handles clearing link attention or issuing reg_vpi depending
 2542 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
 2543 * discovery.
 2544 * This function is called with no locks held.
 2545 **/
 2546static void
 2547lpfc_adisc_done(struct lpfc_vport *vport)
 2548{
 2549	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 2550	struct lpfc_hba   *phba = vport->phba;
 2551
 2552	/*
 2553	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
 2554	 * and continue discovery.
 2555	 */
 2556	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 2557	    !(vport->fc_flag & FC_RSCN_MODE) &&
 2558	    (phba->sli_rev < LPFC_SLI_REV4)) {
 2559		/* The ADISCs are complete.  Doesn't matter if they
 2560		 * succeeded or failed because the ADISC completion
 2561		 * routine guarantees to call the state machine and
 2562		 * the RPI is either unregistered (failed ADISC response)
 2563		 * or the RPI is still valid and the node is marked
 2564		 * mapped for a target.  The exchanges should be in the
 2565		 * correct state. This code is specific to SLI3.
 2566		 */
 2567		lpfc_issue_clear_la(phba, vport);
 2568		lpfc_issue_reg_vpi(phba, vport);
 2569		return;
 2570	}
 2571	/*
 2572	 * For SLI2, we need to set port_state to READY
 2573	 * and continue discovery.
 2574	 */
 2575	if (vport->port_state < LPFC_VPORT_READY) {
 2576		/* If we get here, there is nothing to ADISC */
 2577		lpfc_issue_clear_la(phba, vport);
 2578		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
 2579			vport->num_disc_nodes = 0;
 2580			/* go thru NPR list, issue ELS PLOGIs */
 2581			if (vport->fc_npr_cnt)
 2582				lpfc_els_disc_plogi(vport);
 2583			if (!vport->num_disc_nodes) {
 2584				spin_lock_irq(shost->host_lock);
 2585				vport->fc_flag &= ~FC_NDISC_ACTIVE;
 2586				spin_unlock_irq(shost->host_lock);
 2587				lpfc_can_disctmo(vport);
 2588				lpfc_end_rscn(vport);
 2589			}
 2590		}
 2591		vport->port_state = LPFC_VPORT_READY;
 2592	} else
 2593		lpfc_rscn_disc(vport);
 2594}
 2595
 2596/**
 2597 * lpfc_more_adisc - Issue more adisc as needed
 2598 * @vport: pointer to a host virtual N_Port data structure.
 2599 *
 2600 * This routine determines whether there are more ndlps on a @vport
 2601 * node list that need to have Address Discover (ADISC) issued. If so, it will
 2602 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
 2603 * remaining nodes which need to have ADISC sent.
 2604 **/
 2605void
 2606lpfc_more_adisc(struct lpfc_vport *vport)
 2607{
 2608	if (vport->num_disc_nodes)
 2609		vport->num_disc_nodes--;
 2610	/* Continue discovery with <num_disc_nodes> ADISCs to go */
 2611	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 2612			 "0210 Continue discovery with %d ADISCs to go "
 2613			 "Data: x%x x%x x%x\n",
 2614			 vport->num_disc_nodes, vport->fc_adisc_cnt,
 2615			 vport->fc_flag, vport->port_state);
 2616	/* Check to see if there are more ADISCs to be sent */
 2617	if (vport->fc_flag & FC_NLP_MORE) {
 2618		lpfc_set_disctmo(vport);
 2619		/* go thru NPR nodes and issue any remaining ELS ADISCs */
 2620		lpfc_els_disc_adisc(vport);
 2621	}
 2622	if (!vport->num_disc_nodes)
 2623		lpfc_adisc_done(vport);
 2624	return;
 2625}
 2626
 2627/**
 2628 * lpfc_cmpl_els_adisc - Completion callback function for adisc
 2629 * @phba: pointer to lpfc hba data structure.
 2630 * @cmdiocb: pointer to lpfc command iocb data structure.
 2631 * @rspiocb: pointer to lpfc response iocb data structure.
 2632 *
 2633 * This routine is the completion function for issuing the Address Discover
 2634 * (ADISC) command. It first checks to see whether link went down during
 2635 * the discovery process. If so, the node will be marked as node port
 2636 * recovery for issuing discover IOCB by the link attention handler and
 2637 * exit. Otherwise, the response status is checked. If error was reported
 2638 * in the response status, the ADISC command shall be retried by invoking
 2639 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
 2640 * the response status, the state machine is invoked to set transition
 2641 * with respect to NLP_EVT_CMPL_ADISC event.
 2642 **/
 2643static void
 2644lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 2645		    struct lpfc_iocbq *rspiocb)
 2646{
 2647	struct lpfc_vport *vport = cmdiocb->vport;
 2648	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 2649	IOCB_t *irsp;
 2650	struct lpfc_nodelist *ndlp;
 2651	int  disc;
 2652
 2653	/* we pass cmdiocb to state machine which needs rspiocb as well */
 2654	cmdiocb->context_un.rsp_iocb = rspiocb;
 2655
 2656	irsp = &(rspiocb->iocb);
 2657	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 2658
 2659	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2660		"ADISC cmpl:      status:x%x/x%x did:x%x",
 2661		irsp->ulpStatus, irsp->un.ulpWord[4],
 2662		ndlp->nlp_DID);
 2663
 2664	/* Since ndlp can be freed in the disc state machine, note if this node
 2665	 * is being used during discovery.
 2666	 */
 2667	spin_lock_irq(shost->host_lock);
 2668	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
 2669	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
 2670	spin_unlock_irq(shost->host_lock);
 2671	/* ADISC completes to NPort <nlp_DID> */
 2672	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 2673			 "0104 ADISC completes to NPort x%x "
 2674			 "Data: x%x x%x x%x x%x x%x\n",
 2675			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
 2676			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
 2677	/* Check to see if link went down during discovery */
 2678	if (lpfc_els_chk_latt(vport)) {
 2679		spin_lock_irq(shost->host_lock);
 2680		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 2681		spin_unlock_irq(shost->host_lock);
 2682		goto out;
 2683	}
 2684
 2685	if (irsp->ulpStatus) {
 2686		/* Check for retry */
 2687		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 2688			/* ELS command is being retried */
 2689			if (disc) {
 2690				spin_lock_irq(shost->host_lock);
 2691				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 2692				spin_unlock_irq(shost->host_lock);
 2693				lpfc_set_disctmo(vport);
 2694			}
 2695			goto out;
 2696		}
 2697		/* ADISC failed */
 2698		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 2699				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
 2700				 ndlp->nlp_DID, irsp->ulpStatus,
 2701				 irsp->un.ulpWord[4]);
 2702		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 2703		if (!lpfc_error_lost_link(irsp))
 2704			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2705						NLP_EVT_CMPL_ADISC);
 2706	} else
 2707		/* Good status, call state machine */
 2708		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2709					NLP_EVT_CMPL_ADISC);
 2710
 2711	/* Check to see if there are more ADISCs to be sent */
 2712	if (disc && vport->num_disc_nodes)
 2713		lpfc_more_adisc(vport);
 2714out:
 2715	lpfc_els_free_iocb(phba, cmdiocb);
 2716	return;
 2717}
 2718
 2719/**
 2720 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
 2721 * @vport: pointer to a virtual N_Port data structure.
 2722 * @ndlp: pointer to a node-list data structure.
 2723 * @retry: number of retries to the command IOCB.
 2724 *
 2725 * This routine issues an Address Discover (ADISC) for an @ndlp on a
 2726 * @vport. It prepares the payload of the ADISC ELS command, updates
 2727 * the state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
 2728 * to issue the ADISC ELS command.
 2729 *
 2730 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 2731 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 2732 * will be stored into the context1 field of the IOCB for the completion
 2733 * callback function to the ADISC ELS command.
 2734 *
 2735 * Return code
 2736 *   0 - successfully issued adisc
 2737 *   1 - failed to issue adisc
 2738 **/
 2739int
 2740lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 2741		     uint8_t retry)
 2742{
 2743	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 2744	struct lpfc_hba  *phba = vport->phba;
 2745	ADISC *ap;
 2746	struct lpfc_iocbq *elsiocb;
 2747	uint8_t *pcmd;
 2748	uint16_t cmdsize;
 2749
 2750	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
 2751	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 2752				     ndlp->nlp_DID, ELS_CMD_ADISC);
 2753	if (!elsiocb)
 2754		return 1;
 2755
 2756	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 2757
 2758	/* For ADISC request, remainder of payload is service parameters */
 2759	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
 2760	pcmd += sizeof(uint32_t);
 2761
 2762	/* Fill in ADISC payload */
 2763	ap = (ADISC *) pcmd;
 2764	ap->hardAL_PA = phba->fc_pref_ALPA;
 2765	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
 2766	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 2767	ap->DID = be32_to_cpu(vport->fc_myDID);
 2768
 2769	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2770		"Issue ADISC:     did:x%x",
 2771		ndlp->nlp_DID, 0, 0);
 2772
 2773	phba->fc_stat.elsXmitADISC++;
 2774	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
 2775	spin_lock_irq(shost->host_lock);
 2776	ndlp->nlp_flag |= NLP_ADISC_SND;
 2777	spin_unlock_irq(shost->host_lock);
 2778	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 2779	    IOCB_ERROR) {
 2780		spin_lock_irq(shost->host_lock);
 2781		ndlp->nlp_flag &= ~NLP_ADISC_SND;
 2782		spin_unlock_irq(shost->host_lock);
 2783		lpfc_els_free_iocb(phba, elsiocb);
 2784		return 1;
 2785	}
 2786	return 0;
 2787}
 2788
 2789/**
 2790 * lpfc_cmpl_els_logo - Completion callback function for logo
 2791 * @phba: pointer to lpfc hba data structure.
 2792 * @cmdiocb: pointer to lpfc command iocb data structure.
 2793 * @rspiocb: pointer to lpfc response iocb data structure.
 2794 *
 2795 * This routine is the completion function for issuing the ELS Logout (LOGO)
 2796 * command. If no error status was reported from the LOGO response, the
 2797 * state machine of the associated ndlp shall be invoked for transition with
 2798 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
 2799 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
 2800 **/
 2801static void
 2802lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 2803		   struct lpfc_iocbq *rspiocb)
 2804{
 2805	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 2806	struct lpfc_vport *vport = ndlp->vport;
 2807	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 2808	IOCB_t *irsp;
 2809	struct lpfcMboxq *mbox;
 2810	unsigned long flags;
 2811	uint32_t skip_recovery = 0;
 2812
 2813	/* we pass cmdiocb to state machine which needs rspiocb as well */
 2814	cmdiocb->context_un.rsp_iocb = rspiocb;
 2815
 2816	irsp = &(rspiocb->iocb);
 2817	spin_lock_irq(shost->host_lock);
 2818	ndlp->nlp_flag &= ~NLP_LOGO_SND;
 2819	spin_unlock_irq(shost->host_lock);
 2820
 2821	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2822		"LOGO cmpl:       status:x%x/x%x did:x%x",
 2823		irsp->ulpStatus, irsp->un.ulpWord[4],
 2824		ndlp->nlp_DID);
 2825
 2826	/* LOGO completes to NPort <nlp_DID> */
 2827	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 2828			 "0105 LOGO completes to NPort x%x "
 2829			 "Data: x%x x%x x%x x%x\n",
 2830			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
 2831			 irsp->ulpTimeout, vport->num_disc_nodes);
 2832
 2833	if (lpfc_els_chk_latt(vport)) {
 2834		skip_recovery = 1;
 2835		goto out;
 2836	}
 2837
 2838	/* Check to see if link went down during discovery */
 2839	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
 2840		/* NLP_EVT_DEVICE_RM should unregister the RPI
 2841		 * which should abort all outstanding IOs.
 2842		 */
 2843		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 2844					NLP_EVT_DEVICE_RM);
 2845		skip_recovery = 1;
 2846		goto out;
 2847	}
 2848
 2849	/* The LOGO will not be retried on failure.  A LOGO was
 2850	 * issued to the remote rport and an ACC, an RJT, or no answer
 2851	 * are all acceptable.  Note the failure and move forward with
 2852	 * discovery.  The PLOGI will retry.
 2853	 */
 2854	if (irsp->ulpStatus) {
 2855		/* LOGO failed */
 2856		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 2857				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
 2858				 ndlp->nlp_DID, irsp->ulpStatus,
 2859				 irsp->un.ulpWord[4]);
 2860		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 2861		if (lpfc_error_lost_link(irsp)) {
 2862			skip_recovery = 1;
 2863			goto out;
 2864		}
 2865	}
 2866
 2867	/* Call state machine. This will unregister the rpi if needed. */
 2868	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
 2869
 2870out:
 2871	lpfc_els_free_iocb(phba, cmdiocb);
 2872	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
 2873	if ((vport->fc_flag & FC_PT2PT) &&
 2874		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
 2875		phba->pport->fc_myDID = 0;
 2876
 2877		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 2878		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 2879			if (phba->nvmet_support)
 2880				lpfc_nvmet_update_targetport(phba);
 2881			else
 2882				lpfc_nvme_update_localport(phba->pport);
 2883		}
 2884
 2885		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 2886		if (mbox) {
 2887			lpfc_config_link(phba, mbox);
 2888			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 2889			mbox->vport = vport;
 2890			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 2891				MBX_NOT_FINISHED) {
 2892				mempool_free(mbox, phba->mbox_mem_pool);
 2893				skip_recovery = 1;
 2894			}
 2895		}
 2896	}
 2897
 2898	/*
 2899	 * If the node is a target, the handling attempts to recover the port.
 2900	 * For any other port type, the rpi is unregistered as an implicit
 2901	 * LOGO.
 2902	 */
 2903	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
 2904	    skip_recovery == 0) {
 2905		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 2906		spin_lock_irqsave(shost->host_lock, flags);
 2907		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 2908		spin_unlock_irqrestore(shost->host_lock, flags);
 2909
 2910		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 2911				 "3187 LOGO completes to NPort x%x: Start "
 2912				 "Recovery Data: x%x x%x x%x x%x\n",
 2913				 ndlp->nlp_DID, irsp->ulpStatus,
 2914				 irsp->un.ulpWord[4], irsp->ulpTimeout,
 2915				 vport->num_disc_nodes);
 2916		lpfc_disc_start(vport);
 2917	}
 2918	return;
 2919}
 2920
 2921/**
 2922 * lpfc_issue_els_logo - Issue a logo to a node on a vport
 2923 * @vport: pointer to a virtual N_Port data structure.
 2924 * @ndlp: pointer to a node-list data structure.
 2925 * @retry: number of retries to the command IOCB.
 2926 *
 2927 * This routine constructs and issues an ELS Logout (LOGO) iocb command
 2928 * to a remote node, referred by an @ndlp on a @vport. It constructs the
 2929 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
 2930 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
 2931 *
 2932 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 2933 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 2934 * will be stored into the context1 field of the IOCB for the completion
 2935 * callback function to the LOGO ELS command.
 2936 *
 2937 * Callers of this routine are expected to unregister the RPI first
 2938 *
 2939 * Return code
 2940 *   0 - successfully issued logo
 2941 *   1 - failed to issue logo
 2942 **/
 2943int
 2944lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 2945		    uint8_t retry)
 2946{
 2947	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 2948	struct lpfc_hba  *phba = vport->phba;
 2949	struct lpfc_iocbq *elsiocb;
 2950	uint8_t *pcmd;
 2951	uint16_t cmdsize;
 2952	int rc;
 2953
 2954	spin_lock_irq(shost->host_lock);
 2955	if (ndlp->nlp_flag & NLP_LOGO_SND) {
 2956		spin_unlock_irq(shost->host_lock);
 2957		return 0;
 2958	}
 2959	spin_unlock_irq(shost->host_lock);
 2960
 2961	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
 2962	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 2963				     ndlp->nlp_DID, ELS_CMD_LOGO);
 2964	if (!elsiocb)
 2965		return 1;
 2966
 2967	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 2968	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
 2969	pcmd += sizeof(uint32_t);
 2970
 2971	/* Fill in LOGO payload */
 2972	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
 2973	pcmd += sizeof(uint32_t);
 2974	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
 2975
 2976	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 2977		"Issue LOGO:      did:x%x",
 2978		ndlp->nlp_DID, 0, 0);
 2979
 2980	phba->fc_stat.elsXmitLOGO++;
 2981	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
 2982	spin_lock_irq(shost->host_lock);
 2983	ndlp->nlp_flag |= NLP_LOGO_SND;
 2984	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
 2985	spin_unlock_irq(shost->host_lock);
 2986	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 2987	if (rc == IOCB_ERROR) {
 2988		spin_lock_irq(shost->host_lock);
 2989		ndlp->nlp_flag &= ~NLP_LOGO_SND;
 2990		spin_unlock_irq(shost->host_lock);
 2991		lpfc_els_free_iocb(phba, elsiocb);
 2992		return 1;
 2993	}
 2994
 2995	spin_lock_irq(shost->host_lock);
 2996	ndlp->nlp_prev_state = ndlp->nlp_state;
 2997	spin_unlock_irq(shost->host_lock);
 2998	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 2999	return 0;
 3000}
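
/*
 * Illustrative sketch only (guarded out of the build): honoring the caller
 * contract noted above, where the RPI is unregistered before the explicit
 * LOGO is sent.  The wrapper name is hypothetical; lpfc_unreg_rpi() and
 * lpfc_issue_els_logo() are the interfaces used elsewhere in this file.
 */
#if 0	/* example only */
static void example_logout_node(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp)
{
	/* Implicitly log the RPI out of the adapter first ... */
	lpfc_unreg_rpi(vport, ndlp);

	/* ... then send the explicit LOGO to the remote port */
	if (lpfc_issue_els_logo(vport, ndlp, 0))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "xxxx example: LOGO to x%x not issued\n",
				 ndlp->nlp_DID);
}
#endif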
 3001
 3002/**
 3003 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
 3004 * @phba: pointer to lpfc hba data structure.
 3005 * @cmdiocb: pointer to lpfc command iocb data structure.
 3006 * @rspiocb: pointer to lpfc response iocb data structure.
 3007 *
 3008 * This routine is a generic completion callback function for ELS commands.
 3009 * Specifically, it is the callback function which does not need to perform
 3010 * any command specific operations. It is currently used by the ELS command
 3011 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
 3012 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
 3013 * Other than certain debug logging, this callback function simply invokes the
 3014 * lpfc_els_chk_latt() routine to check whether link went down during the
 3015 * discovery process.
 3016 **/
 3017static void
 3018lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 3019		  struct lpfc_iocbq *rspiocb)
 3020{
 3021	struct lpfc_vport *vport = cmdiocb->vport;
 3022	IOCB_t *irsp;
 3023
 3024	irsp = &rspiocb->iocb;
 3025
 3026	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3027			      "ELS cmd cmpl:    status:x%x/x%x did:x%x",
 3028			      irsp->ulpStatus, irsp->un.ulpWord[4],
 3029			      irsp->un.elsreq64.remoteID);
 3030
 3031	/* ELS cmd tag <ulpIoTag> completes */
 3032	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 3033			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
 3034			 irsp->ulpIoTag, irsp->ulpStatus,
 3035			 irsp->un.ulpWord[4], irsp->ulpTimeout);
 3036
 3037	/* Check to see if link went down during discovery */
 3038	lpfc_els_chk_latt(vport);
 3039	lpfc_els_free_iocb(phba, cmdiocb);
 3040}
 3041
 3042/**
 3043 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
 3044 * @phba: pointer to lpfc hba data structure.
 3045 * @cmdiocb: pointer to lpfc command iocb data structure.
 3046 * @rspiocb: pointer to lpfc response iocb data structure.
 3047 *
 3048 * This routine is a generic completion callback function for Discovery ELS cmd.
 3049 * Currently used by the ELS command issuing routines for the ELS State Change
 3050 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
 3051 * These commands will be retried once only for ELS timeout errors.
 3052 **/
 3053static void
 3054lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 3055		       struct lpfc_iocbq *rspiocb)
 3056{
 3057	struct lpfc_vport *vport = cmdiocb->vport;
 3058	IOCB_t *irsp;
 3059	struct lpfc_els_rdf_rsp *prdf;
 3060	struct lpfc_dmabuf *pcmd, *prsp;
 3061	u32 *pdata;
 3062	u32 cmd;
 3063
 3064	irsp = &rspiocb->iocb;
 3065
 3066	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3067		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
 3068		irsp->ulpStatus, irsp->un.ulpWord[4],
 3069		irsp->un.elsreq64.remoteID);
 3070	/* ELS cmd tag <ulpIoTag> completes */
 3071	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 3072			 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
 3073			 "x%x\n",
 3074			 irsp->ulpIoTag, irsp->ulpStatus,
 3075			 irsp->un.ulpWord[4], irsp->ulpTimeout,
 3076			 cmdiocb->retry);
 3077
 3078	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
 3079	if (!pcmd)
 3080		goto out;
 3081
 3082	pdata = (u32 *)pcmd->virt;
 3083	if (!pdata)
 3084		goto out;
 3085	cmd = *pdata;
 3086
 3087	/* Only 1 retry for ELS timeout errors */
 3088	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 3089	    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 3090	    IOERR_SEQUENCE_TIMEOUT)) {
 3091		cmdiocb->retry++;
 3092		if (cmdiocb->retry <= 1) {
 3093			switch (cmd) {
 3094			case ELS_CMD_SCR:
 3095				lpfc_issue_els_scr(vport, cmdiocb->retry);
 3096				break;
 3097			case ELS_CMD_RDF:
 3098				cmdiocb->context1 = NULL; /* save ndlp refcnt */
 3099				lpfc_issue_els_rdf(vport, cmdiocb->retry);
 3100				break;
 3101			}
 3102			goto out;
 3103		}
 3104		phba->fc_stat.elsRetryExceeded++;
 3105	}
 3106	if (irsp->ulpStatus) {
 3107		/* ELS discovery cmd completes with error */
 3108		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
 3109				 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
 3110				 irsp->ulpStatus, irsp->un.ulpWord[4]);
 3111		goto out;
 3112	}
 3113
 3114	/* The RDF response doesn't have any impact on the running driver
 3115	 * but the notification descriptors are dumped here for support.
 3116	 */
 3117	if (cmd == ELS_CMD_RDF) {
 3118		int i;
 3119
 3120		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
 3121		if (!prsp)
 3122			goto out;
 3123
 3124		prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
 3125		if (!prdf)
 3126			goto out;
 3127
 3128		for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
 3129			    i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
 3130			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 3131				 "4677 Fabric RDF Notification Grant Data: "
 3132				 "0x%08x\n",
 3133				 be32_to_cpu(
 3134					prdf->reg_d1.desc_tags[i]));
 3135	}
 3136
 3137out:
 3138	/* Check to see if link went down during discovery */
 3139	lpfc_els_chk_latt(vport);
 3140	lpfc_els_free_iocb(phba, cmdiocb);
 3141	return;
 3142}
 3143
 3144/**
 3145 * lpfc_issue_els_scr - Issue an SCR to a node on a vport
 3146 * @vport: pointer to a host virtual N_Port data structure.
 3147 * @retry: retry counter for the command IOCB.
 3148 *
 3149 * This routine issues a State Change Request (SCR) to a fabric node
 3150 * on a @vport. The remote node is the Fabric Controller (0xfffffd). It
 3151 * first searches the @vport node list to find the matching ndlp. If no such
 3152 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
 3153 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
 3154 * routine is invoked to send the SCR IOCB.
 3155 *
 3156 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 3157 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 3158 * will be stored into the context1 field of the IOCB for the completion
 3159 * callback function to the SCR ELS command.
 3160 *
 3161 * Return code
 3162 *   0 - Successfully issued scr command
 3163 *   1 - Failed to issue scr command
 3164 **/
 3165int
 3166lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 3167{
 3168	struct lpfc_hba  *phba = vport->phba;
 3169	struct lpfc_iocbq *elsiocb;
 3170	uint8_t *pcmd;
 3171	uint16_t cmdsize;
 3172	struct lpfc_nodelist *ndlp;
 3173
 3174	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 3175
 3176	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
 3177	if (!ndlp) {
 3178		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
 3179		if (!ndlp)
 3180			return 1;
 3181		lpfc_enqueue_node(vport, ndlp);
 3182	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 3183		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 3184		if (!ndlp)
 3185			return 1;
 3186	}
 3187
 3188	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 3189				     ndlp->nlp_DID, ELS_CMD_SCR);
 3190
 3191	if (!elsiocb) {
 3192		/* This will trigger the release of the node just
 3193		 * allocated
 3194		 */
 3195		lpfc_nlp_put(ndlp);
 3196		return 1;
 3197	}
 3198
 3199	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 3200
 3201	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
 3202	pcmd += sizeof(uint32_t);
 3203
 3204	/* For SCR, remainder of payload is SCR parameter page */
 3205	memset(pcmd, 0, sizeof(SCR));
 3206	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
 3207
 3208	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3209		"Issue SCR:       did:x%x",
 3210		ndlp->nlp_DID, 0, 0);
 3211
 3212	phba->fc_stat.elsXmitSCR++;
 3213	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
 3214	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 3215	    IOCB_ERROR) {
 3216		/* The additional lpfc_nlp_put will cause the following
 3217		 * lpfc_els_free_iocb routine to trigger the release of
 3218		 * the node.
 3219		 */
 3220		lpfc_nlp_put(ndlp);
 3221		lpfc_els_free_iocb(phba, elsiocb);
 3222		return 1;
 3223	}
 3224	/* This will cause the callback function lpfc_cmpl_els_disc_cmd to
 3225	 * trigger the release of the node.
 3226	 */
 3227	if (!(vport->fc_flag & FC_PT2PT))
 3228		lpfc_nlp_put(ndlp);
 3229	return 0;
 3230}
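
/*
 * Illustrative sketch (hypothetical helper): registering for fabric state
 * change notifications amounts to issuing a full-function SCR once the
 * fabric login has completed; lpfc_cmpl_els_disc_cmd() above retries the
 * SCR once on an ELS sequence timeout.
 */
static inline void lpfc_example_register_scr(struct lpfc_vport *vport)
{
	if (lpfc_issue_els_scr(vport, 0))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "Example: SCR could not be issued\n");
}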
 3231
 3232/**
 3233 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
 3234 *   or the other nport (pt2pt).
 3235 * @vport: pointer to a host virtual N_Port data structure.
 3236 * @retry: number of retries to the command IOCB.
 3237 *
 3238 * This routine issues an RSCN to the Fabric Controller (DID 0xFFFFFD)
 3239 *  when connected to a fabric, or to the remote port when connected
 3240 *  in point-to-point mode. When sent to the Fabric Controller, it will
 3241 *  replay the RSCN to registered recipients.
 3242 *
 3243 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 3244 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 3245 * will be stored into the context1 field of the IOCB for the completion
 3246 * callback function to the RSCN ELS command.
 3247 *
 3248 * Return code
 3249 *   0 - Successfully issued RSCN command
 3250 *   1 - Failed to issue RSCN command
 3251 **/
 3252int
 3253lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
 3254{
 3255	struct lpfc_hba *phba = vport->phba;
 3256	struct lpfc_iocbq *elsiocb;
 3257	struct lpfc_nodelist *ndlp;
 3258	struct {
 3259		struct fc_els_rscn rscn;
 3260		struct fc_els_rscn_page portid;
 3261	} *event;
 3262	uint32_t nportid;
 3263	uint16_t cmdsize = sizeof(*event);
 3264
 3265	/* Not supported for private loop */
 3266	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
 3267	    !(vport->fc_flag & FC_PUBLIC_LOOP))
 3268		return 1;
 3269
 3270	if (vport->fc_flag & FC_PT2PT) {
 3271		/* find any mapped nport - that would be the other nport */
 3272		ndlp = lpfc_findnode_mapped(vport);
 3273		if (!ndlp)
 3274			return 1;
 3275	} else {
 3276		nportid = FC_FID_FCTRL;
 3277		/* find the fabric controller node */
 3278		ndlp = lpfc_findnode_did(vport, nportid);
 3279		if (!ndlp) {
 3280			/* if one didn't exist, make one */
 3281			ndlp = lpfc_nlp_init(vport, nportid);
 3282			if (!ndlp)
 3283				return 1;
 3284			lpfc_enqueue_node(vport, ndlp);
 3285		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 3286			ndlp = lpfc_enable_node(vport, ndlp,
 3287						NLP_STE_UNUSED_NODE);
 3288			if (!ndlp)
 3289				return 1;
 3290		}
 3291	}
 3292
 3293	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 3294				     ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
 3295
 3296	if (!elsiocb) {
 3297		/* This will trigger the release of the node just
 3298		 * allocated
 3299		 */
 3300		lpfc_nlp_put(ndlp);
 3301		return 1;
 3302	}
 3303
 3304	event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
 3305
 3306	event->rscn.rscn_cmd = ELS_RSCN;
 3307	event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
 3308	event->rscn.rscn_plen = cpu_to_be16(cmdsize);
 3309
 3310	nportid = vport->fc_myDID;
 3311	/* appears that page flags must be 0 for fabric to broadcast RSCN */
 3312	event->portid.rscn_page_flags = 0;
 3313	event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
 3314	event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
 3315	event->portid.rscn_fid[2] = nportid & 0x000000FF;
 3316
 3317	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3318			      "Issue RSCN:       did:x%x",
 3319			      ndlp->nlp_DID, 0, 0);
 3320
 3321	phba->fc_stat.elsXmitRSCN++;
 3322	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
 3323	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 3324	    IOCB_ERROR) {
 3325		/* The additional lpfc_nlp_put will cause the following
 3326		 * lpfc_els_free_iocb routine to trigger the release of
 3327		 * the node.
 3328		 */
 3329		lpfc_nlp_put(ndlp);
 3330		lpfc_els_free_iocb(phba, elsiocb);
 3331		return 1;
 3332	}
 3333	/* This will cause the callback function lpfc_cmpl_els_cmd to
 3334	 * trigger the release of the node.
 3335	 */
 3336	if (!(vport->fc_flag & FC_PT2PT))
 3337		lpfc_nlp_put(ndlp);
 3338
 3339	return 0;
 3340}
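
/*
 * Illustrative sketch (hypothetical helper): an RSCN affected-port page
 * carries the 24-bit N_Port ID as three bytes, most significant first,
 * exactly as the routine above builds it for the local port ID.
 */
static inline void lpfc_example_fill_rscn_page(struct fc_els_rscn_page *page,
					       uint32_t nportid)
{
	page->rscn_page_flags = 0;			/* full port address */
	page->rscn_fid[0] = (nportid >> 16) & 0xFF;	/* domain */
	page->rscn_fid[1] = (nportid >> 8) & 0xFF;	/* area */
	page->rscn_fid[2] = nportid & 0xFF;		/* port */
}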
 3341
 3342/**
 3343 * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
 3344 * @vport: pointer to a host virtual N_Port data structure.
 3345 * @nportid: N_Port identifier to the remote node.
 3346 * @retry: number of retries to the command IOCB.
 3347 *
 3348 * This routine issues a Fibre Channel Address Resolution Response
 3349 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 3350 * is passed into the function. It first searches the @vport node list to find
 3351 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
 3352 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
 3353 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
 3354 *
 3355 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 3356 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 3357 * will be stored into the context1 field of the IOCB for the completion
 3358 * callback function to the FARPR ELS command.
 3359 *
 3360 * Return code
 3361 *   0 - Successfully issued farpr command
 3362 *   1 - Failed to issue farpr command
 3363 **/
 3364static int
 3365lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 3366{
 3367	struct lpfc_hba  *phba = vport->phba;
 3368	struct lpfc_iocbq *elsiocb;
 3369	FARP *fp;
 3370	uint8_t *pcmd;
 3371	uint32_t *lp;
 3372	uint16_t cmdsize;
 3373	struct lpfc_nodelist *ondlp;
 3374	struct lpfc_nodelist *ndlp;
 3375
 3376	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 3377
 3378	ndlp = lpfc_findnode_did(vport, nportid);
 3379	if (!ndlp) {
 3380		ndlp = lpfc_nlp_init(vport, nportid);
 3381		if (!ndlp)
 3382			return 1;
 3383		lpfc_enqueue_node(vport, ndlp);
 3384	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 3385		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 3386		if (!ndlp)
 3387			return 1;
 3388	}
 3389
 3390	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 3391				     ndlp->nlp_DID, ELS_CMD_RNID);
 3392	if (!elsiocb) {
 3393		/* This will trigger the release of the node just
 3394		 * allocated
 3395		 */
 3396		lpfc_nlp_put(ndlp);
 3397		return 1;
 3398	}
 3399
 3400	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 3401
 3402	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
 3403	pcmd += sizeof(uint32_t);
 3404
 3405	/* Fill in FARPR payload */
 3406	fp = (FARP *) (pcmd);
 3407	memset(fp, 0, sizeof(FARP));
 3408	lp = (uint32_t *) pcmd;
 3409	*lp++ = be32_to_cpu(nportid);
 3410	*lp++ = be32_to_cpu(vport->fc_myDID);
 3411	fp->Rflags = 0;
 3412	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
 3413
 3414	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
 3415	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 3416	ondlp = lpfc_findnode_did(vport, nportid);
 3417	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
 3418		memcpy(&fp->OportName, &ondlp->nlp_portname,
 3419		       sizeof(struct lpfc_name));
 3420		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
 3421		       sizeof(struct lpfc_name));
 3422	}
 3423
 3424	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3425		"Issue FARPR:     did:x%x",
 3426		ndlp->nlp_DID, 0, 0);
 3427
 3428	phba->fc_stat.elsXmitFARPR++;
 3429	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
 3430	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 3431	    IOCB_ERROR) {
 3432		/* The additional lpfc_nlp_put will cause the following
 3433		 * lpfc_els_free_iocb routine to trigger the release of
 3434		 * the node.
 3435		 */
 3436		lpfc_nlp_put(ndlp);
 3437		lpfc_els_free_iocb(phba, elsiocb);
 3438		return 1;
 3439	}
 3440	/* This will cause the callback function lpfc_cmpl_els_cmd to
 3441	 * trigger the release of the node.
 3442	 */
 3443	/* Don't release reference count as RDF is likely outstanding */
 3444	return 0;
 3445}
 3446
 3447/**
 3448 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
 3449 * @vport: pointer to a host virtual N_Port data structure.
 3450 * @retry: retry counter for the command IOCB.
 3451 *
 3452 * This routine issues an ELS RDF to the Fabric Controller to register
 3453 * for diagnostic functions.
 3454 *
 3455 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 3456 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 3457 * will be stored into the context1 field of the IOCB for the completion
 3458 * callback function to the RDF ELS command.
 3459 *
 3460 * Return code
 3461 *   0 - Successfully issued rdf command
 3462 *   1 - Failed to issue rdf command
 3463 **/
 3464int
 3465lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 3466{
 3467	struct lpfc_hba *phba = vport->phba;
 3468	struct lpfc_iocbq *elsiocb;
 3469	struct lpfc_els_rdf_req *prdf;
 3470	struct lpfc_nodelist *ndlp;
 3471	uint16_t cmdsize;
 3472
 3473	cmdsize = sizeof(*prdf);
 3474
 3475	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
 3476	if (!ndlp) {
 3477		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
 3478		if (!ndlp)
 3479			return -ENODEV;
 3480		lpfc_enqueue_node(vport, ndlp);
 3481	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 3482		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 3483		if (!ndlp)
 3484			return -ENODEV;
 3485	}
 3486
 3487	/* RDF ELS is not required on an NPIV VN_Port.  */
 3488	if (vport->port_type == LPFC_NPIV_PORT) {
 3489		lpfc_nlp_put(ndlp);
 3490		return -EACCES;
 3491	}
 3492
 3493	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 3494				     ndlp->nlp_DID, ELS_CMD_RDF);
 3495	if (!elsiocb) {
 3496		/* This will trigger the release of the node just
 3497		 * allocated
 3498		 */
 3499		lpfc_nlp_put(ndlp);
 3500		return -ENOMEM;
 3501	}
 3502
 3503	/* Configure the payload for the supported FPIN events. */
 3504	prdf = (struct lpfc_els_rdf_req *)
 3505		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 3506	memset(prdf, 0, cmdsize);
 3507	prdf->rdf.fpin_cmd = ELS_RDF;
 3508	prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
 3509					 sizeof(struct fc_els_rdf));
 3510	prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
 3511	prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
 3512				FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
 3513	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
 3514	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
 3515
 3516	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3517			      "Issue RDF:       did:x%x",
 3518			      ndlp->nlp_DID, 0, 0);
 3519
 3520	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 3521			 "6444 Xmit RDF to remote NPORT x%x\n",
 3522			 ndlp->nlp_DID);
 3523
 3524	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
 3525	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 3526	    IOCB_ERROR) {
 3527		/* The additional lpfc_nlp_put will cause the following
 3528		 * lpfc_els_free_iocb routine to trigger the release of
 3529		 * the node.
 3530		 */
 3531		lpfc_nlp_put(ndlp);
 3532		lpfc_els_free_iocb(phba, elsiocb);
 3533		return -EIO;
 3534	}
 3535
 3536	/* An RDF was issued - this put ensures the ndlp is cleaned up
 3537	 * when the RDF completes.
 3538	 */
 3539	lpfc_nlp_put(ndlp);
 3540	return 0;
 3541}
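
/*
 * Illustrative sketch (hypothetical caller): unlike most issue routines in
 * this file, lpfc_issue_els_rdf() returns a negative errno on failure, so a
 * caller can distinguish "not applicable" (e.g. -EACCES on an NPIV VN_Port)
 * from a real transmit error.
 */
static inline void lpfc_example_try_rdf(struct lpfc_vport *vport)
{
	int rc = lpfc_issue_els_rdf(vport, 0);

	if (rc && rc != -EACCES)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "Example: RDF not issued, rc %d\n", rc);
}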
 3542
 3543/**
 3544 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
 3545 * @vport: pointer to a host virtual N_Port data structure.
 3546 * @nlp: pointer to a node-list data structure.
 3547 *
 3548 * This routine cancels the timer with a delayed IOCB-command retry for
 3549 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
 3550 * removes the ELS retry event if it is present. In addition, if the
 3551 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
 3552 * commands are sent for the @vport's nodes that require issuing discovery
 3553 * ADISC.
 3554 **/
 3555void
 3556lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 3557{
 3558	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 3559	struct lpfc_work_evt *evtp;
 3560
 3561	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
 3562		return;
 3563	spin_lock_irq(shost->host_lock);
 3564	nlp->nlp_flag &= ~NLP_DELAY_TMO;
 3565	spin_unlock_irq(shost->host_lock);
 3566	del_timer_sync(&nlp->nlp_delayfunc);
 3567	nlp->nlp_last_elscmd = 0;
 3568	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 3569		list_del_init(&nlp->els_retry_evt.evt_listp);
 3570		/* Decrement nlp reference count held for the delayed retry */
 3571		evtp = &nlp->els_retry_evt;
 3572		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
 3573	}
 3574	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
 3575		spin_lock_irq(shost->host_lock);
 3576		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 3577		spin_unlock_irq(shost->host_lock);
 3578		if (vport->num_disc_nodes) {
 3579			if (vport->port_state < LPFC_VPORT_READY) {
 3580				/* Check if there are more ADISCs to be sent */
 3581				lpfc_more_adisc(vport);
 3582			} else {
 3583				/* Check if there are more PLOGIs to be sent */
 3584				lpfc_more_plogi(vport);
 3585				if (vport->num_disc_nodes == 0) {
 3586					spin_lock_irq(shost->host_lock);
 3587					vport->fc_flag &= ~FC_NDISC_ACTIVE;
 3588					spin_unlock_irq(shost->host_lock);
 3589					lpfc_can_disctmo(vport);
 3590					lpfc_end_rscn(vport);
 3591				}
 3592			}
 3593		}
 3594	}
 3595	return;
 3596}
 3597
 3598/**
 3599 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
 3600 * @t: pointer to the timer function associated data (ndlp).
 3601 *
 3602 * This routine is invoked by the ndlp delayed-function timer to check
 3603 * whether there is any pending ELS retry event(s) with the node. If not, it
 3604 * simply returns. Otherwise, if there is at least one ELS delayed event, it
 3605 * adds the delayed events to the HBA work list and invokes the
 3606 * lpfc_worker_wake_up() routine to wake up worker thread to process the
 3607 * event. Note that lpfc_nlp_get() is called before posting the event to
 3608 * the work list to hold reference count of ndlp so that it guarantees the
 3609 * reference to ndlp will still be available when the worker thread gets
 3610 * to the event associated with the ndlp.
 3611 **/
 3612void
 3613lpfc_els_retry_delay(struct timer_list *t)
 3614{
 3615	struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
 3616	struct lpfc_vport *vport = ndlp->vport;
 3617	struct lpfc_hba   *phba = vport->phba;
 3618	unsigned long flags;
 3619	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 3620
 3621	spin_lock_irqsave(&phba->hbalock, flags);
 3622	if (!list_empty(&evtp->evt_listp)) {
 3623		spin_unlock_irqrestore(&phba->hbalock, flags);
 3624		return;
 3625	}
 3626
 3627	/* We need to hold the node by incrementing the reference
 3628	 * count until the queued work is done
 3629	 */
 3630	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
 3631	if (evtp->evt_arg1) {
 3632		evtp->evt = LPFC_EVT_ELS_RETRY;
 3633		list_add_tail(&evtp->evt_listp, &phba->work_list);
 3634		lpfc_worker_wake_up(phba);
 3635	}
 3636	spin_unlock_irqrestore(&phba->hbalock, flags);
 3637	return;
 3638}
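
/*
 * Illustrative sketch (hypothetical helper): arming the per-node delayed
 * retry, mirroring what lpfc_els_retry() does further below.  The timer
 * itself is set up elsewhere in the driver with lpfc_els_retry_delay() as
 * its callback; the real path also sets NLP_DELAY_TMO and nlp_retry under
 * the host lock.
 */
static inline void lpfc_example_arm_delayed_retry(struct lpfc_nodelist *ndlp,
						  uint32_t cmd,
						  unsigned long msecs)
{
	ndlp->nlp_last_elscmd = cmd;	/* ELS command to replay */
	mod_timer(&ndlp->nlp_delayfunc,
		  jiffies + msecs_to_jiffies(msecs));
}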
 3639
 3640/**
 3641 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
 3642 * @ndlp: pointer to a node-list data structure.
 3643 *
 3644 * This routine is the worker-thread handler for processing the @ndlp delayed
 3645 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
 3646 * the last ELS command from the associated ndlp and invokes the proper ELS
 3647 * function according to the delayed ELS command to retry the command.
 3648 **/
 3649void
 3650lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 3651{
 3652	struct lpfc_vport *vport = ndlp->vport;
 3653	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 3654	uint32_t cmd, retry;
 3655
 3656	spin_lock_irq(shost->host_lock);
 3657	cmd = ndlp->nlp_last_elscmd;
 3658	ndlp->nlp_last_elscmd = 0;
 3659
 3660	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
 3661		spin_unlock_irq(shost->host_lock);
 3662		return;
 3663	}
 3664
 3665	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
 3666	spin_unlock_irq(shost->host_lock);
 3667	/*
 3668	 * If a discovery event re-armed nlp_delayfunc after the timer
 3669	 * fired and before it was processed, cancel the
 3670	 * nlp_delayfunc.
 3671	 */
 3672	del_timer_sync(&ndlp->nlp_delayfunc);
 3673	retry = ndlp->nlp_retry;
 3674	ndlp->nlp_retry = 0;
 3675
 3676	switch (cmd) {
 3677	case ELS_CMD_FLOGI:
 3678		lpfc_issue_els_flogi(vport, ndlp, retry);
 3679		break;
 3680	case ELS_CMD_PLOGI:
 3681		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
 3682			ndlp->nlp_prev_state = ndlp->nlp_state;
 3683			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 3684		}
 3685		break;
 3686	case ELS_CMD_ADISC:
 3687		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
 3688			ndlp->nlp_prev_state = ndlp->nlp_state;
 3689			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 3690		}
 3691		break;
 3692	case ELS_CMD_PRLI:
 3693	case ELS_CMD_NVMEPRLI:
 3694		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
 3695			ndlp->nlp_prev_state = ndlp->nlp_state;
 3696			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 3697		}
 3698		break;
 3699	case ELS_CMD_LOGO:
 3700		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
 3701			ndlp->nlp_prev_state = ndlp->nlp_state;
 3702			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 3703		}
 3704		break;
 3705	case ELS_CMD_FDISC:
 3706		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
 3707			lpfc_issue_els_fdisc(vport, ndlp, retry);
 3708		break;
 3709	}
 3710	return;
 3711}
 3712
 3713/**
 3714 * lpfc_link_reset - Issue link reset
 3715 * @vport: pointer to a virtual N_Port data structure.
 3716 *
 3717 * This routine performs link reset by sending INIT_LINK mailbox command.
 3718 * For SLI-3 adapter, link attention interrupt is enabled before issuing
 3719 * INIT_LINK mailbox command.
 3720 *
 3721 * Return code
 3722 *   0 - Link reset initiated successfully
 3723 *   1 - Failed to initiate link reset
 3724 **/
 3725int
 3726lpfc_link_reset(struct lpfc_vport *vport)
 3727{
 3728	struct lpfc_hba *phba = vport->phba;
 3729	LPFC_MBOXQ_t *mbox;
 3730	uint32_t control;
 3731	int rc;
 3732
 3733	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 3734			 "2851 Attempt link reset\n");
 3735	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 3736	if (!mbox) {
 3737		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 3738				"2852 Failed to allocate mbox memory");
 3739		return 1;
 3740	}
 3741
 3742	/* Enable Link attention interrupts */
 3743	if (phba->sli_rev <= LPFC_SLI_REV3) {
 3744		spin_lock_irq(&phba->hbalock);
 3745		phba->sli.sli_flag |= LPFC_PROCESS_LA;
 3746		control = readl(phba->HCregaddr);
 3747		control |= HC_LAINT_ENA;
 3748		writel(control, phba->HCregaddr);
 3749		readl(phba->HCregaddr); /* flush */
 3750		spin_unlock_irq(&phba->hbalock);
 3751	}
 3752
 3753	lpfc_init_link(phba, mbox, phba->cfg_topology,
 3754		       phba->cfg_link_speed);
 3755	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 3756	mbox->vport = vport;
 3757	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 3758	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 3759		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 3760				"2853 Failed to issue INIT_LINK "
 3761				"mbox command, rc:x%x\n", rc);
 3762		mempool_free(mbox, phba->mbox_mem_pool);
 3763		return 1;
 3764	}
 3765
 3766	return 0;
 3767}
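
/*
 * Illustrative sketch (hypothetical caller): lpfc_link_reset() reports
 * 0/1 rather than an errno.  lpfc_els_retry() below uses it when the final
 * PLOGI attempt to the name server times out.
 */
static inline void lpfc_example_try_link_reset(struct lpfc_vport *vport)
{
	if (lpfc_link_reset(vport))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "Example: link reset could not be started\n");
}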
 3768
 3769/**
 3770 * lpfc_els_retry - Make retry decision on an els command iocb
 3771 * @phba: pointer to lpfc hba data structure.
 3772 * @cmdiocb: pointer to lpfc command iocb data structure.
 3773 * @rspiocb: pointer to lpfc response iocb data structure.
 3774 *
 3775 * This routine makes a retry decision on an ELS command IOCB, which has
 3776 * failed. The following ELS IOCBs use this function for retrying the command
 3777 * when previously issued command responsed with error status: FLOGI, PLOGI,
 3778 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
 3779 * returned error status, it makes the decision whether a retry shall be
 3780 * issued for the command, and whether a retry shall be made immediately or
 3781 * delayed. In the former case, the corresponding ELS command issuing-function
 3782 * is called to retry the command. In the latter case, the ELS command shall
 3783 * be posted to the ndlp delayed event and the delayed function timer set on the
 3784 * ndlp for the delayed command issuing.
 3785 *
 3786 * Return code
 3787 *   0 - No retry of els command is made
 3788 *   1 - Immediate or delayed retry of els command is made
 3789 **/
 3790static int
 3791lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 3792	       struct lpfc_iocbq *rspiocb)
 3793{
 3794	struct lpfc_vport *vport = cmdiocb->vport;
 3795	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 3796	IOCB_t *irsp = &rspiocb->iocb;
 3797	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 3798	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 3799	uint32_t *elscmd;
 3800	struct ls_rjt stat;
 3801	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
 3802	int logerr = 0;
 3803	uint32_t cmd = 0;
 3804	uint32_t did;
 3805	int link_reset = 0, rc;
 3806
 3807
 3808	/* Note: context2 may be 0 for internal driver abort
 3809	 * of a delayed ELS command.
 3810	 */
 3811
 3812	if (pcmd && pcmd->virt) {
 3813		elscmd = (uint32_t *) (pcmd->virt);
 3814		cmd = *elscmd++;
 3815	}
 3816
 3817	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 3818		did = ndlp->nlp_DID;
 3819	else {
 3820		/* We should only hit this case for retrying PLOGI */
 3821		did = irsp->un.elsreq64.remoteID;
 3822		ndlp = lpfc_findnode_did(vport, did);
 3823		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
 3824		    && (cmd != ELS_CMD_PLOGI))
 3825			return 1;
 3826	}
 3827
 3828	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 3829		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
 3830		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
 3831
 3832	switch (irsp->ulpStatus) {
 3833	case IOSTAT_FCP_RSP_ERROR:
 3834		break;
 3835	case IOSTAT_REMOTE_STOP:
 3836		if (phba->sli_rev == LPFC_SLI_REV4) {
 3837			/* This IO was aborted by the target, we don't
 3838			 * know the rxid and because we did not send the
 3839			 * ABTS we cannot generate an RRQ.
 3840			 */
 3841			lpfc_set_rrq_active(phba, ndlp,
 3842					 cmdiocb->sli4_lxritag, 0, 0);
 3843		}
 3844		break;
 3845	case IOSTAT_LOCAL_REJECT:
 3846		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
 3847		case IOERR_LOOP_OPEN_FAILURE:
 3848			if (cmd == ELS_CMD_FLOGI) {
 3849				if (PCI_DEVICE_ID_HORNET ==
 3850					phba->pcidev->device) {
 3851					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
 3852					phba->pport->fc_myDID = 0;
 3853					phba->alpa_map[0] = 0;
 3854					phba->alpa_map[1] = 0;
 3855				}
 3856			}
 3857			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
 3858				delay = 1000;
 3859			retry = 1;
 3860			break;
 3861
 3862		case IOERR_ILLEGAL_COMMAND:
 3863			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 3864					 "0124 Retry illegal cmd x%x "
 3865					 "retry:x%x delay:x%x\n",
 3866					 cmd, cmdiocb->retry, delay);
 3867			retry = 1;
 3868			/* All command's retry policy */
 3869			maxretry = 8;
 3870			if (cmdiocb->retry > 2)
 3871				delay = 1000;
 3872			break;
 3873
 3874		case IOERR_NO_RESOURCES:
 3875			logerr = 1; /* HBA out of resources */
 3876			retry = 1;
 3877			if (cmdiocb->retry > 100)
 3878				delay = 100;
 3879			maxretry = 250;
 3880			break;
 3881
 3882		case IOERR_ILLEGAL_FRAME:
 3883			delay = 100;
 3884			retry = 1;
 3885			break;
 3886
 3887		case IOERR_INVALID_RPI:
 3888			if (cmd == ELS_CMD_PLOGI &&
 3889			    did == NameServer_DID) {
 3890				/* Continue forever if plogi to */
 3891				/* the nameserver fails */
 3892				maxretry = 0;
 3893				delay = 100;
 3894			}
 3895			retry = 1;
 3896			break;
 3897
 3898		case IOERR_SEQUENCE_TIMEOUT:
 3899			if (cmd == ELS_CMD_PLOGI &&
 3900			    did == NameServer_DID &&
 3901			    (cmdiocb->retry + 1) == maxretry) {
 3902				/* Reset the Link */
 3903				link_reset = 1;
 3904				break;
 3905			}
 3906			retry = 1;
 3907			delay = 100;
 3908			break;
 3909		}
 3910		break;
 3911
 3912	case IOSTAT_NPORT_RJT:
 3913	case IOSTAT_FABRIC_RJT:
 3914		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
 3915			retry = 1;
 3916			break;
 3917		}
 3918		break;
 3919
 3920	case IOSTAT_NPORT_BSY:
 3921	case IOSTAT_FABRIC_BSY:
 3922		logerr = 1; /* Fabric / Remote NPort out of resources */
 3923		retry = 1;
 3924		break;
 3925
 3926	case IOSTAT_LS_RJT:
 3927		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
 3928		/* Added for Vendor specific support
 3929		 * Just keep retrying for these Rsn / Exp codes
 3930		 */
 3931		switch (stat.un.b.lsRjtRsnCode) {
 3932		case LSRJT_UNABLE_TPC:
 3933			/* The driver has a VALID PLOGI but the rport has
 3934			 * rejected the PRLI - can't do it now.  Delay
 3935			 * for 1 second and try again - don't care about
 3936			 * the explanation.
 3937			 */
 3938			if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
 3939				delay = 1000;
 3940				maxretry = lpfc_max_els_tries + 1;
 3941				retry = 1;
 3942				break;
 3943			}
 3944
 3945			/* Legacy bug fix code for targets with PLOGI delays. */
 3946			if (stat.un.b.lsRjtRsnCodeExp ==
 3947			    LSEXP_CMD_IN_PROGRESS) {
 3948				if (cmd == ELS_CMD_PLOGI) {
 3949					delay = 1000;
 3950					maxretry = 48;
 3951				}
 3952				retry = 1;
 3953				break;
 3954			}
 3955			if (stat.un.b.lsRjtRsnCodeExp ==
 3956			    LSEXP_CANT_GIVE_DATA) {
 3957				if (cmd == ELS_CMD_PLOGI) {
 3958					delay = 1000;
 3959					maxretry = 48;
 3960				}
 3961				retry = 1;
 3962				break;
 3963			}
 3964			if (cmd == ELS_CMD_PLOGI) {
 3965				delay = 1000;
 3966				maxretry = lpfc_max_els_tries + 1;
 3967				retry = 1;
 3968				break;
 3969			}
 3970			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 3971			  (cmd == ELS_CMD_FDISC) &&
 3972			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
 3973				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 3974						 "0125 FDISC Failed (x%x). "
 3975						 "Fabric out of resources\n",
 3976						 stat.un.lsRjtError);
 3977				lpfc_vport_set_state(vport,
 3978						     FC_VPORT_NO_FABRIC_RSCS);
 3979			}
 3980			break;
 3981
 3982		case LSRJT_LOGICAL_BSY:
 3983			if ((cmd == ELS_CMD_PLOGI) ||
 3984			    (cmd == ELS_CMD_PRLI) ||
 3985			    (cmd == ELS_CMD_NVMEPRLI)) {
 3986				delay = 1000;
 3987				maxretry = 48;
 3988			} else if (cmd == ELS_CMD_FDISC) {
 3989				/* FDISC retry policy */
 3990				maxretry = 48;
 3991				if (cmdiocb->retry >= 32)
 3992					delay = 1000;
 3993			}
 3994			retry = 1;
 3995			break;
 3996
 3997		case LSRJT_LOGICAL_ERR:
 3998			/* There are some cases where switches return this
 3999			 * error when they are not ready and should be returning
 4000			 * Logical Busy. We should delay every time.
 4001			 */
 4002			if (cmd == ELS_CMD_FDISC &&
 4003			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
 4004				maxretry = 3;
 4005				delay = 1000;
 4006				retry = 1;
 4007			} else if (cmd == ELS_CMD_FLOGI &&
 4008				   stat.un.b.lsRjtRsnCodeExp ==
 4009						LSEXP_NOTHING_MORE) {
 4010				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
 4011				retry = 1;
 4012				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 4013						 "0820 FLOGI Failed (x%x). "
 4014						 "BBCredit Not Supported\n",
 4015						 stat.un.lsRjtError);
 4016			}
 4017			break;
 4018
 4019		case LSRJT_PROTOCOL_ERR:
 4020			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 4021			  (cmd == ELS_CMD_FDISC) &&
 4022			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
 4023			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
 4024			  ) {
 4025				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 4026						 "0122 FDISC Failed (x%x). "
 4027						 "Fabric Detected Bad WWN\n",
 4028						 stat.un.lsRjtError);
 4029				lpfc_vport_set_state(vport,
 4030						     FC_VPORT_FABRIC_REJ_WWN);
 4031			}
 4032			break;
 4033		case LSRJT_VENDOR_UNIQUE:
 4034			if ((stat.un.b.vendorUnique == 0x45) &&
 4035			    (cmd == ELS_CMD_FLOGI)) {
 4036				goto out_retry;
 4037			}
 4038			break;
 4039		case LSRJT_CMD_UNSUPPORTED:
 4040			/* lpfc nvmet returns this type of LS_RJT when it
 4041			 * receives an FCP PRLI because lpfc nvmet only
 4042			 * supports NVME.  The ELS request is terminated for FCP4
 4043			 * on this rport.
 4044			 */
 4045			if (stat.un.b.lsRjtRsnCodeExp ==
 4046			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
 4047				spin_lock_irq(shost->host_lock);
 4048				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
 4049				spin_unlock_irq(shost->host_lock);
 4050				retry = 0;
 4051				goto out_retry;
 4052			}
 4053			break;
 4054		}
 4055		break;
 4056
 4057	case IOSTAT_INTERMED_RSP:
 4058	case IOSTAT_BA_RJT:
 4059		break;
 4060
 4061	default:
 4062		break;
 4063	}
 4064
 4065	if (link_reset) {
 4066		rc = lpfc_link_reset(vport);
 4067		if (rc) {
 4068			/* Do not give up. Retry PLOGI one more time and attempt
 4069			 * link reset if PLOGI fails again.
 4070			 */
 4071			retry = 1;
 4072			delay = 100;
 4073			goto out_retry;
 4074		}
 4075		return 1;
 4076	}
 4077
 4078	if (did == FDMI_DID)
 4079		retry = 1;
 4080
 4081	if ((cmd == ELS_CMD_FLOGI) &&
 4082	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
 4083	    !lpfc_error_lost_link(irsp)) {
 4084		/* FLOGI retry policy */
 4085		retry = 1;
 4086		/* retry FLOGI forever */
 4087		if (phba->link_flag != LS_LOOPBACK_MODE)
 4088			maxretry = 0;
 4089		else
 4090			maxretry = 2;
 4091
 4092		if (cmdiocb->retry >= 100)
 4093			delay = 5000;
 4094		else if (cmdiocb->retry >= 32)
 4095			delay = 1000;
 4096	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
 4097		/* retry FDISCs every second up to devloss */
 4098		retry = 1;
 4099		maxretry = vport->cfg_devloss_tmo;
 4100		delay = 1000;
 4101	}
 4102
 4103	cmdiocb->retry++;
 4104	if (maxretry && (cmdiocb->retry >= maxretry)) {
 4105		phba->fc_stat.elsRetryExceeded++;
 4106		retry = 0;
 4107	}
 4108
 4109	if ((vport->load_flag & FC_UNLOADING) != 0)
 4110		retry = 0;
 4111
 4112out_retry:
 4113	if (retry) {
 4114		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
 4115			/* Stop retrying PLOGI and FDISC if in FCF discovery */
 4116			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 4117				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4118						 "2849 Stop retry ELS command "
 4119						 "x%x to remote NPORT x%x, "
 4120						 "Data: x%x x%x\n", cmd, did,
 4121						 cmdiocb->retry, delay);
 4122				return 0;
 4123			}
 4124		}
 4125
 4126		/* Retry ELS command <elsCmd> to remote NPORT <did> */
 4127		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4128				 "0107 Retry ELS command x%x to remote "
 4129				 "NPORT x%x Data: x%x x%x\n",
 4130				 cmd, did, cmdiocb->retry, delay);
 4131
 4132		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
 4133			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
 4134			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
 4135			IOERR_NO_RESOURCES))) {
 4136			/* Don't reset timer for no resources */
 4137
 4138			/* If discovery / RSCN timer is running, reset it */
 4139			if (timer_pending(&vport->fc_disctmo) ||
 4140			    (vport->fc_flag & FC_RSCN_MODE))
 4141				lpfc_set_disctmo(vport);
 4142		}
 4143
 4144		phba->fc_stat.elsXmitRetry++;
 4145		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
 4146			phba->fc_stat.elsDelayRetry++;
 4147			ndlp->nlp_retry = cmdiocb->retry;
 4148
 4149			/* delay is specified in milliseconds */
 4150			mod_timer(&ndlp->nlp_delayfunc,
 4151				jiffies + msecs_to_jiffies(delay));
 4152			spin_lock_irq(shost->host_lock);
 4153			ndlp->nlp_flag |= NLP_DELAY_TMO;
 4154			spin_unlock_irq(shost->host_lock);
 4155
 4156			ndlp->nlp_prev_state = ndlp->nlp_state;
 4157			if ((cmd == ELS_CMD_PRLI) ||
 4158			    (cmd == ELS_CMD_NVMEPRLI))
 4159				lpfc_nlp_set_state(vport, ndlp,
 4160					NLP_STE_PRLI_ISSUE);
 4161			else
 4162				lpfc_nlp_set_state(vport, ndlp,
 4163					NLP_STE_NPR_NODE);
 4164			ndlp->nlp_last_elscmd = cmd;
 4165
 4166			return 1;
 4167		}
 4168		switch (cmd) {
 4169		case ELS_CMD_FLOGI:
 4170			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
 4171			return 1;
 4172		case ELS_CMD_FDISC:
 4173			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
 4174			return 1;
 4175		case ELS_CMD_PLOGI:
 4176			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 4177				ndlp->nlp_prev_state = ndlp->nlp_state;
 4178				lpfc_nlp_set_state(vport, ndlp,
 4179						   NLP_STE_PLOGI_ISSUE);
 4180			}
 4181			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
 4182			return 1;
 4183		case ELS_CMD_ADISC:
 4184			ndlp->nlp_prev_state = ndlp->nlp_state;
 4185			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 4186			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
 4187			return 1;
 4188		case ELS_CMD_PRLI:
 4189		case ELS_CMD_NVMEPRLI:
 4190			ndlp->nlp_prev_state = ndlp->nlp_state;
 4191			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 4192			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
 4193			return 1;
 4194		case ELS_CMD_LOGO:
 4195			ndlp->nlp_prev_state = ndlp->nlp_state;
 4196			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 4197			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
 4198			return 1;
 4199		}
 4200	}
 4201	/* No retry ELS command <elsCmd> to remote NPORT <did> */
 4202	if (logerr) {
 4203		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 4204			 "0137 No retry ELS command x%x to remote "
 4205			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
 4206			 cmd, did, irsp->ulpStatus,
 4207			 irsp->un.ulpWord[4]);
 4208	}
 4209	else {
 4210		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4211			 "0108 No retry ELS command x%x to remote "
 4212			 "NPORT x%x Retried:%d Error:x%x/%x\n",
 4213			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
 4214			 irsp->un.ulpWord[4]);
 4215	}
 4216	return 0;
 4217}
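
/*
 * Illustrative sketch (hypothetical completion handler): the FLOGI, PLOGI,
 * PRLI, ADISC, LOGO and FDISC completion routines consult lpfc_els_retry()
 * on error; a nonzero return means a retry (immediate or delayed) has been
 * set up, and the original iocb is still freed as usual.
 */
static inline void lpfc_example_els_cmpl(struct lpfc_hba *phba,
					 struct lpfc_iocbq *cmdiocb,
					 struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	if (irsp->ulpStatus) {
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
		/* command-specific error handling would go here */
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}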
 4218
 4219/**
 4220 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
 4221 * @phba: pointer to lpfc hba data structure.
 4222 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
 4223 *
 4224 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
 4225 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
 4226 * checks to see whether there is a lpfc DMA buffer associated with the
 4227 * response of the command IOCB. If so, it will be released before releasing
 4228 * the lpfc DMA buffer associated with the IOCB itself.
 4229 *
 4230 * Return code
 4231 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 4232 **/
 4233static int
 4234lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
 4235{
 4236	struct lpfc_dmabuf *buf_ptr;
 4237
 4238	/* Free the response before processing the command. */
 4239	if (!list_empty(&buf_ptr1->list)) {
 4240		list_remove_head(&buf_ptr1->list, buf_ptr,
 4241				 struct lpfc_dmabuf,
 4242				 list);
 4243		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
 4244		kfree(buf_ptr);
 4245	}
 4246	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
 4247	kfree(buf_ptr1);
 4248	return 0;
 4249}
 4250
 4251/**
 4252 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
 4253 * @phba: pointer to lpfc hba data structure.
 4254 * @buf_ptr: pointer to the lpfc dma buffer data structure.
 4255 *
 4256 * This routine releases the lpfc Direct Memory Access (DMA) buffer
 4257 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
 4258 * pool.
 4259 *
 4260 * Return code
 4261 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
 4262 **/
 4263static int
 4264lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
 4265{
 4266	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
 4267	kfree(buf_ptr);
 4268	return 0;
 4269}
 4270
 4271/**
 4272 * lpfc_els_free_iocb - Free a command iocb and its associated resources
 4273 * @phba: pointer to lpfc hba data structure.
 4274 * @elsiocb: pointer to lpfc els command iocb data structure.
 4275 *
 4276 * This routine frees a command IOCB and its associated resources. The
 4277 * command IOCB data structure contains the reference to various associated
 4278 * resources; these fields must be set to NULL if the associated reference is
 4279 * not present:
 4280 *   context1 - reference to ndlp
 4281 *   context2 - reference to cmd
 4282 *   context2->next - reference to rsp
 4283 *   context3 - reference to bpl
 4284 *
 4285 * It first properly decrements the reference count held on ndlp for the
 4286 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
 4287 * set, it invokes the lpfc_els_free_data() routine to release the Direct
 4288 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 4289 * adds the DMA buffer to the @phba data structure for the delayed release.
 4290 * If reference to the Buffer Pointer List (BPL) is present, the
 4291 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
 4292 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
 4293 * invoked to release the IOCB data structure back to @phba IOCBQ list.
 4294 *
 4295 * Return code
 4296 *   0 - Success (currently, always return 0)
 4297 **/
 4298int
 4299lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 4300{
 4301	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
 4302	struct lpfc_nodelist *ndlp;
 4303
 4304	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
 4305	if (ndlp) {
 4306		if (ndlp->nlp_flag & NLP_DEFER_RM) {
 4307			lpfc_nlp_put(ndlp);
 4308
 4309			/* If the ndlp is not being used by another discovery
 4310			 * thread, free it.
 4311			 */
 4312			if (!lpfc_nlp_not_used(ndlp)) {
 4313				/* If ndlp is being used by another discovery
 4314				 * thread, just clear NLP_DEFER_RM
 4315				 */
 4316				ndlp->nlp_flag &= ~NLP_DEFER_RM;
 4317			}
 4318		}
 4319		else
 4320			lpfc_nlp_put(ndlp);
 4321		elsiocb->context1 = NULL;
 4322	}
 4323	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
 4324	if (elsiocb->context2) {
 4325		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
 4326			/* Firmware could still be in the process of DMAing the
 4327			 * payload, so don't free the data buffer until after
 4328			 * a heartbeat.
 4329			 */
 4330			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
 4331			buf_ptr = elsiocb->context2;
 4332			elsiocb->context2 = NULL;
 4333			if (buf_ptr) {
 4334				buf_ptr1 = NULL;
 4335				spin_lock_irq(&phba->hbalock);
 4336				if (!list_empty(&buf_ptr->list)) {
 4337					list_remove_head(&buf_ptr->list,
 4338						buf_ptr1, struct lpfc_dmabuf,
 4339						list);
 4340					INIT_LIST_HEAD(&buf_ptr1->list);
 4341					list_add_tail(&buf_ptr1->list,
 4342						&phba->elsbuf);
 4343					phba->elsbuf_cnt++;
 4344				}
 4345				INIT_LIST_HEAD(&buf_ptr->list);
 4346				list_add_tail(&buf_ptr->list, &phba->elsbuf);
 4347				phba->elsbuf_cnt++;
 4348				spin_unlock_irq(&phba->hbalock);
 4349			}
 4350		} else {
 4351			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
 4352			lpfc_els_free_data(phba, buf_ptr1);
 4353			elsiocb->context2 = NULL;
 4354		}
 4355	}
 4356
 4357	if (elsiocb->context3) {
 4358		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
 4359		lpfc_els_free_bpl(phba, buf_ptr);
 4360		elsiocb->context3 = NULL;
 4361	}
 4362	lpfc_sli_release_iocbq(phba, elsiocb);
 4363	return 0;
 4364}
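
/*
 * Illustrative sketch (hypothetical helper): the error-path idiom used by
 * the issue routines in this file -- when lpfc_sli_issue_iocb() fails, the
 * extra node reference taken for the completion handler is dropped and the
 * iocb (with its DMA buffers) is returned via lpfc_els_free_iocb().
 */
static inline int lpfc_example_issue_or_free(struct lpfc_hba *phba,
					     struct lpfc_iocbq *elsiocb,
					     struct lpfc_nodelist *ndlp)
{
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_nlp_put(ndlp);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}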
 4365
 4366/**
 4367 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
 4368 * @phba: pointer to lpfc hba data structure.
 4369 * @cmdiocb: pointer to lpfc command iocb data structure.
 4370 * @rspiocb: pointer to lpfc response iocb data structure.
 4371 *
 4372 * This routine is the completion callback function to the Logout (LOGO)
 4373 * Accept (ACC) Response ELS command. This routine is invoked to indicate
 4374 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
 4375 * release the ndlp if it has the last reference remaining (reference count
 4376 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB
 4377 * context1 field to NULL to inform the following lpfc_els_free_iocb() routine
 4378 * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
 4379 * reference use-count shall be decremented by the lpfc_els_free_iocb()
 4380 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
 4381 * IOCB data structure.
 4382 **/
 4383static void
 4384lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 4385		       struct lpfc_iocbq *rspiocb)
 4386{
 4387	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 4388	struct lpfc_vport *vport = cmdiocb->vport;
 4389	IOCB_t *irsp;
 4390
 4391	irsp = &rspiocb->iocb;
 4392	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4393		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
 4394		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
 4395	/* ACC to LOGO completes to NPort <nlp_DID> */
 4396	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4397			 "0109 ACC to LOGO completes to NPort x%x "
 4398			 "Data: x%x x%x x%x\n",
 4399			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 4400			 ndlp->nlp_rpi);
 4401
 4402	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
 4403		/* NPort Recovery mode or node is just allocated */
 4404		if (!lpfc_nlp_not_used(ndlp)) {
 4405			/* If the ndlp is being used by another discovery
 4406			 * thread, just unregister the RPI.
 4407			 */
 4408			lpfc_unreg_rpi(vport, ndlp);
 4409		} else {
 4410			/* Indicate the node has already been released; do
 4411			 * not reference it from within lpfc_els_free_iocb.
 4412			 */
 4413			cmdiocb->context1 = NULL;
 4414		}
 4415	}
 4416
 4417	/*
 4418	 * The driver received a LOGO from the rport and has ACK'd it.
 4419	 * At this point, the driver is done so release the IOCB
 4420	 */
 4421	lpfc_els_free_iocb(phba, cmdiocb);
 4422}
 4423
 4424/**
 4425 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
 4426 * @phba: pointer to lpfc hba data structure.
 4427 * @pmb: pointer to the driver internal queue element for mailbox command.
 4428 *
 4429 * This routine is the completion callback function for unregister default
 4430 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
 4431 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
 4432 * decrements the ndlp reference count held for this completion callback
 4433 * function. After that, it invokes the lpfc_nlp_not_used() to check
 4434 * whether there is only one reference left on the ndlp. If so, it will
 4435 * perform one more decrement and trigger the release of the ndlp.
 4436 **/
 4437void
 4438lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 4439{
 4440	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 4441	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 4442
 4443	pmb->ctx_buf = NULL;
 4444	pmb->ctx_ndlp = NULL;
 4445
 4446	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 4447	kfree(mp);
 4448	mempool_free(pmb, phba->mbox_mem_pool);
 4449	if (ndlp) {
 4450		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
 4451				 "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
 4452				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 4453				 kref_read(&ndlp->kref),
 4454				 ndlp->nlp_usg_map, ndlp);
 4455		if (NLP_CHK_NODE_ACT(ndlp)) {
 4456			lpfc_nlp_put(ndlp);
 4457			/* This is the end of the default RPI cleanup logic for
 4458			 * this ndlp. If no other discovery threads are using
 4459			 * this ndlp, free all resources associated with it.
 4460			 */
 4461			lpfc_nlp_not_used(ndlp);
 4462		} else {
 4463			lpfc_drop_node(ndlp->vport, ndlp);
 4464		}
 4465	}
 4466
 4467	return;
 4468}
 4469
 4470/**
 4471 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 4472 * @phba: pointer to lpfc hba data structure.
 4473 * @cmdiocb: pointer to lpfc command iocb data structure.
 4474 * @rspiocb: pointer to lpfc response iocb data structure.
 4475 *
 4476 * This routine is the completion callback function for ELS Response IOCB
 4477 * command. In the normal case, this callback function just properly sets the
 4478 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
 4479 * field in the command IOCB is not NULL, the referenced mailbox command will
 4480 * be sent out, and then it invokes the lpfc_els_free_iocb() routine to release
 4481 * the IOCB. Under error conditions, such as when an LS_RJT is returned or a
 4482 * link down event occurred during the discovery, the lpfc_nlp_not_used()
 4483 * routine shall be invoked trying to release the ndlp if no other threads
 4484 * are currently referring to it.
 4485 **/
 4486static void
 4487lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 4488		  struct lpfc_iocbq *rspiocb)
 4489{
 4490	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 4491	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
 4492	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
 4493	IOCB_t  *irsp;
 4494	uint8_t *pcmd;
 4495	LPFC_MBOXQ_t *mbox = NULL;
 4496	struct lpfc_dmabuf *mp = NULL;
 4497	uint32_t ls_rjt = 0;
 4498
 4499	irsp = &rspiocb->iocb;
 4500
 4501	if (!vport) {
 4502		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
 4503				"3177 ELS response failed\n");
 4504		goto out;
 4505	}
 4506	if (cmdiocb->context_un.mbox)
 4507		mbox = cmdiocb->context_un.mbox;
 4508
 4509	/* First determine if this is an LS_RJT cmpl. Note, this callback
 4510	 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
 4511	 */
 4512	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
 4513	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 4514	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
 4515		/* A LS_RJT associated with Default RPI cleanup has its own
 4516		 * separate code path.
 4517		 */
 4518		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
 4519			ls_rjt = 1;
 4520	}
 4521
 4522	/* Check to see if link went down during discovery */
 4523	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
 4524		if (mbox) {
 4525			mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 4526			if (mp) {
 4527				lpfc_mbuf_free(phba, mp->virt, mp->phys);
 4528				kfree(mp);
 4529			}
 4530			mempool_free(mbox, phba->mbox_mem_pool);
 4531		}
 4532		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 4533		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
 4534			if (lpfc_nlp_not_used(ndlp)) {
 4535				ndlp = NULL;
 4536				/* Indicate the node has already been released;
 4537				 * do not reference it from within
 4538				 * the routine lpfc_els_free_iocb.
 4539				 */
 4540				cmdiocb->context1 = NULL;
 4541			}
 4542		goto out;
 4543	}
 4544
 4545	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4546		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
 4547		irsp->ulpStatus, irsp->un.ulpWord[4],
 4548		cmdiocb->iocb.un.elsreq64.remoteID);
 4549	/* ELS response tag <ulpIoTag> completes */
 4550	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4551			 "0110 ELS response tag x%x completes "
 4552			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
 4553			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
 4554			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
 4555			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 4556			 ndlp->nlp_rpi);
 4557	if (mbox) {
 4558		if ((rspiocb->iocb.ulpStatus == 0)
 4559		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
 4560			if (!lpfc_unreg_rpi(vport, ndlp) &&
 4561			    (!(vport->fc_flag & FC_PT2PT)) &&
 4562			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
 4563			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
 4564				lpfc_printf_vlog(vport, KERN_INFO,
 4565					LOG_DISCOVERY,
 4566					"0314 PLOGI recov DID x%x "
 4567					"Data: x%x x%x x%x\n",
 4568					ndlp->nlp_DID, ndlp->nlp_state,
 4569					ndlp->nlp_rpi, ndlp->nlp_flag);
 4570				mp = mbox->ctx_buf;
 4571				if (mp) {
 4572					lpfc_mbuf_free(phba, mp->virt,
 4573						       mp->phys);
 4574					kfree(mp);
 4575				}
 4576				mempool_free(mbox, phba->mbox_mem_pool);
 4577				goto out;
 4578			}
 4579
 4580			/* Increment reference count to ndlp to hold the
 4581			 * reference to ndlp for the callback function.
 4582			 */
 4583			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 4584			mbox->vport = vport;
 4585			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
 4586				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 4587				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 4588			}
 4589			else {
 4590				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 4591				ndlp->nlp_prev_state = ndlp->nlp_state;
 4592				lpfc_nlp_set_state(vport, ndlp,
 4593					   NLP_STE_REG_LOGIN_ISSUE);
 4594			}
 4595
 4596			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
 4597			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
 4598			    != MBX_NOT_FINISHED)
 4599				goto out;
 4600
 4601			/* Decrement the ndlp reference count we
 4602			 * set for this failed mailbox command.
 4603			 */
 4604			lpfc_nlp_put(ndlp);
 4605			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 4606
 4607			/* ELS rsp: Cannot issue reg_login for <NPortid> */
 4608			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 4609				"0138 ELS rsp: Cannot issue reg_login for x%x "
 4610				"Data: x%x x%x x%x\n",
 4611				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 4612				ndlp->nlp_rpi);
 4613
 4614			if (lpfc_nlp_not_used(ndlp)) {
 4615				ndlp = NULL;
 4616			/* Indicate the node has already been released;
 4617			 * do not reference it from within
 4618				 * the routine lpfc_els_free_iocb.
 4619				 */
 4620				cmdiocb->context1 = NULL;
 4621			}
 4622		} else {
 4623			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
 4624			if (!lpfc_error_lost_link(irsp) &&
 4625			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
 4626				if (lpfc_nlp_not_used(ndlp)) {
 4627					ndlp = NULL;
 4628					/* Indicate the node has already been
 4629					 * released; do not reference
 4630					 * it from within the routine
 4631					 * lpfc_els_free_iocb.
 4632					 */
 4633					cmdiocb->context1 = NULL;
 4634				}
 4635			}
 4636		}
 4637		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 4638		if (mp) {
 4639			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 4640			kfree(mp);
 4641		}
 4642		mempool_free(mbox, phba->mbox_mem_pool);
 4643	}
 4644out:
 4645	if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
 4646		spin_lock_irq(shost->host_lock);
 4647		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
 4648		spin_unlock_irq(shost->host_lock);
 4649
 4650		/* If the node is not being used by another discovery thread,
 4651		 * and we are sending a reject, we are done with it.
 4652		 * Release driver reference count here and free associated
 4653		 * resources.
 4654		 */
 4655		if (ls_rjt)
 4656			if (lpfc_nlp_not_used(ndlp))
  4657				/* Indicate the node has already been released;
  4658				 * it must not be referenced again from within
  4659				 * the routine lpfc_els_free_iocb.
  4660				 */
 4661				cmdiocb->context1 = NULL;
 4662
 4663	}
 4664
 4665	lpfc_els_free_iocb(phba, cmdiocb);
 4666	return;
 4667}
 4668
 4669/**
 4670 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
 4671 * @vport: pointer to a host virtual N_Port data structure.
 4672 * @flag: the els command code to be accepted.
 4673 * @oldiocb: pointer to the original lpfc command iocb data structure.
 4674 * @ndlp: pointer to a node-list data structure.
 4675 * @mbox: pointer to the driver internal queue element for mailbox command.
 4676 *
 4677 * This routine prepares and issues an Accept (ACC) response IOCB
 4678 * command. It uses the @flag to properly set up the IOCB field for the
 4679 * specific ACC response command to be issued and invokes the
 4680 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
 4681 * @mbox pointer is passed in, it will be put into the context_un.mbox
 4682 * field of the IOCB for the completion callback function to issue the
 4683 * mailbox command to the HBA later when callback is invoked.
 4684 *
 4685 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 4686 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 4687 * will be stored into the context1 field of the IOCB for the completion
 4688 * callback function to the corresponding response ELS IOCB command.
 4689 *
 4690 * Return code
 4691 *   0 - Successfully issued acc response
 4692 *   1 - Failed to issue acc response
 4693 **/
 4694int
 4695lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 4696		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
 4697		 LPFC_MBOXQ_t *mbox)
 4698{
 4699	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 4700	struct lpfc_hba  *phba = vport->phba;
 4701	IOCB_t *icmd;
 4702	IOCB_t *oldcmd;
 4703	struct lpfc_iocbq *elsiocb;
 4704	uint8_t *pcmd;
 4705	struct serv_parm *sp;
 4706	uint16_t cmdsize;
 4707	int rc;
 4708	ELS_PKT *els_pkt_ptr;
 4709
 4710	oldcmd = &oldiocb->iocb;
 4711
 4712	switch (flag) {
 4713	case ELS_CMD_ACC:
 4714		cmdsize = sizeof(uint32_t);
 4715		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
 4716					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
 4717		if (!elsiocb) {
 4718			spin_lock_irq(shost->host_lock);
 4719			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
 4720			spin_unlock_irq(shost->host_lock);
 4721			return 1;
 4722		}
 4723
 4724		icmd = &elsiocb->iocb;
 4725		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 4726		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 4727		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 4728		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 4729		pcmd += sizeof(uint32_t);
 4730
 4731		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4732			"Issue ACC:       did:x%x flg:x%x",
 4733			ndlp->nlp_DID, ndlp->nlp_flag, 0);
 4734		break;
 4735	case ELS_CMD_FLOGI:
 4736	case ELS_CMD_PLOGI:
 4737		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
 4738		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
 4739					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
 4740		if (!elsiocb)
 4741			return 1;
 4742
 4743		icmd = &elsiocb->iocb;
 4744		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 4745		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 4746		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 4747
 4748		if (mbox)
 4749			elsiocb->context_un.mbox = mbox;
 4750
 4751		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 4752		pcmd += sizeof(uint32_t);
 4753		sp = (struct serv_parm *)pcmd;
 4754
 4755		if (flag == ELS_CMD_FLOGI) {
 4756			/* Copy the received service parameters back */
 4757			memcpy(sp, &phba->fc_fabparam,
 4758			       sizeof(struct serv_parm));
 4759
 4760			/* Clear the F_Port bit */
 4761			sp->cmn.fPort = 0;
 4762
 4763			/* Mark all class service parameters as invalid */
 4764			sp->cls1.classValid = 0;
 4765			sp->cls2.classValid = 0;
 4766			sp->cls3.classValid = 0;
 4767			sp->cls4.classValid = 0;
 4768
 4769			/* Copy our worldwide names */
 4770			memcpy(&sp->portName, &vport->fc_sparam.portName,
 4771			       sizeof(struct lpfc_name));
 4772			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
 4773			       sizeof(struct lpfc_name));
 4774		} else {
 4775			memcpy(pcmd, &vport->fc_sparam,
 4776			       sizeof(struct serv_parm));
 4777
 4778			sp->cmn.valid_vendor_ver_level = 0;
 4779			memset(sp->un.vendorVersion, 0,
 4780			       sizeof(sp->un.vendorVersion));
 4781			sp->cmn.bbRcvSizeMsb &= 0xF;
 4782
 4783			/* If our firmware supports this feature, convey that
 4784			 * info to the target using the vendor specific field.
 4785			 */
 4786			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
 4787				sp->cmn.valid_vendor_ver_level = 1;
 4788				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
 4789				sp->un.vv.flags =
 4790					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
 4791			}
 4792		}
 4793
 4794		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4795			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
 4796			ndlp->nlp_DID, ndlp->nlp_flag, 0);
 4797		break;
 4798	case ELS_CMD_PRLO:
 4799		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
 4800		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
 4801					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
 4802		if (!elsiocb)
 4803			return 1;
 4804
 4805		icmd = &elsiocb->iocb;
 4806		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 4807		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 4808		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 4809
 4810		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
 4811		       sizeof(uint32_t) + sizeof(PRLO));
 4812		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
 4813		els_pkt_ptr = (ELS_PKT *) pcmd;
 4814		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
 4815
 4816		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4817			"Issue ACC PRLO:  did:x%x flg:x%x",
 4818			ndlp->nlp_DID, ndlp->nlp_flag, 0);
 4819		break;
 4820	default:
 4821		return 1;
 4822	}
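	/* A LOGO that is being ACC'd is given the logo_acc completion
	 * handler so LOGO cleanup for the node can finish once the ACC
	 * is on the wire; all other responses use the generic
	 * lpfc_cmpl_els_rsp completion handler.
	 */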
 4823	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
 4824		spin_lock_irq(shost->host_lock);
 4825		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
 4826			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
 4827			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
 4828		spin_unlock_irq(shost->host_lock);
 4829		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
 4830	} else {
 4831		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 4832	}
 4833
 4834	phba->fc_stat.elsXmitACC++;
 4835	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 4836	if (rc == IOCB_ERROR) {
 4837		lpfc_els_free_iocb(phba, elsiocb);
 4838		return 1;
 4839	}
 4840	return 0;
 4841}
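
/* Usage sketch (illustrative only, not compiled into the driver): a typical
 * caller in the node state machine ACCs a received PLOGI and passes in a
 * REG_LOGIN mailbox it has already prepared.  "login_mbox" is a placeholder
 * name used only for this sketch.
 *
 *	LPFC_MBOXQ_t *login_mbox;	(REG_LOGIN built by the caller)
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
 *	if (rc)
 *		... ACC was not issued; the caller handles its mailbox ...
 */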
 4842
 4843/**
  4844 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 4845 * @vport: pointer to a virtual N_Port data structure.
  4846 * @rejectError: reject error code word (reason/explanation) placed in the LS_RJT payload.
 4847 * @oldiocb: pointer to the original lpfc command iocb data structure.
 4848 * @ndlp: pointer to a node-list data structure.
 4849 * @mbox: pointer to the driver internal queue element for mailbox command.
 4850 *
  4851 * This routine prepares and issues a Reject (RJT) response IOCB
 4852 * command. If a @mbox pointer is passed in, it will be put into the
 4853 * context_un.mbox field of the IOCB for the completion callback function
 4854 * to issue to the HBA later.
 4855 *
 4856 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 4857 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 4858 * will be stored into the context1 field of the IOCB for the completion
 4859 * callback function to the reject response ELS IOCB command.
 4860 *
 4861 * Return code
 4862 *   0 - Successfully issued reject response
 4863 *   1 - Failed to issue reject response
 4864 **/
 4865int
 4866lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 4867		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
 4868		    LPFC_MBOXQ_t *mbox)
 4869{
 4870	struct lpfc_hba  *phba = vport->phba;
 4871	IOCB_t *icmd;
 4872	IOCB_t *oldcmd;
 4873	struct lpfc_iocbq *elsiocb;
 4874	uint8_t *pcmd;
 4875	uint16_t cmdsize;
 4876	int rc;
 4877
 4878	cmdsize = 2 * sizeof(uint32_t);
 4879	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 4880				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
 4881	if (!elsiocb)
 4882		return 1;
 4883
 4884	icmd = &elsiocb->iocb;
 4885	oldcmd = &oldiocb->iocb;
 4886	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 4887	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 4888	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 4889
 4890	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
 4891	pcmd += sizeof(uint32_t);
 4892	*((uint32_t *) (pcmd)) = rejectError;
 4893
 4894	if (mbox)
 4895		elsiocb->context_un.mbox = mbox;
 4896
 4897	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
 4898	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4899			 "0129 Xmit ELS RJT x%x response tag x%x "
 4900			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
 4901			 "rpi x%x\n",
 4902			 rejectError, elsiocb->iotag,
 4903			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
 4904			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
 4905	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4906		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
 4907		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
 4908
 4909	phba->fc_stat.elsXmitLSRJT++;
 4910	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 4911	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 4912
 4913	if (rc == IOCB_ERROR) {
 4914		lpfc_els_free_iocb(phba, elsiocb);
 4915		return 1;
 4916	}
 4917	return 0;
 4918}
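
/* Usage sketch (illustrative only, not compiled into the driver): callers
 * elsewhere in this file build the reject reason/explanation in a
 * struct ls_rjt and pass the packed word in as @rejectError, for example
 * when an ELS request cannot be serviced:
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(struct ls_rjt));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */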
 4919
 4920/**
 4921 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
 4922 * @vport: pointer to a virtual N_Port data structure.
 4923 * @oldiocb: pointer to the original lpfc command iocb data structure.
 4924 * @ndlp: pointer to a node-list data structure.
 4925 *
 4926 * This routine prepares and issues an Accept (ACC) response to Address
 4927 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
 4928 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
 4929 *
 4930 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 4931 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 4932 * will be stored into the context1 field of the IOCB for the completion
 4933 * callback function to the ADISC Accept response ELS IOCB command.
 4934 *
 4935 * Return code
 4936 *   0 - Successfully issued acc adisc response
 4937 *   1 - Failed to issue adisc acc response
 4938 **/
 4939int
 4940lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 4941		       struct lpfc_nodelist *ndlp)
 4942{
 4943	struct lpfc_hba  *phba = vport->phba;
 4944	ADISC *ap;
 4945	IOCB_t *icmd, *oldcmd;
 4946	struct lpfc_iocbq *elsiocb;
 4947	uint8_t *pcmd;
 4948	uint16_t cmdsize;
 4949	int rc;
 4950
 4951	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
 4952	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 4953				     ndlp->nlp_DID, ELS_CMD_ACC);
 4954	if (!elsiocb)
 4955		return 1;
 4956
 4957	icmd = &elsiocb->iocb;
 4958	oldcmd = &oldiocb->iocb;
 4959	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 4960	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 4961
 4962	/* Xmit ADISC ACC response tag <ulpIoTag> */
 4963	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4964			 "0130 Xmit ADISC ACC response iotag x%x xri: "
 4965			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
 4966			 elsiocb->iotag, elsiocb->iocb.ulpContext,
 4967			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 4968			 ndlp->nlp_rpi);
 4969	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 4970
 4971	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 4972	pcmd += sizeof(uint32_t);
 4973
 4974	ap = (ADISC *) (pcmd);
 4975	ap->hardAL_PA = phba->fc_pref_ALPA;
 4976	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
 4977	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 4978	ap->DID = be32_to_cpu(vport->fc_myDID);
 4979
 4980	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 4981		"Issue ACC ADISC: did:x%x flg:x%x",
 4982		ndlp->nlp_DID, ndlp->nlp_flag, 0);
 4983
 4984	phba->fc_stat.elsXmitACC++;
 4985	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 4986	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 4987	if (rc == IOCB_ERROR) {
 4988		lpfc_els_free_iocb(phba, elsiocb);
 4989		return 1;
 4990	}
 4991
 4992	/* Xmit ELS ACC response tag <ulpIoTag> */
 4993	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 4994			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
 4995			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
 4996			 "RPI: x%x, fc_flag x%x\n",
 4997			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
 4998			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 4999			 ndlp->nlp_rpi, vport->fc_flag);
 5000	return 0;
 5001}
 5002
 5003/**
 5004 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
 5005 * @vport: pointer to a virtual N_Port data structure.
 5006 * @oldiocb: pointer to the original lpfc command iocb data structure.
 5007 * @ndlp: pointer to a node-list data structure.
 5008 *
 5009 * This routine prepares and issues an Accept (ACC) response to Process
 5010 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
 5011 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
 5012 *
 5013 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 5014 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 5015 * will be stored into the context1 field of the IOCB for the completion
 5016 * callback function to the PRLI Accept response ELS IOCB command.
 5017 *
 5018 * Return code
 5019 *   0 - Successfully issued acc prli response
 5020 *   1 - Failed to issue acc prli response
 5021 **/
 5022int
 5023lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 5024		      struct lpfc_nodelist *ndlp)
 5025{
 5026	struct lpfc_hba  *phba = vport->phba;
 5027	PRLI *npr;
 5028	struct lpfc_nvme_prli *npr_nvme;
 5029	lpfc_vpd_t *vpd;
 5030	IOCB_t *icmd;
 5031	IOCB_t *oldcmd;
 5032	struct lpfc_iocbq *elsiocb;
 5033	uint8_t *pcmd;
 5034	uint16_t cmdsize;
 5035	uint32_t prli_fc4_req, *req_payload;
 5036	struct lpfc_dmabuf *req_buf;
 5037	int rc;
 5038	u32 elsrspcmd;
 5039
 5040	/* Need the incoming PRLI payload to determine if the ACC is for an
  5041	 * FCP or NVME PRLI type.  The PRLI type is at word 1.
 5042	 */
 5043	req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
 5044	req_payload = (((uint32_t *)req_buf->virt) + 1);
 5045
 5046	/* PRLI type payload is at byte 3 for FCP or NVME. */
 5047	prli_fc4_req = be32_to_cpu(*req_payload);
 5048	prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
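	/* After the swap and shift, prli_fc4_req holds the FC-4 TYPE code
	 * from the most significant byte of word 1 (PRLI_FCP_TYPE for FCP,
	 * PRLI_NVME_TYPE for NVMe).
	 */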
 5049	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5050			 "6127 PRLI_ACC:  Req Type x%x, Word1 x%08x\n",
 5051			 prli_fc4_req, *((uint32_t *)req_payload));
 5052
 5053	if (prli_fc4_req == PRLI_FCP_TYPE) {
 5054		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 5055		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
 5056	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
 5057		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
 5058		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
 5059	} else {
 5060		return 1;
 5061	}
 5062
 5063	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 5064		ndlp->nlp_DID, elsrspcmd);
 5065	if (!elsiocb)
 5066		return 1;
 5067
 5068	icmd = &elsiocb->iocb;
 5069	oldcmd = &oldiocb->iocb;
 5070	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 5071	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 5072
 5073	/* Xmit PRLI ACC response tag <ulpIoTag> */
 5074	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5075			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
 5076			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
 5077			 elsiocb->iotag, elsiocb->iocb.ulpContext,
 5078			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 5079			 ndlp->nlp_rpi);
 5080	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 5081	memset(pcmd, 0, cmdsize);
 5082
 5083	*((uint32_t *)(pcmd)) = elsrspcmd;
 5084	pcmd += sizeof(uint32_t);
 5085
 5086	/* For PRLI, remainder of payload is PRLI parameter page */
 5087	vpd = &phba->vpd;
 5088
 5089	if (prli_fc4_req == PRLI_FCP_TYPE) {
 5090		/*
 5091		 * If the remote port is a target and our firmware version
 5092		 * is 3.20 or later, set the following bits for FC-TAPE
 5093		 * support.
 5094		 */
 5095		npr = (PRLI *) pcmd;
 5096		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
 5097		    (vpd->rev.feaLevelHigh >= 0x02)) {
 5098			npr->ConfmComplAllowed = 1;
 5099			npr->Retry = 1;
 5100			npr->TaskRetryIdReq = 1;
 5101		}
 5102		npr->acceptRspCode = PRLI_REQ_EXECUTED;
 5103		npr->estabImagePair = 1;
 5104		npr->readXferRdyDis = 1;
 5105		npr->ConfmComplAllowed = 1;
 5106		npr->prliType = PRLI_FCP_TYPE;
 5107		npr->initiatorFunc = 1;
 5108	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
 5109		/* Respond with an NVME PRLI Type */
 5110		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
 5111		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
 5112		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
 5113		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
 5114		if (phba->nvmet_support) {
 5115			bf_set(prli_tgt, npr_nvme, 1);
 5116			bf_set(prli_disc, npr_nvme, 1);
 5117			if (phba->cfg_nvme_enable_fb) {
 5118				bf_set(prli_fba, npr_nvme, 1);
 5119
 5120				/* TBD.  Target mode needs to post buffers
 5121				 * that support the configured first burst
 5122				 * byte size.
 5123				 */
 5124				bf_set(prli_fb_sz, npr_nvme,
 5125				       phba->cfg_nvmet_fb_size);
 5126			}
 5127		} else {
 5128			bf_set(prli_init, npr_nvme, 1);
 5129		}
 5130
 5131		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 5132				 "6015 NVME issue PRLI ACC word1 x%08x "
 5133				 "word4 x%08x word5 x%08x flag x%x, "
 5134				 "fcp_info x%x nlp_type x%x\n",
 5135				 npr_nvme->word1, npr_nvme->word4,
 5136				 npr_nvme->word5, ndlp->nlp_flag,
 5137				 ndlp->nlp_fcp_info, ndlp->nlp_type);
 5138		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
 5139		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
 5140		npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
 5141	} else
 5142		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 5143				 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
 5144				 prli_fc4_req, ndlp->nlp_fc4_type,
 5145				 ndlp->nlp_DID);
 5146
 5147	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 5148		"Issue ACC PRLI:  did:x%x flg:x%x",
 5149		ndlp->nlp_DID, ndlp->nlp_flag, 0);
 5150
 5151	phba->fc_stat.elsXmitACC++;
 5152	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 5153
 5154	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 5155	if (rc == IOCB_ERROR) {
 5156		lpfc_els_free_iocb(phba, elsiocb);
 5157		return 1;
 5158	}
 5159	return 0;
 5160}
 5161
 5162/**
 5163 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
 5164 * @vport: pointer to a virtual N_Port data structure.
 5165 * @format: rnid command format.
 5166 * @oldiocb: pointer to the original lpfc command iocb data structure.
 5167 * @ndlp: pointer to a node-list data structure.
 5168 *
 5169 * This routine issues a Request Node Identification Data (RNID) Accept
 5170 * (ACC) response. It constructs the RNID ACC response command according to
 5171 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
 5172 * issue the response. Note that this command does not need to hold the ndlp
 5173 * reference count for the callback. So, the ndlp reference count taken by
 5174 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
 5175 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
 5176 * there is no ndlp reference available.
 5177 *
 5178 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 5179 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 5180 * will be stored into the context1 field of the IOCB for the completion
 5181 * callback function. However, for the RNID Accept Response ELS command,
 5182 * this is undone later by this routine after the IOCB is allocated.
 5183 *
 5184 * Return code
 5185 *   0 - Successfully issued acc rnid response
 5186 *   1 - Failed to issue acc rnid response
 5187 **/
 5188static int
 5189lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 5190		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
 5191{
 5192	struct lpfc_hba  *phba = vport->phba;
 5193	RNID *rn;
 5194	IOCB_t *icmd, *oldcmd;
 5195	struct lpfc_iocbq *elsiocb;
 5196	uint8_t *pcmd;
 5197	uint16_t cmdsize;
 5198	int rc;
 5199
 5200	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 5201					+ (2 * sizeof(struct lpfc_name));
 5202	if (format)
 5203		cmdsize += sizeof(RNID_TOP_DISC);
 5204
 5205	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 5206				     ndlp->nlp_DID, ELS_CMD_ACC);
 5207	if (!elsiocb)
 5208		return 1;
 5209
 5210	icmd = &elsiocb->iocb;
 5211	oldcmd = &oldiocb->iocb;
 5212	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
 5213	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
 5214
 5215	/* Xmit RNID ACC response tag <ulpIoTag> */
 5216	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5217			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
 5218			 elsiocb->iotag, elsiocb->iocb.ulpContext);
 5219	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 5220	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 5221	pcmd += sizeof(uint32_t);
 5222
 5223	memset(pcmd, 0, sizeof(RNID));
 5224	rn = (RNID *) (pcmd);
 5225	rn->Format = format;
 5226	rn->CommonLen = (2 * sizeof(struct lpfc_name));
 5227	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
 5228	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 5229	switch (format) {
 5230	case 0:
 5231		rn->SpecificLen = 0;
 5232		break;
 5233	case RNID_TOPOLOGY_DISC:
 5234		rn->SpecificLen = sizeof(RNID_TOP_DISC);
 5235		memcpy(&rn->un.topologyDisc.portName,
 5236		       &vport->fc_portname, sizeof(struct lpfc_name));
 5237		rn->un.topologyDisc.unitType = RNID_HBA;
 5238		rn->un.topologyDisc.physPort = 0;
 5239		rn->un.topologyDisc.attachedNodes = 0;
 5240		break;
 5241	default:
 5242		rn->CommonLen = 0;
 5243		rn->SpecificLen = 0;
 5244		break;
 5245	}
 5246
 5247	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 5248		"Issue ACC RNID:  did:x%x flg:x%x",
 5249		ndlp->nlp_DID, ndlp->nlp_flag, 0);
 5250
 5251	phba->fc_stat.elsXmitACC++;
 5252	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 5253
 5254	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 5255	if (rc == IOCB_ERROR) {
 5256		lpfc_els_free_iocb(phba, elsiocb);
 5257		return 1;
 5258	}
 5259	return 0;
 5260}
 5261
 5262/**
  5263 * lpfc_els_clear_rrq - Clear the active rrq for the exchange this RRQ describes.
 5264 * @vport: pointer to a virtual N_Port data structure.
 5265 * @iocb: pointer to the lpfc command iocb data structure.
 5266 * @ndlp: pointer to a node-list data structure.
 5267 *
  5268 * Return: none
 5269 **/
 5270static void
 5271lpfc_els_clear_rrq(struct lpfc_vport *vport,
 5272		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
 5273{
 5274	struct lpfc_hba  *phba = vport->phba;
 5275	uint8_t *pcmd;
 5276	struct RRQ *rrq;
 5277	uint16_t rxid;
 5278	uint16_t xri;
 5279	struct lpfc_node_rrq *prrq;
 5280
 5281
 5282	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
 5283	pcmd += sizeof(uint32_t);
 5284	rrq = (struct RRQ *)pcmd;
 5285	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
 5286	rxid = bf_get(rrq_rxid, rrq);
 5287
 5288	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5289			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
 5290			" x%x x%x\n",
 5291			be32_to_cpu(bf_get(rrq_did, rrq)),
 5292			bf_get(rrq_oxid, rrq),
 5293			rxid,
 5294			iocb->iotag, iocb->iocb.ulpContext);
 5295
 5296	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 5297		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
 5298		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
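	/* If this port originated the exchange named by the RRQ, the local
	 * XRI corresponds to the OX_ID; otherwise use the responder RX_ID.
	 */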
 5299	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
 5300		xri = bf_get(rrq_oxid, rrq);
 5301	else
 5302		xri = rxid;
 5303	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
 5304	if (prrq)
 5305		lpfc_clr_rrq_active(phba, xri, prrq);
 5306	return;
 5307}
 5308
 5309/**
 5310 * lpfc_els_rsp_echo_acc - Issue echo acc response
 5311 * @vport: pointer to a virtual N_Port data structure.
 5312 * @data: pointer to echo data to return in the accept.
 5313 * @oldiocb: pointer to the original lpfc command iocb data structure.
 5314 * @ndlp: pointer to a node-list data structure.
 5315 *
 5316 * Return code
 5317 *   0 - Successfully issued acc echo response
 5318 *   1 - Failed to issue acc echo response
 5319 **/
 5320static int
 5321lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
 5322		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
 5323{
 5324	struct lpfc_hba  *phba = vport->phba;
 5325	struct lpfc_iocbq *elsiocb;
 5326	uint8_t *pcmd;
 5327	uint16_t cmdsize;
 5328	int rc;
 5329
 5330	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
 5331
 5332	/* The accumulated length can exceed the BPL_SIZE.  For
 5333	 * now, use this as the limit
 5334	 */
 5335	if (cmdsize > LPFC_BPL_SIZE)
 5336		cmdsize = LPFC_BPL_SIZE;
 5337	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 5338				     ndlp->nlp_DID, ELS_CMD_ACC);
 5339	if (!elsiocb)
 5340		return 1;
 5341
 5342	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
 5343	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
 5344
 5345	/* Xmit ECHO ACC response tag <ulpIoTag> */
 5346	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5347			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
 5348			 elsiocb->iotag, elsiocb->iocb.ulpContext);
 5349	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 5350	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 5351	pcmd += sizeof(uint32_t);
 5352	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
 5353
 5354	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 5355		"Issue ACC ECHO:  did:x%x flg:x%x",
 5356		ndlp->nlp_DID, ndlp->nlp_flag, 0);
 5357
 5358	phba->fc_stat.elsXmitACC++;
 5359	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 5360
 5361	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 5362	if (rc == IOCB_ERROR) {
 5363		lpfc_els_free_iocb(phba, elsiocb);
 5364		return 1;
 5365	}
 5366	return 0;
 5367}
 5368
 5369/**
 5370 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
 5371 * @vport: pointer to a host virtual N_Port data structure.
 5372 *
 5373 * This routine issues Address Discover (ADISC) ELS commands to those
 5374 * N_Ports which are in node port recovery state and ADISC has not been issued
 5375 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
 5376 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
  5377 * (num_disc_nodes) is incremented. If num_disc_nodes reaches a
  5378 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
  5379 * set in the @vport fc_flag and the issuing of the remaining ADISC IOCBs
  5380 * is deferred to a later pass. On the other hand, if walking through all
  5381 * the ndlps on the @vport issues no ADISC IOCB at all, the FC_NLP_MORE bit
  5382 * is cleared from the @vport fc_flag to indicate that no more ADISCs need
  5383 * to be sent.
 5384 *
 5385 * Return code
 5386 *    The number of N_Ports with adisc issued.
 5387 **/
 5388int
 5389lpfc_els_disc_adisc(struct lpfc_vport *vport)
 5390{
 5391	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 5392	struct lpfc_nodelist *ndlp, *next_ndlp;
 5393	int sentadisc = 0;
 5394
 5395	/* go thru NPR nodes and issue any remaining ELS ADISCs */
 5396	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 5397		if (!NLP_CHK_NODE_ACT(ndlp))
 5398			continue;
 5399		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
 5400		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
 5401		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
 5402			spin_lock_irq(shost->host_lock);
 5403			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 5404			spin_unlock_irq(shost->host_lock);
 5405			ndlp->nlp_prev_state = ndlp->nlp_state;
 5406			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 5407			lpfc_issue_els_adisc(vport, ndlp, 0);
 5408			sentadisc++;
 5409			vport->num_disc_nodes++;
 5410			if (vport->num_disc_nodes >=
 5411			    vport->cfg_discovery_threads) {
 5412				spin_lock_irq(shost->host_lock);
 5413				vport->fc_flag |= FC_NLP_MORE;
 5414				spin_unlock_irq(shost->host_lock);
 5415				break;
 5416			}
 5417		}
 5418	}
 5419	if (sentadisc == 0) {
 5420		spin_lock_irq(shost->host_lock);
 5421		vport->fc_flag &= ~FC_NLP_MORE;
 5422		spin_unlock_irq(shost->host_lock);
 5423	}
 5424	return sentadisc;
 5425}
 5426
 5427/**
 5428 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 5429 * @vport: pointer to a host virtual N_Port data structure.
 5430 *
  5431 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
  5432 * which are in node port recovery state on a @vport. Each time an ELS
  5433 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
  5434 * the per @vport number of discover count (num_disc_nodes) is
  5435 * incremented. If num_disc_nodes reaches a pre-configured threshold
  5436 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
  5437 * fc_flag and the issuing of the remaining PLOGI IOCBs is deferred to a
  5438 * later pass. On the other hand, if walking through all the ndlps on
  5439 * the @vport issues no PLOGI IOCB at all, the FC_NLP_MORE bit is
  5440 * cleared from the @vport fc_flag to indicate that no more PLOGIs
  5441 * need to be sent.
 5442 *
 5443 * Return code
 5444 *   The number of N_Ports with plogi issued.
 5445 **/
 5446int
 5447lpfc_els_disc_plogi(struct lpfc_vport *vport)
 5448{
 5449	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 5450	struct lpfc_nodelist *ndlp, *next_ndlp;
 5451	int sentplogi = 0;
 5452
 5453	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
 5454	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 5455		if (!NLP_CHK_NODE_ACT(ndlp))
 5456			continue;
 5457		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
 5458				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
 5459				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
 5460				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
 5461			ndlp->nlp_prev_state = ndlp->nlp_state;
 5462			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 5463			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 5464			sentplogi++;
 5465			vport->num_disc_nodes++;
 5466			if (vport->num_disc_nodes >=
 5467					vport->cfg_discovery_threads) {
 5468				spin_lock_irq(shost->host_lock);
 5469				vport->fc_flag |= FC_NLP_MORE;
 5470				spin_unlock_irq(shost->host_lock);
 5471				break;
 5472			}
 5473		}
 5474	}
 5475
 5476	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 5477			 "6452 Discover PLOGI %d flag x%x\n",
 5478			 sentplogi, vport->fc_flag);
 5479
 5480	if (sentplogi) {
 5481		lpfc_set_disctmo(vport);
  5482	} else {
 5484		spin_lock_irq(shost->host_lock);
 5485		vport->fc_flag &= ~FC_NLP_MORE;
 5486		spin_unlock_irq(shost->host_lock);
 5487	}
 5488	return sentplogi;
 5489}
 5490
 5491static uint32_t
 5492lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
 5493		uint32_t word0)
 5494{
 5495
 5496	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
 5497	desc->payload.els_req = word0;
 5498	desc->length = cpu_to_be32(sizeof(desc->payload));
 5499
 5500	return sizeof(struct fc_rdp_link_service_desc);
 5501}
 5502
 5503static uint32_t
 5504lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
 5505		uint8_t *page_a0, uint8_t *page_a2)
 5506{
 5507	uint16_t wavelength;
 5508	uint16_t temperature;
 5509	uint16_t rx_power;
 5510	uint16_t tx_bias;
 5511	uint16_t tx_power;
 5512	uint16_t vcc;
 5513	uint16_t flag = 0;
 5514	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
 5515	struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
 5516
 5517	desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
 5518
 5519	trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
 5520			&page_a0[SSF_TRANSCEIVER_CODE_B4];
 5521	trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
 5522			&page_a0[SSF_TRANSCEIVER_CODE_B5];
 5523
 5524	if ((trasn_code_byte4->fc_sw_laser) ||
 5525	    (trasn_code_byte5->fc_sw_laser_sl) ||
  5526	    (trasn_code_byte5->fc_sw_laser_sn)) {  /* check if it's short WL */
 5527		flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
 5528	} else if (trasn_code_byte4->fc_lw_laser) {
 5529		wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
 5530			page_a0[SSF_WAVELENGTH_B0];
 5531		if (wavelength == SFP_WAVELENGTH_LC1310)
 5532			flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
 5533		if (wavelength == SFP_WAVELENGTH_LL1550)
 5534			flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
 5535	}
  5536	/* check if it's SFP+ */
 5537	flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
 5538			SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
 5539					<< SFP_FLAG_CT_SHIFT;
 5540
  5541	/* check if it's OPTICAL */
 5542	flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
 5543			SFP_FLAG_IS_OPTICAL_PORT : 0)
 5544					<< SFP_FLAG_IS_OPTICAL_SHIFT;
 5545
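	/* Each A2 diagnostic value is a two-byte quantity; assemble it
	 * MSB-first from its byte pair before storing it big-endian in
	 * the descriptor.
	 */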
 5546	temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
 5547		page_a2[SFF_TEMPERATURE_B0]);
 5548	vcc = (page_a2[SFF_VCC_B1] << 8 |
 5549		page_a2[SFF_VCC_B0]);
 5550	tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
 5551		page_a2[SFF_TXPOWER_B0]);
 5552	tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
 5553		page_a2[SFF_TX_BIAS_CURRENT_B0]);
 5554	rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
 5555		page_a2[SFF_RXPOWER_B0]);
 5556	desc->sfp_info.temperature = cpu_to_be16(temperature);
 5557	desc->sfp_info.rx_power = cpu_to_be16(rx_power);
 5558	desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
 5559	desc->sfp_info.tx_power = cpu_to_be16(tx_power);
 5560	desc->sfp_info.vcc = cpu_to_be16(vcc);
 5561
 5562	desc->sfp_info.flags = cpu_to_be16(flag);
 5563	desc->length = cpu_to_be32(sizeof(desc->sfp_info));
 5564
 5565	return sizeof(struct fc_rdp_sfp_desc);
 5566}
 5567
 5568static uint32_t
 5569lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
 5570		READ_LNK_VAR *stat)
 5571{
 5572	uint32_t type;
 5573
 5574	desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
 5575
 5576	type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
 5577
 5578	desc->info.port_type = cpu_to_be32(type);
 5579
 5580	desc->info.link_status.link_failure_cnt =
 5581		cpu_to_be32(stat->linkFailureCnt);
 5582	desc->info.link_status.loss_of_synch_cnt =
 5583		cpu_to_be32(stat->lossSyncCnt);
 5584	desc->info.link_status.loss_of_signal_cnt =
 5585		cpu_to_be32(stat->lossSignalCnt);
 5586	desc->info.link_status.primitive_seq_proto_err =
 5587		cpu_to_be32(stat->primSeqErrCnt);
 5588	desc->info.link_status.invalid_trans_word =
 5589		cpu_to_be32(stat->invalidXmitWord);
 5590	desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
 5591
 5592	desc->length = cpu_to_be32(sizeof(desc->info));
 5593
 5594	return sizeof(struct fc_rdp_link_error_status_desc);
 5595}
 5596
 5597static uint32_t
 5598lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
 5599		      struct lpfc_vport *vport)
 5600{
 5601	uint32_t bbCredit;
 5602
 5603	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
 5604
 5605	bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
 5606			(vport->fc_sparam.cmn.bbCreditMsb << 8);
 5607	desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
 5608	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 5609		bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
 5610			(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
 5611		desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
 5612	} else {
 5613		desc->bbc_info.attached_port_bbc = 0;
 5614	}
 5615
 5616	desc->bbc_info.rtt = 0;
 5617	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
 5618
 5619	return sizeof(struct fc_rdp_bbc_desc);
 5620}
 5621
 5622static uint32_t
 5623lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
 5624			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
 5625{
 5626	uint32_t flags = 0;
 5627
 5628	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 5629
 5630	desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
 5631	desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
 5632	desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
 5633	desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
 5634
 5635	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
 5636		flags |= RDP_OET_HIGH_ALARM;
 5637	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
 5638		flags |= RDP_OET_LOW_ALARM;
 5639	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
 5640		flags |= RDP_OET_HIGH_WARNING;
 5641	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
 5642		flags |= RDP_OET_LOW_WARNING;
 5643
 5644	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
 5645	desc->oed_info.function_flags = cpu_to_be32(flags);
 5646	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 5647	return sizeof(struct fc_rdp_oed_sfp_desc);
 5648}
 5649
 5650static uint32_t
 5651lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
 5652			      struct fc_rdp_oed_sfp_desc *desc,
 5653			      uint8_t *page_a2)
 5654{
 5655	uint32_t flags = 0;
 5656
 5657	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 5658
 5659	desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
 5660	desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
 5661	desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
 5662	desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
 5663
 5664	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
 5665		flags |= RDP_OET_HIGH_ALARM;
 5666	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
 5667		flags |= RDP_OET_LOW_ALARM;
 5668	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
 5669		flags |= RDP_OET_HIGH_WARNING;
 5670	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
 5671		flags |= RDP_OET_LOW_WARNING;
 5672
 5673	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
 5674	desc->oed_info.function_flags = cpu_to_be32(flags);
 5675	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 5676	return sizeof(struct fc_rdp_oed_sfp_desc);
 5677}
 5678
 5679static uint32_t
 5680lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
 5681			     struct fc_rdp_oed_sfp_desc *desc,
 5682			     uint8_t *page_a2)
 5683{
 5684	uint32_t flags = 0;
 5685
 5686	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 5687
 5688	desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
 5689	desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
 5690	desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
 5691	desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
 5692
 5693	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
 5694		flags |= RDP_OET_HIGH_ALARM;
 5695	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
 5696		flags |= RDP_OET_LOW_ALARM;
 5697	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
 5698		flags |= RDP_OET_HIGH_WARNING;
 5699	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
 5700		flags |= RDP_OET_LOW_WARNING;
 5701
 5702	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
 5703	desc->oed_info.function_flags = cpu_to_be32(flags);
 5704	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 5705	return sizeof(struct fc_rdp_oed_sfp_desc);
 5706}
 5707
 5708static uint32_t
 5709lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
 5710			      struct fc_rdp_oed_sfp_desc *desc,
 5711			      uint8_t *page_a2)
 5712{
 5713	uint32_t flags = 0;
 5714
 5715	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 5716
 5717	desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
 5718	desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
 5719	desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
 5720	desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
 5721
 5722	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
 5723		flags |= RDP_OET_HIGH_ALARM;
 5724	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
 5725		flags |= RDP_OET_LOW_ALARM;
 5726	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
 5727		flags |= RDP_OET_HIGH_WARNING;
 5728	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
 5729		flags |= RDP_OET_LOW_WARNING;
 5730
 5731	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
 5732	desc->oed_info.function_flags = cpu_to_be32(flags);
 5733	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 5734	return sizeof(struct fc_rdp_oed_sfp_desc);
 5735}
 5736
 5737
 5738static uint32_t
 5739lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
 5740			      struct fc_rdp_oed_sfp_desc *desc,
 5741			      uint8_t *page_a2)
 5742{
 5743	uint32_t flags = 0;
 5744
 5745	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 5746
 5747	desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
 5748	desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
 5749	desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
 5750	desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
 5751
 5752	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
 5753		flags |= RDP_OET_HIGH_ALARM;
 5754	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
 5755		flags |= RDP_OET_LOW_ALARM;
 5756	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
 5757		flags |= RDP_OET_HIGH_WARNING;
 5758	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
 5759		flags |= RDP_OET_LOW_WARNING;
 5760
 5761	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
 5762	desc->oed_info.function_flags = cpu_to_be32(flags);
 5763	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 5764	return sizeof(struct fc_rdp_oed_sfp_desc);
 5765}
 5766
 5767static uint32_t
 5768lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
 5769		      uint8_t *page_a0, struct lpfc_vport *vport)
 5770{
 5771	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
 5772	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
 5773	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
 5774	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
 5775	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
 5776	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
 5777	desc->length = cpu_to_be32(sizeof(desc->opd_info));
 5778	return sizeof(struct fc_rdp_opd_sfp_desc);
 5779}
 5780
 5781static uint32_t
 5782lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
 5783{
 5784	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
 5785		return 0;
 5786	desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
 5787
 5788	desc->info.CorrectedBlocks =
 5789		cpu_to_be32(stat->fecCorrBlkCount);
 5790	desc->info.UncorrectableBlocks =
 5791		cpu_to_be32(stat->fecUncorrBlkCount);
 5792
 5793	desc->length = cpu_to_be32(sizeof(desc->info));
 5794
 5795	return sizeof(struct fc_fec_rdp_desc);
 5796}
 5797
 5798static uint32_t
 5799lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 5800{
 5801	uint16_t rdp_cap = 0;
 5802	uint16_t rdp_speed;
 5803
 5804	desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
 5805
 5806	switch (phba->fc_linkspeed) {
 5807	case LPFC_LINK_SPEED_1GHZ:
 5808		rdp_speed = RDP_PS_1GB;
 5809		break;
 5810	case LPFC_LINK_SPEED_2GHZ:
 5811		rdp_speed = RDP_PS_2GB;
 5812		break;
 5813	case LPFC_LINK_SPEED_4GHZ:
 5814		rdp_speed = RDP_PS_4GB;
 5815		break;
 5816	case LPFC_LINK_SPEED_8GHZ:
 5817		rdp_speed = RDP_PS_8GB;
 5818		break;
 5819	case LPFC_LINK_SPEED_10GHZ:
 5820		rdp_speed = RDP_PS_10GB;
 5821		break;
 5822	case LPFC_LINK_SPEED_16GHZ:
 5823		rdp_speed = RDP_PS_16GB;
 5824		break;
 5825	case LPFC_LINK_SPEED_32GHZ:
 5826		rdp_speed = RDP_PS_32GB;
 5827		break;
 5828	case LPFC_LINK_SPEED_64GHZ:
 5829		rdp_speed = RDP_PS_64GB;
 5830		break;
 5831	default:
 5832		rdp_speed = RDP_PS_UNKNOWN;
 5833		break;
 5834	}
 5835
 5836	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
 5837
 5838	if (phba->lmt & LMT_128Gb)
 5839		rdp_cap |= RDP_PS_128GB;
 5840	if (phba->lmt & LMT_64Gb)
 5841		rdp_cap |= RDP_PS_64GB;
 5842	if (phba->lmt & LMT_32Gb)
 5843		rdp_cap |= RDP_PS_32GB;
 5844	if (phba->lmt & LMT_16Gb)
 5845		rdp_cap |= RDP_PS_16GB;
 5846	if (phba->lmt & LMT_10Gb)
 5847		rdp_cap |= RDP_PS_10GB;
 5848	if (phba->lmt & LMT_8Gb)
 5849		rdp_cap |= RDP_PS_8GB;
 5850	if (phba->lmt & LMT_4Gb)
 5851		rdp_cap |= RDP_PS_4GB;
 5852	if (phba->lmt & LMT_2Gb)
 5853		rdp_cap |= RDP_PS_2GB;
 5854	if (phba->lmt & LMT_1Gb)
 5855		rdp_cap |= RDP_PS_1GB;
 5856
 5857	if (rdp_cap == 0)
 5858		rdp_cap = RDP_CAP_UNKNOWN;
 5859	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
 5860		rdp_cap |= RDP_CAP_USER_CONFIGURED;
 5861
 5862	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
 5863	desc->length = cpu_to_be32(sizeof(desc->info));
 5864	return sizeof(struct fc_rdp_port_speed_desc);
 5865}
 5866
 5867static uint32_t
 5868lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
 5869		struct lpfc_vport *vport)
 5870{
 5871
 5872	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 5873
 5874	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
 5875			sizeof(desc->port_names.wwnn));
 5876
 5877	memcpy(desc->port_names.wwpn, &vport->fc_portname,
 5878			sizeof(desc->port_names.wwpn));
 5879
 5880	desc->length = cpu_to_be32(sizeof(desc->port_names));
 5881	return sizeof(struct fc_rdp_port_name_desc);
 5882}
 5883
 5884static uint32_t
 5885lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
 5886		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 5887{
 5888
 5889	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 5890	if (vport->fc_flag & FC_FABRIC) {
 5891		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
 5892		       sizeof(desc->port_names.wwnn));
 5893
 5894		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
 5895		       sizeof(desc->port_names.wwpn));
 5896	} else {  /* Point to Point */
 5897		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
 5898		       sizeof(desc->port_names.wwnn));
 5899
 5900		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
 5901		       sizeof(desc->port_names.wwpn));
 5902	}
 5903
 5904	desc->length = cpu_to_be32(sizeof(desc->port_names));
 5905	return sizeof(struct fc_rdp_port_name_desc);
 5906}
 5907
 5908static void
 5909lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
 5910		int status)
 5911{
 5912	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
 5913	struct lpfc_vport *vport = ndlp->vport;
 5914	struct lpfc_iocbq *elsiocb;
 5915	struct ulp_bde64 *bpl;
 5916	IOCB_t *icmd;
 5917	uint8_t *pcmd;
 5918	struct ls_rjt *stat;
 5919	struct fc_rdp_res_frame *rdp_res;
 5920	uint32_t cmdsize, len;
 5921	uint16_t *flag_ptr;
 5922	int rc;
 5923
 5924	if (status != SUCCESS)
 5925		goto error;
 5926
 5927	/* This will change once we know the true size of the RDP payload */
 5928	cmdsize = sizeof(struct fc_rdp_res_frame);
 5929
 5930	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
 5931			lpfc_max_els_tries, rdp_context->ndlp,
 5932			rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
 5933	lpfc_nlp_put(ndlp);
 5934	if (!elsiocb)
 5935		goto free_rdp_context;
 5936
 5937	icmd = &elsiocb->iocb;
 5938	icmd->ulpContext = rdp_context->rx_id;
 5939	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
 5940
 5941	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 5942			"2171 Xmit RDP response tag x%x xri x%x, "
 5943			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
 5944			elsiocb->iotag, elsiocb->iocb.ulpContext,
 5945			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 5946			ndlp->nlp_rpi);
 5947	rdp_res = (struct fc_rdp_res_frame *)
 5948		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 5949	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 5950	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
 5951	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 5952
 5953	/* Update Alarm and Warning */
 5954	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
 5955	phba->sfp_alarm |= *flag_ptr;
 5956	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
 5957	phba->sfp_warning |= *flag_ptr;
 5958
 5959	/* For RDP payload */
 5960	len = 8;
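	/* len starts at 8 to account for the ACC command word and the
	 * payload-length word at the top of the response frame; each
	 * lpfc_rdp_res_*() helper below writes one descriptor at
	 * (pcmd + len) and returns the number of bytes it added.
	 */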
 5961	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
 5962					 (len + pcmd), ELS_CMD_RDP);
 5963
 5964	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
 5965			rdp_context->page_a0, rdp_context->page_a2);
 5966	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
 5967				  phba);
 5968	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
 5969				       (len + pcmd), &rdp_context->link_stat);
 5970	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
 5971					     (len + pcmd), vport);
 5972	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
 5973					(len + pcmd), vport, ndlp);
 5974	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
 5975			&rdp_context->link_stat);
 5976	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
 5977				     &rdp_context->link_stat, vport);
 5978	len += lpfc_rdp_res_oed_temp_desc(phba,
 5979				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
 5980				rdp_context->page_a2);
 5981	len += lpfc_rdp_res_oed_voltage_desc(phba,
 5982				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
 5983				rdp_context->page_a2);
 5984