/drivers/scsi/lpfc/lpfc_els.c

http://github.com/mirrors/linux

   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 *******************************************************************/
  23/* See Fibre Channel protocol T11 FC-LS for details */
  24#include <linux/blkdev.h>
  25#include <linux/pci.h>
  26#include <linux/slab.h>
  27#include <linux/interrupt.h>
  28
  29#include <scsi/scsi.h>
  30#include <scsi/scsi_device.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_transport_fc.h>
  33#include <uapi/scsi/fc/fc_fs.h>
  34#include <uapi/scsi/fc/fc_els.h>
  35
  36#include "lpfc_hw4.h"
  37#include "lpfc_hw.h"
  38#include "lpfc_sli.h"
  39#include "lpfc_sli4.h"
  40#include "lpfc_nl.h"
  41#include "lpfc_disc.h"
  42#include "lpfc_scsi.h"
  43#include "lpfc.h"
  44#include "lpfc_logmsg.h"
  45#include "lpfc_crtn.h"
  46#include "lpfc_vport.h"
  47#include "lpfc_debugfs.h"
  48
  49static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
  50			  struct lpfc_iocbq *);
  51static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
  52			struct lpfc_iocbq *);
  53static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
  54static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
  55				struct lpfc_nodelist *ndlp, uint8_t retry);
  56static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
  57				  struct lpfc_iocbq *iocb);
  58
  59static int lpfc_max_els_tries = 3;
  60
  61/**
  62 * lpfc_els_chk_latt - Check host link attention event for a vport
  63 * @vport: pointer to a host virtual N_Port data structure.
  64 *
   65 * This routine checks whether there is an outstanding host link
   66 * attention event during the discovery process with the @vport. It is
   67 * done by reading the HBA's Host Attention (HA) register. If any host
   68 * link attention event occurred during this @vport's discovery process,
   69 * the @vport shall be marked FC_ABORT_DISCOVERY, a host link attention
   70 * clear shall be issued if the link state is not already in the host
   71 * link cleared state, and the return code shall indicate whether a host
   72 * link attention event happened.
   73 *
   74 * Note that if either the host link is in state LPFC_LINK_DOWN or the
   75 * @vport state is at least LPFC_VPORT_READY, the check is skipped and
   76 * the return code shall indicate that no host link attention event
   77 * happened.
  78 *
  79 * Return codes
  80 *   0 - no host link attention event happened
  81 *   1 - host link attention event happened
  82 **/
  83int
  84lpfc_els_chk_latt(struct lpfc_vport *vport)
  85{
  86	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  87	struct lpfc_hba  *phba = vport->phba;
  88	uint32_t ha_copy;
  89
  90	if (vport->port_state >= LPFC_VPORT_READY ||
  91	    phba->link_state == LPFC_LINK_DOWN ||
  92	    phba->sli_rev > LPFC_SLI_REV3)
  93		return 0;
  94
  95	/* Read the HBA Host Attention Register */
  96	if (lpfc_readl(phba->HAregaddr, &ha_copy))
  97		return 1;
  98
  99	if (!(ha_copy & HA_LATT))
 100		return 0;
 101
 102	/* Pending Link Event during Discovery */
 103	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 104			 "0237 Pending Link Event during "
 105			 "Discovery: State x%x\n",
 106			 phba->pport->port_state);
 107
 108	/* CLEAR_LA should re-enable link attention events and
 109	 * we should then immediately take a LATT event. The
 110	 * LATT processing should call lpfc_linkdown() which
 111	 * will cleanup any left over in-progress discovery
 112	 * events.
 113	 */
 114	spin_lock_irq(shost->host_lock);
 115	vport->fc_flag |= FC_ABORT_DISCOVERY;
 116	spin_unlock_irq(shost->host_lock);
 117
 118	if (phba->link_state != LPFC_CLEAR_LA)
 119		lpfc_issue_clear_la(phba, vport);
 120
 121	return 1;
 122}
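
/*
 * Illustrative sketch, not part of the original file: the typical way an
 * ELS completion handler consumes lpfc_els_chk_latt(). This mirrors the
 * pattern used by lpfc_cmpl_els_flogi() later in this file; the callback
 * name below is hypothetical.
 */
static void
example_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;

	/* If the link bounced during discovery, drop the node reference
	 * taken for this command and bail out; LATT processing cleans up
	 * the in-progress discovery state.
	 */
	if (lpfc_els_chk_latt(vport)) {
		lpfc_nlp_put(ndlp);
		goto out;
	}

	/* ... normal command-specific completion handling ... */
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}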
 123
 124/**
 125 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 126 * @vport: pointer to a host virtual N_Port data structure.
 127 * @expectRsp: flag indicating whether response is expected.
 128 * @cmdSize: size of the ELS command.
 129 * @retry: number of retries to the command IOCB when it fails.
 130 * @ndlp: pointer to a node-list data structure.
 131 * @did: destination identifier.
 132 * @elscmd: the ELS command code.
 133 *
  134 * This routine allocates an lpfc IOCB data structure from the driver's
  135 * IOCB free-list and prepares the IOCB with the parameters passed in,
  136 * so that the discovery state machine can issue an Extended Link
  137 * Service (ELS) command. It is the generic IOCB allocation and
  138 * preparation routine used by all the discovery state machine routines;
  139 * the ELS command-specific fields are set up later by the individual
  140 * discovery routines once this routine has returned a prepared generic
  141 * IOCB. It fills in the Buffer Descriptor Entries (BDEs) and allocates
  142 * buffers for both the command payload and the response payload (if a
  143 * response is expected). The reference count on the ndlp is incremented
  144 * by 1 and the reference to the ndlp is stored in context1 of the IOCB,
  145 * holding the ndlp reference for the command's completion callback
  146 * function to access later.
 147 *
 148 * Return code
 149 *   Pointer to the newly allocated/prepared els iocb data structure
 150 *   NULL - when els iocb data structure allocation/preparation failed
 151 **/
 152struct lpfc_iocbq *
 153lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 154		   uint16_t cmdSize, uint8_t retry,
 155		   struct lpfc_nodelist *ndlp, uint32_t did,
 156		   uint32_t elscmd)
 157{
 158	struct lpfc_hba  *phba = vport->phba;
 159	struct lpfc_iocbq *elsiocb;
 160	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
 161	struct ulp_bde64 *bpl;
 162	IOCB_t *icmd;
 163
 164
 165	if (!lpfc_is_link_up(phba))
 166		return NULL;
 167
  168	/* Allocate buffer for command iocb */
 169	elsiocb = lpfc_sli_get_iocbq(phba);
 170
 171	if (elsiocb == NULL)
 172		return NULL;
 173
 174	/*
 175	 * If this command is for fabric controller and HBA running
 176	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
 177	 */
 178	if ((did == Fabric_DID) &&
 179		(phba->hba_flag & HBA_FIP_SUPPORT) &&
 180		((elscmd == ELS_CMD_FLOGI) ||
 181		 (elscmd == ELS_CMD_FDISC) ||
 182		 (elscmd == ELS_CMD_LOGO)))
 183		switch (elscmd) {
 184		case ELS_CMD_FLOGI:
 185		elsiocb->iocb_flag |=
 186			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
 187					& LPFC_FIP_ELS_ID_MASK);
 188		break;
 189		case ELS_CMD_FDISC:
 190		elsiocb->iocb_flag |=
 191			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
 192					& LPFC_FIP_ELS_ID_MASK);
 193		break;
 194		case ELS_CMD_LOGO:
 195		elsiocb->iocb_flag |=
 196			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
 197					& LPFC_FIP_ELS_ID_MASK);
 198		break;
 199		}
 200	else
 201		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
 202
 203	icmd = &elsiocb->iocb;
 204
 205	/* fill in BDEs for command */
 206	/* Allocate buffer for command payload */
 207	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 208	if (pcmd)
 209		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
 210	if (!pcmd || !pcmd->virt)
 211		goto els_iocb_free_pcmb_exit;
 212
 213	INIT_LIST_HEAD(&pcmd->list);
 214
 215	/* Allocate buffer for response payload */
 216	if (expectRsp) {
 217		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 218		if (prsp)
 219			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
 220						     &prsp->phys);
 221		if (!prsp || !prsp->virt)
 222			goto els_iocb_free_prsp_exit;
 223		INIT_LIST_HEAD(&prsp->list);
 224	} else
 225		prsp = NULL;
 226
 227	/* Allocate buffer for Buffer ptr list */
 228	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 229	if (pbuflist)
 230		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
 231						 &pbuflist->phys);
 232	if (!pbuflist || !pbuflist->virt)
 233		goto els_iocb_free_pbuf_exit;
 234
 235	INIT_LIST_HEAD(&pbuflist->list);
 236
 237	if (expectRsp) {
 238		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
 239		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
 240		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 241		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
 242
 243		icmd->un.elsreq64.remoteID = did;		/* DID */
 244		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
 245		if (elscmd == ELS_CMD_FLOGI)
 246			icmd->ulpTimeout = FF_DEF_RATOV * 2;
 247		else if (elscmd == ELS_CMD_LOGO)
 248			icmd->ulpTimeout = phba->fc_ratov;
 249		else
 250			icmd->ulpTimeout = phba->fc_ratov * 2;
 251	} else {
 252		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
 253		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
 254		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 255		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
 256		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
 257		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
 258	}
 259	icmd->ulpBdeCount = 1;
 260	icmd->ulpLe = 1;
 261	icmd->ulpClass = CLASS3;
 262
 263	/*
 264	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
 265	 * For SLI4, since the driver controls VPIs we also want to include
 266	 * all ELS pt2pt protocol traffic as well.
 267	 */
 268	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
 269		((phba->sli_rev == LPFC_SLI_REV4) &&
 270		    (vport->fc_flag & FC_PT2PT))) {
 271
 272		if (expectRsp) {
 273			icmd->un.elsreq64.myID = vport->fc_myDID;
 274
 275			/* For ELS_REQUEST64_CR, use the VPI by default */
 276			icmd->ulpContext = phba->vpi_ids[vport->vpi];
 277		}
 278
 279		icmd->ulpCt_h = 0;
 280		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 281		if (elscmd == ELS_CMD_ECHO)
 282			icmd->ulpCt_l = 0; /* context = invalid RPI */
 283		else
 284			icmd->ulpCt_l = 1; /* context = VPI */
 285	}
 286
 287	bpl = (struct ulp_bde64 *) pbuflist->virt;
 288	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
 289	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
 290	bpl->tus.f.bdeSize = cmdSize;
 291	bpl->tus.f.bdeFlags = 0;
 292	bpl->tus.w = le32_to_cpu(bpl->tus.w);
 293
 294	if (expectRsp) {
 295		bpl++;
 296		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
 297		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
 298		bpl->tus.f.bdeSize = FCELSSIZE;
 299		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 300		bpl->tus.w = le32_to_cpu(bpl->tus.w);
 301	}
 302
 303	/* prevent preparing iocb with NULL ndlp reference */
 304	elsiocb->context1 = lpfc_nlp_get(ndlp);
 305	if (!elsiocb->context1)
 306		goto els_iocb_free_pbuf_exit;
 307	elsiocb->context2 = pcmd;
 308	elsiocb->context3 = pbuflist;
 309	elsiocb->retry = retry;
 310	elsiocb->vport = vport;
 311	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
 312
 313	if (prsp) {
 314		list_add(&prsp->list, &pcmd->list);
 315	}
 316	if (expectRsp) {
 317		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
 318		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 319				 "0116 Xmit ELS command x%x to remote "
 320				 "NPORT x%x I/O tag: x%x, port state:x%x "
 321				 "rpi x%x fc_flag:x%x\n",
 322				 elscmd, did, elsiocb->iotag,
 323				 vport->port_state, ndlp->nlp_rpi,
 324				 vport->fc_flag);
 325	} else {
 326		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
 327		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 328				 "0117 Xmit ELS response x%x to remote "
 329				 "NPORT x%x I/O tag: x%x, size: x%x "
 330				 "port_state x%x  rpi x%x fc_flag x%x\n",
 331				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
 332				 cmdSize, vport->port_state,
 333				 ndlp->nlp_rpi, vport->fc_flag);
 334	}
 335	return elsiocb;
 336
 337els_iocb_free_pbuf_exit:
 338	if (expectRsp)
 339		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
 340	kfree(pbuflist);
 341
 342els_iocb_free_prsp_exit:
 343	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
 344	kfree(prsp);
 345
 346els_iocb_free_pcmb_exit:
 347	kfree(pcmd);
 348	lpfc_sli_release_iocbq(phba, elsiocb);
 349	return NULL;
 350}
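
/*
 * Illustrative sketch, not part of the original file: how a discovery
 * routine typically uses lpfc_prep_els_iocb(). This follows the shape of
 * lpfc_issue_els_flogi() later in this file: prepare the generic IOCB,
 * write the ELS command word and command-specific payload into context2,
 * set the completion handler, and issue the IOCB. The function name is
 * hypothetical.
 */
static int
example_issue_els(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);

	/* expectRsp = 1: allocate both command and response buffers */
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);
	if (!elsiocb)
		return 1;

	/* First payload word is the ELS command code */
	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
	*((uint32_t *)pcmd) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));

	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
	if (lpfc_issue_fabric_iocb(phba, elsiocb) == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}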
 351
 352/**
 353 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 354 * @vport: pointer to a host virtual N_Port data structure.
 355 *
 356 * This routine issues a fabric registration login for a @vport. An
 357 * active ndlp node with Fabric_DID must already exist for this @vport.
 358 * The routine invokes two mailbox commands to carry out fabric registration
 359 * login through the HBA firmware: the first mailbox command requests the
 360 * HBA to perform link configuration for the @vport; and the second mailbox
 361 * command requests the HBA to perform the actual fabric registration login
 362 * with the @vport.
 363 *
 364 * Return code
 365 *   0 - successfully issued fabric registration login for @vport
  366 *   -ENXIO - failed to issue fabric registration login for @vport
 367 **/
 368int
 369lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 370{
 371	struct lpfc_hba  *phba = vport->phba;
 372	LPFC_MBOXQ_t *mbox;
 373	struct lpfc_dmabuf *mp;
 374	struct lpfc_nodelist *ndlp;
 375	struct serv_parm *sp;
 376	int rc;
 377	int err = 0;
 378
 379	sp = &phba->fc_fabparam;
 380	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 381	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 382		err = 1;
 383		goto fail;
 384	}
 385
 386	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 387	if (!mbox) {
 388		err = 2;
 389		goto fail;
 390	}
 391
 392	vport->port_state = LPFC_FABRIC_CFG_LINK;
 393	lpfc_config_link(phba, mbox);
 394	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 395	mbox->vport = vport;
 396
 397	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 398	if (rc == MBX_NOT_FINISHED) {
 399		err = 3;
 400		goto fail_free_mbox;
 401	}
 402
 403	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 404	if (!mbox) {
 405		err = 4;
 406		goto fail;
 407	}
 408	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
 409			  ndlp->nlp_rpi);
 410	if (rc) {
 411		err = 5;
 412		goto fail_free_mbox;
 413	}
 414
 415	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
 416	mbox->vport = vport;
 417	/* increment the reference count on ndlp to hold reference
 418	 * for the callback routine.
 419	 */
 420	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 421
 422	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 423	if (rc == MBX_NOT_FINISHED) {
 424		err = 6;
 425		goto fail_issue_reg_login;
 426	}
 427
 428	return 0;
 429
 430fail_issue_reg_login:
 431	/* decrement the reference count on ndlp just incremented
 432	 * for the failed mbox command.
 433	 */
 434	lpfc_nlp_put(ndlp);
 435	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 436	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 437	kfree(mp);
 438fail_free_mbox:
 439	mempool_free(mbox, phba->mbox_mem_pool);
 440
 441fail:
 442	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 443	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 444		"0249 Cannot issue Register Fabric login: Err %d\n", err);
 445	return -ENXIO;
 446}
 447
 448/**
 449 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 450 * @vport: pointer to a host virtual N_Port data structure.
 451 *
 452 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 453 * the @vport. This mailbox command is necessary for SLI4 port only.
 454 *
 455 * Return code
 456 *   0 - successfully issued REG_VFI for @vport
 457 *   A failure code otherwise.
 458 **/
 459int
 460lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 461{
 462	struct lpfc_hba  *phba = vport->phba;
 463	LPFC_MBOXQ_t *mboxq = NULL;
 464	struct lpfc_nodelist *ndlp;
 465	struct lpfc_dmabuf *dmabuf = NULL;
 466	int rc = 0;
 467
 468	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
 469	if ((phba->sli_rev == LPFC_SLI_REV4) &&
 470	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
 471	    !(vport->fc_flag & FC_PT2PT)) {
 472		ndlp = lpfc_findnode_did(vport, Fabric_DID);
 473		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 474			rc = -ENODEV;
 475			goto fail;
 476		}
 477	}
 478
 479	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 480	if (!mboxq) {
 481		rc = -ENOMEM;
 482		goto fail;
 483	}
 484
 485	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
 486	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
 487		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 488		if (!dmabuf) {
 489			rc = -ENOMEM;
 490			goto fail;
 491		}
 492		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
 493		if (!dmabuf->virt) {
 494			rc = -ENOMEM;
 495			goto fail;
 496		}
 497		memcpy(dmabuf->virt, &phba->fc_fabparam,
 498		       sizeof(struct serv_parm));
 499	}
 500
 501	vport->port_state = LPFC_FABRIC_CFG_LINK;
 502	if (dmabuf)
 503		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
 504	else
 505		lpfc_reg_vfi(mboxq, vport, 0);
 506
 507	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
 508	mboxq->vport = vport;
 509	mboxq->ctx_buf = dmabuf;
 510	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 511	if (rc == MBX_NOT_FINISHED) {
 512		rc = -ENXIO;
 513		goto fail;
 514	}
 515	return 0;
 516
 517fail:
 518	if (mboxq)
 519		mempool_free(mboxq, phba->mbox_mem_pool);
 520	if (dmabuf) {
 521		if (dmabuf->virt)
 522			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
 523		kfree(dmabuf);
 524	}
 525
 526	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 527	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 528		"0289 Issue Register VFI failed: Err %d\n", rc);
 529	return rc;
 530}
 531
 532/**
 533 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 534 * @vport: pointer to a host virtual N_Port data structure.
 535 *
 536 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
  537 * the @vport. This mailbox command is necessary only for SLI4 ports.
 538 *
 539 * Return code
  540 *   0 - successfully issued UNREG_VFI for @vport
 541 *   A failure code otherwise.
 542 **/
 543int
 544lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
 545{
 546	struct lpfc_hba *phba = vport->phba;
 547	struct Scsi_Host *shost;
 548	LPFC_MBOXQ_t *mboxq;
 549	int rc;
 550
 551	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 552	if (!mboxq) {
 553		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
  554				"2556 UNREG_VFI mbox allocation failed "
 555				"HBA state x%x\n", phba->pport->port_state);
 556		return -ENOMEM;
 557	}
 558
 559	lpfc_unreg_vfi(mboxq, vport);
 560	mboxq->vport = vport;
 561	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
 562
 563	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 564	if (rc == MBX_NOT_FINISHED) {
 565		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
 566				"2557 UNREG_VFI issue mbox failed rc x%x "
 567				"HBA state x%x\n",
 568				rc, phba->pport->port_state);
 569		mempool_free(mboxq, phba->mbox_mem_pool);
 570		return -EIO;
 571	}
 572
 573	shost = lpfc_shost_from_vport(vport);
 574	spin_lock_irq(shost->host_lock);
 575	vport->fc_flag &= ~FC_VFI_REGISTERED;
 576	spin_unlock_irq(shost->host_lock);
 577	return 0;
 578}
 579
 580/**
 581 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 582 * @vport: pointer to a host virtual N_Port data structure.
 583 * @sp: pointer to service parameter data structure.
 584 *
 585 * This routine is called from FLOGI/FDISC completion handler functions.
  586 * It returns 1 when the FCID, Fabric portname, or Fabric nodename has
  587 * changed in the completion service parameters; otherwise it returns 0.
  588 * This function also sets a flag in the vport data structure to delay
  589 * N_Port discovery after the FLOGI/FDISC completion if the Clean Address
  590 * bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
  591 * portname, or Fabric nodename has changed.
 592 *
 593 * Return code
  594 *   0 - FCID, Fabric nodename, and Fabric portname are unchanged.
  595 *   1 - FCID, Fabric nodename, or Fabric portname has changed.
 596 *
 597 **/
 598static uint8_t
 599lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
 600		struct serv_parm *sp)
 601{
 602	struct lpfc_hba *phba = vport->phba;
 603	uint8_t fabric_param_changed = 0;
 604	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 605
 606	if ((vport->fc_prevDID != vport->fc_myDID) ||
 607		memcmp(&vport->fabric_portname, &sp->portName,
 608			sizeof(struct lpfc_name)) ||
 609		memcmp(&vport->fabric_nodename, &sp->nodeName,
 610			sizeof(struct lpfc_name)) ||
 611		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
 612		fabric_param_changed = 1;
 613		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
 614	}
 615	/*
 616	 * Word 1 Bit 31 in common service parameter is overloaded.
 617	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
 618	 * Word 1 Bit 31 in FLOGI response is clean address bit
 619	 *
 620	 * If fabric parameter is changed and clean address bit is
 621	 * cleared delay nport discovery if
 622	 * - vport->fc_prevDID != 0 (not initial discovery) OR
 623	 * - lpfc_delay_discovery module parameter is set.
 624	 */
 625	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
 626	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
 627		spin_lock_irq(shost->host_lock);
 628		vport->fc_flag |= FC_DISC_DELAYED;
 629		spin_unlock_irq(shost->host_lock);
 630	}
 631
 632	return fabric_param_changed;
 633}
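
/*
 * Illustrative sketch, not part of the original file: how a FLOGI/FDISC
 * completion path consumes lpfc_check_clean_addr_bit(). Based on its use
 * in lpfc_cmpl_els_flogi_fabric() below; @sp points at the service
 * parameters from the LS_ACC payload, and the function name is
 * hypothetical.
 */
static void
example_handle_fabric_params(struct lpfc_vport *vport, struct serv_parm *sp)
{
	uint8_t changed = lpfc_check_clean_addr_bit(vport, sp);

	if (changed) {
		/* The fabric identity or our FCID changed: existing RPIs
		 * are stale and the VPI must be re-registered. If the Clean
		 * Address bit was also cleared, the routine has already set
		 * FC_DISC_DELAYED to postpone N_Port discovery.
		 */
	}
}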
 634
 635
 636/**
 637 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 638 * @vport: pointer to a host virtual N_Port data structure.
 639 * @ndlp: pointer to a node-list data structure.
 640 * @sp: pointer to service parameter data structure.
 641 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 642 *
 643 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 644 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
  645 * port in a fabric topology. It sets up the @ndlp's parameters from the
  646 * IOCB response. It also checks the newly assigned N_Port ID for the
 647 * @vport against the previously assigned N_Port ID. If it is different from
 648 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 649 * is invoked on all the remaining nodes with the @vport to unregister the
 650 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 651 * is invoked to register login to the fabric.
 652 *
 653 * Return code
 654 *   0 - Success (currently, always return 0)
 655 **/
 656static int
 657lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 658			   struct serv_parm *sp, IOCB_t *irsp)
 659{
 660	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 661	struct lpfc_hba  *phba = vport->phba;
 662	struct lpfc_nodelist *np;
 663	struct lpfc_nodelist *next_np;
 664	uint8_t fabric_param_changed;
 665
 666	spin_lock_irq(shost->host_lock);
 667	vport->fc_flag |= FC_FABRIC;
 668	spin_unlock_irq(shost->host_lock);
 669
 670	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
 671	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
 672		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 673
 674	phba->fc_edtovResol = sp->cmn.edtovResolution;
 675	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 676
 677	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 678		spin_lock_irq(shost->host_lock);
 679		vport->fc_flag |= FC_PUBLIC_LOOP;
 680		spin_unlock_irq(shost->host_lock);
 681	}
 682
 683	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
 684	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
 685	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
 686	ndlp->nlp_class_sup = 0;
 687	if (sp->cls1.classValid)
 688		ndlp->nlp_class_sup |= FC_COS_CLASS1;
 689	if (sp->cls2.classValid)
 690		ndlp->nlp_class_sup |= FC_COS_CLASS2;
 691	if (sp->cls3.classValid)
 692		ndlp->nlp_class_sup |= FC_COS_CLASS3;
 693	if (sp->cls4.classValid)
 694		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 695	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
 696				sp->cmn.bbRcvSizeLsb;
 697
 698	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
 699	if (fabric_param_changed) {
 700		/* Reset FDMI attribute masks based on config parameter */
 701		if (phba->cfg_enable_SmartSAN ||
 702		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
 703			/* Setup appropriate attribute masks */
 704			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
 705			if (phba->cfg_enable_SmartSAN)
 706				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
 707			else
 708				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
 709		} else {
 710			vport->fdmi_hba_mask = 0;
 711			vport->fdmi_port_mask = 0;
 712		}
 713
 714	}
 715	memcpy(&vport->fabric_portname, &sp->portName,
 716			sizeof(struct lpfc_name));
 717	memcpy(&vport->fabric_nodename, &sp->nodeName,
 718			sizeof(struct lpfc_name));
 719	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
 720
 721	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 722		if (sp->cmn.response_multiple_NPort) {
 723			lpfc_printf_vlog(vport, KERN_WARNING,
 724					 LOG_ELS | LOG_VPORT,
 725					 "1816 FLOGI NPIV supported, "
 726					 "response data 0x%x\n",
 727					 sp->cmn.response_multiple_NPort);
 728			spin_lock_irq(&phba->hbalock);
 729			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
 730			spin_unlock_irq(&phba->hbalock);
 731		} else {
  732			/* Because we asked f/w for NPIV it still expects us
  733			 * to call reg_vnpid at least for the physical host */
 734			lpfc_printf_vlog(vport, KERN_WARNING,
 735					 LOG_ELS | LOG_VPORT,
 736					 "1817 Fabric does not support NPIV "
 737					 "- configuring single port mode.\n");
 738			spin_lock_irq(&phba->hbalock);
 739			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 740			spin_unlock_irq(&phba->hbalock);
 741		}
 742	}
 743
 744	/*
 745	 * For FC we need to do some special processing because of the SLI
 746	 * Port's default settings of the Common Service Parameters.
 747	 */
 748	if ((phba->sli_rev == LPFC_SLI_REV4) &&
 749	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
 750		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
 751		if (fabric_param_changed)
 752			lpfc_unregister_fcf_prep(phba);
 753
  754		/* This should just update the VFI CSPs */
 755		if (vport->fc_flag & FC_VFI_REGISTERED)
 756			lpfc_issue_reg_vfi(vport);
 757	}
 758
 759	if (fabric_param_changed &&
 760		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 761
 762		/* If our NportID changed, we need to ensure all
 763		 * remaining NPORTs get unreg_login'ed.
 764		 */
 765		list_for_each_entry_safe(np, next_np,
 766					&vport->fc_nodes, nlp_listp) {
 767			if (!NLP_CHK_NODE_ACT(np))
 768				continue;
 769			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
 770				   !(np->nlp_flag & NLP_NPR_ADISC))
 771				continue;
 772			spin_lock_irq(shost->host_lock);
 773			np->nlp_flag &= ~NLP_NPR_ADISC;
 774			spin_unlock_irq(shost->host_lock);
 775			lpfc_unreg_rpi(vport, np);
 776		}
 777		lpfc_cleanup_pending_mbox(vport);
 778
 779		if (phba->sli_rev == LPFC_SLI_REV4) {
 780			lpfc_sli4_unreg_all_rpis(vport);
 781			lpfc_mbx_unreg_vpi(vport);
 782			spin_lock_irq(shost->host_lock);
 783			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 784			spin_unlock_irq(shost->host_lock);
 785		}
 786
 787		/*
 788		 * For SLI3 and SLI4, the VPI needs to be reregistered in
 789		 * response to this fabric parameter change event.
 790		 */
 791		spin_lock_irq(shost->host_lock);
 792		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 793		spin_unlock_irq(shost->host_lock);
 794	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
 795		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 796			/*
 797			 * Driver needs to re-reg VPI in order for f/w
 798			 * to update the MAC address.
 799			 */
 800			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 801			lpfc_register_new_vport(phba, vport, ndlp);
 802			return 0;
 803	}
 804
 805	if (phba->sli_rev < LPFC_SLI_REV4) {
 806		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
 807		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
 808		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
 809			lpfc_register_new_vport(phba, vport, ndlp);
 810		else
 811			lpfc_issue_fabric_reglogin(vport);
 812	} else {
 813		ndlp->nlp_type |= NLP_FABRIC;
 814		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 815		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
 816			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
 817			lpfc_start_fdiscs(phba);
 818			lpfc_do_scr_ns_plogi(phba, vport);
 819		} else if (vport->fc_flag & FC_VFI_REGISTERED)
 820			lpfc_issue_init_vpi(vport);
 821		else {
 822			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 823					"3135 Need register VFI: (x%x/%x)\n",
 824					vport->fc_prevDID, vport->fc_myDID);
 825			lpfc_issue_reg_vfi(vport);
 826		}
 827	}
 828	return 0;
 829}
 830
 831/**
 832 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 833 * @vport: pointer to a host virtual N_Port data structure.
 834 * @ndlp: pointer to a node-list data structure.
 835 * @sp: pointer to service parameter data structure.
 836 *
 837 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 838 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 839 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 840 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 841 * the received N_Port Name lexicographically, this node shall assign local
 842 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 843 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 844 * this node shall just wait for the remote node to issue PLOGI and assign
 845 * N_Port IDs.
 846 *
 847 * Return code
 848 *   0 - Success
 849 *   -ENXIO - Fail
 850 **/
 851static int
 852lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 853			  struct serv_parm *sp)
 854{
 855	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 856	struct lpfc_hba  *phba = vport->phba;
 857	LPFC_MBOXQ_t *mbox;
 858	int rc;
 859
 860	spin_lock_irq(shost->host_lock);
 861	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
 862	vport->fc_flag |= FC_PT2PT;
 863	spin_unlock_irq(shost->host_lock);
 864
 865	/* If we are pt2pt with another NPort, force NPIV off! */
 866	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 867
 868	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
 869	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
 870		lpfc_unregister_fcf_prep(phba);
 871
 872		spin_lock_irq(shost->host_lock);
 873		vport->fc_flag &= ~FC_VFI_REGISTERED;
 874		spin_unlock_irq(shost->host_lock);
 875		phba->fc_topology_changed = 0;
 876	}
 877
 878	rc = memcmp(&vport->fc_portname, &sp->portName,
 879		    sizeof(vport->fc_portname));
 880
 881	if (rc >= 0) {
 882		/* This side will initiate the PLOGI */
 883		spin_lock_irq(shost->host_lock);
 884		vport->fc_flag |= FC_PT2PT_PLOGI;
 885		spin_unlock_irq(shost->host_lock);
 886
 887		/*
 888		 * N_Port ID cannot be 0, set our Id to LocalID
 889		 * the other side will be RemoteID.
 890		 */
 891
 892		/* not equal */
 893		if (rc)
 894			vport->fc_myDID = PT2PT_LocalID;
 895
 896		/* Decrement ndlp reference count indicating that ndlp can be
 897		 * safely released when other references to it are done.
 898		 */
 899		lpfc_nlp_put(ndlp);
 900
 901		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
 902		if (!ndlp) {
 903			/*
 904			 * Cannot find existing Fabric ndlp, so allocate a
 905			 * new one
 906			 */
 907			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
 908			if (!ndlp)
 909				goto fail;
 910		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 911			ndlp = lpfc_enable_node(vport, ndlp,
 912						NLP_STE_UNUSED_NODE);
  913			if (!ndlp)
 914				goto fail;
 915		}
 916
 917		memcpy(&ndlp->nlp_portname, &sp->portName,
 918		       sizeof(struct lpfc_name));
 919		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
 920		       sizeof(struct lpfc_name));
 921		/* Set state will put ndlp onto node list if not already done */
 922		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 923		spin_lock_irq(shost->host_lock);
 924		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 925		spin_unlock_irq(shost->host_lock);
 926
 927		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 928		if (!mbox)
 929			goto fail;
 930
 931		lpfc_config_link(phba, mbox);
 932
 933		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
 934		mbox->vport = vport;
 935		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 936		if (rc == MBX_NOT_FINISHED) {
 937			mempool_free(mbox, phba->mbox_mem_pool);
 938			goto fail;
 939		}
 940	} else {
 941		/* This side will wait for the PLOGI, decrement ndlp reference
 942		 * count indicating that ndlp can be released when other
 943		 * references to it are done.
 944		 */
 945		lpfc_nlp_put(ndlp);
 946
 947		/* Start discovery - this should just do CLEAR_LA */
 948		lpfc_disc_start(vport);
 949	}
 950
 951	return 0;
 952fail:
 953	return -ENXIO;
 954}
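
/*
 * Illustrative sketch, not part of the original file: the point-to-point
 * address assignment rule used above reduces to a memcmp() of the two
 * 8-byte WWPNs, compared as big-endian byte strings. The WWPN values
 * below are made up for the example.
 */
static void
example_pt2pt_ordering(void)
{
	uint8_t local_wwpn[8]  = { 0x20, 0x00, 0x00, 0x90,
				   0xfa, 0x00, 0x00, 0x02 };
	uint8_t remote_wwpn[8] = { 0x20, 0x00, 0x00, 0x90,
				   0xfa, 0x00, 0x00, 0x01 };

	if (memcmp(local_wwpn, remote_wwpn, sizeof(local_wwpn)) >= 0) {
		/* The higher WWPN initiates PLOGI, taking PT2PT_LocalID (1)
		 * for itself and assigning PT2PT_RemoteID (2) to the peer.
		 */
	} else {
		/* The lower WWPN simply waits for the peer's PLOGI. */
	}
}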
 955
 956/**
 957 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 958 * @phba: pointer to lpfc hba data structure.
 959 * @cmdiocb: pointer to lpfc command iocb data structure.
 960 * @rspiocb: pointer to lpfc response iocb data structure.
 961 *
 962 * This routine is the top-level completion callback function for issuing
 963 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 964 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
  965 * a retry has been made (either immediately or delayed, with
  966 * lpfc_els_retry() returning 1), the command IOCB is released and the
  967 * function returns. If the retry attempt has been given up (for
  968 * example, the maximum number of retries was reached), one additional
  969 * decrement of the ndlp reference count is made before returning, after
  970 * releasing the command IOCB; this actually releases the remote node
  971 * (note that lpfc_els_free_iocb() also decrements the ndlp reference
  972 * count once). If no error is reported in the IOCB status, the command
  973 * Port ID field is used to determine whether this is a point-to-point
  974 * or a fabric topology: if the Port ID field is assigned, it is a
  975 * fabric topology; otherwise, it is a point-to-point topology. The
  976 * routine lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport()
  977 * is then invoked to handle the topology-specific completion.
 978 **/
 979static void
 980lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 981		    struct lpfc_iocbq *rspiocb)
 982{
 983	struct lpfc_vport *vport = cmdiocb->vport;
 984	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 985	IOCB_t *irsp = &rspiocb->iocb;
 986	struct lpfc_nodelist *ndlp = cmdiocb->context1;
 987	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
 988	struct serv_parm *sp;
 989	uint16_t fcf_index;
 990	int rc;
 991
 992	/* Check to see if link went down during discovery */
 993	if (lpfc_els_chk_latt(vport)) {
 994		/* One additional decrement on node reference count to
 995		 * trigger the release of the node
 996		 */
 997		lpfc_nlp_put(ndlp);
 998		goto out;
 999	}
1000
1001	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1002		"FLOGI cmpl:      status:x%x/x%x state:x%x",
1003		irsp->ulpStatus, irsp->un.ulpWord[4],
1004		vport->port_state);
1005
1006	if (irsp->ulpStatus) {
1007		/*
1008		 * In case of FIP mode, perform roundrobin FCF failover
1009		 * due to new FCF discovery
1010		 */
1011		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1012		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1013			if (phba->link_state < LPFC_LINK_UP)
1014				goto stop_rr_fcf_flogi;
1015			if ((phba->fcoe_cvl_eventtag_attn ==
1016			     phba->fcoe_cvl_eventtag) &&
1017			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1018			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1019			    IOERR_SLI_ABORTED))
1020				goto stop_rr_fcf_flogi;
1021			else
1022				phba->fcoe_cvl_eventtag_attn =
1023					phba->fcoe_cvl_eventtag;
1024			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1025					"2611 FLOGI failed on FCF (x%x), "
1026					"status:x%x/x%x, tmo:x%x, perform "
1027					"roundrobin FCF failover\n",
1028					phba->fcf.current_rec.fcf_indx,
1029					irsp->ulpStatus, irsp->un.ulpWord[4],
1030					irsp->ulpTimeout);
1031			lpfc_sli4_set_fcf_flogi_fail(phba,
1032					phba->fcf.current_rec.fcf_indx);
1033			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1034			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1035			if (rc)
1036				goto out;
1037		}
1038
1039stop_rr_fcf_flogi:
1040		/* FLOGI failure */
1041		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1042		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1043					IOERR_LOOP_OPEN_FAILURE)))
1044			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1045					"2858 FLOGI failure Status:x%x/x%x "
1046					"TMO:x%x Data x%x x%x\n",
1047					irsp->ulpStatus, irsp->un.ulpWord[4],
1048					irsp->ulpTimeout, phba->hba_flag,
1049					phba->fcf.fcf_flag);
1050
1051		/* Check for retry */
1052		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1053			goto out;
1054
1055		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1056				 "0150 FLOGI failure Status:x%x/x%x "
1057				 "xri x%x TMO:x%x\n",
1058				 irsp->ulpStatus, irsp->un.ulpWord[4],
1059				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1060
1061		/* If this is not a loop open failure, bail out */
1062		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1063		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1064					IOERR_LOOP_OPEN_FAILURE)))
1065			goto flogifail;
1066
1067		/* FLOGI failed, so there is no fabric */
1068		spin_lock_irq(shost->host_lock);
1069		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1070		spin_unlock_irq(shost->host_lock);
1071
1072		/* If private loop, then allow max outstanding els to be
1073		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1074		 * alpa map would take too long otherwise.
1075		 */
1076		if (phba->alpa_map[0] == 0)
1077			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1078		if ((phba->sli_rev == LPFC_SLI_REV4) &&
1079		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1080		     (vport->fc_prevDID != vport->fc_myDID) ||
1081			phba->fc_topology_changed)) {
1082			if (vport->fc_flag & FC_VFI_REGISTERED) {
1083				if (phba->fc_topology_changed) {
1084					lpfc_unregister_fcf_prep(phba);
1085					spin_lock_irq(shost->host_lock);
1086					vport->fc_flag &= ~FC_VFI_REGISTERED;
1087					spin_unlock_irq(shost->host_lock);
1088					phba->fc_topology_changed = 0;
1089				} else {
1090					lpfc_sli4_unreg_all_rpis(vport);
1091				}
1092			}
1093
1094			/* Do not register VFI if the driver aborted FLOGI */
1095			if (!lpfc_error_lost_link(irsp))
1096				lpfc_issue_reg_vfi(vport);
1097			lpfc_nlp_put(ndlp);
1098			goto out;
1099		}
1100		goto flogifail;
1101	}
1102	spin_lock_irq(shost->host_lock);
1103	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1104	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1105	spin_unlock_irq(shost->host_lock);
1106
1107	/*
 1108	 * The FLOGI succeeded.  Sync the data for the CPU before
1109	 * accessing it.
1110	 */
1111	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1112	if (!prsp)
1113		goto out;
1114	sp = prsp->virt + sizeof(uint32_t);
1115
1116	/* FLOGI completes successfully */
1117	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1118			 "0101 FLOGI completes successfully, I/O tag:x%x, "
1119			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
1120			 cmdiocb->iotag, cmdiocb->sli4_xritag,
1121			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1122			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1123			 vport->port_state, vport->fc_flag);
1124
1125	if (vport->port_state == LPFC_FLOGI) {
1126		/*
1127		 * If Common Service Parameters indicate Nport
1128		 * we are point to point, if Fport we are Fabric.
1129		 */
1130		if (sp->cmn.fPort)
1131			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1132		else if (!(phba->hba_flag & HBA_FCOE_MODE))
1133			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1134		else {
1135			lpfc_printf_vlog(vport, KERN_ERR,
1136				LOG_FIP | LOG_ELS,
1137				"2831 FLOGI response with cleared Fabric "
1138				"bit fcf_index 0x%x "
1139				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1140				"Fabric Name "
1141				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
1142				phba->fcf.current_rec.fcf_indx,
1143				phba->fcf.current_rec.switch_name[0],
1144				phba->fcf.current_rec.switch_name[1],
1145				phba->fcf.current_rec.switch_name[2],
1146				phba->fcf.current_rec.switch_name[3],
1147				phba->fcf.current_rec.switch_name[4],
1148				phba->fcf.current_rec.switch_name[5],
1149				phba->fcf.current_rec.switch_name[6],
1150				phba->fcf.current_rec.switch_name[7],
1151				phba->fcf.current_rec.fabric_name[0],
1152				phba->fcf.current_rec.fabric_name[1],
1153				phba->fcf.current_rec.fabric_name[2],
1154				phba->fcf.current_rec.fabric_name[3],
1155				phba->fcf.current_rec.fabric_name[4],
1156				phba->fcf.current_rec.fabric_name[5],
1157				phba->fcf.current_rec.fabric_name[6],
1158				phba->fcf.current_rec.fabric_name[7]);
1159			lpfc_nlp_put(ndlp);
1160			spin_lock_irq(&phba->hbalock);
1161			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1162			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1163			spin_unlock_irq(&phba->hbalock);
1164			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1165			goto out;
1166		}
1167		if (!rc) {
1168			/* Mark the FCF discovery process done */
1169			if (phba->hba_flag & HBA_FIP_SUPPORT)
1170				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1171						LOG_ELS,
1172						"2769 FLOGI to FCF (x%x) "
1173						"completed successfully\n",
1174						phba->fcf.current_rec.fcf_indx);
1175			spin_lock_irq(&phba->hbalock);
1176			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1177			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1178			spin_unlock_irq(&phba->hbalock);
1179			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1180			goto out;
1181		}
1182	}
1183
1184flogifail:
1185	spin_lock_irq(&phba->hbalock);
1186	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1187	spin_unlock_irq(&phba->hbalock);
1188
1189	lpfc_nlp_put(ndlp);
1190
1191	if (!lpfc_error_lost_link(irsp)) {
1192		/* FLOGI failed, so just use loop map to make discovery list */
1193		lpfc_disc_list_loopmap(vport);
1194
1195		/* Start discovery */
1196		lpfc_disc_start(vport);
1197	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1198			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1199			 IOERR_SLI_ABORTED) &&
1200			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1201			 IOERR_SLI_DOWN))) &&
1202			(phba->link_state != LPFC_CLEAR_LA)) {
1203		/* If FLOGI failed enable link interrupt. */
1204		lpfc_issue_clear_la(phba, vport);
1205	}
1206out:
1207	lpfc_els_free_iocb(phba, cmdiocb);
1208}
1209
1210/**
1211 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1212 *                           aborted during a link down
1213 * @phba: pointer to lpfc hba data structure.
1214 * @cmdiocb: pointer to lpfc command iocb data structure.
1215 * @rspiocb: pointer to lpfc response iocb data structure.
1216 *
1217 */
1218static void
1219lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1220			struct lpfc_iocbq *rspiocb)
1221{
1222	IOCB_t *irsp;
1223	uint32_t *pcmd;
1224	uint32_t cmd;
1225
1226	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
1227	cmd = *pcmd;
1228	irsp = &rspiocb->iocb;
1229
1230	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1231			"6445 ELS completes after LINK_DOWN: "
1232			" Status %x/%x cmd x%x flg x%x\n",
1233			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
1234			cmdiocb->iocb_flag);
1235
1236	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
1237		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
1238		atomic_dec(&phba->fabric_iocb_count);
1239	}
1240	lpfc_els_free_iocb(phba, cmdiocb);
1241}
1242
1243/**
1244 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1245 * @vport: pointer to a host virtual N_Port data structure.
1246 * @ndlp: pointer to a node-list data structure.
1247 * @retry: number of retries to the command IOCB.
1248 *
1249 * This routine issues a Fabric Login (FLOGI) Request ELS command
1250 * for a @vport. The initiator service parameters are put into the payload
1251 * of the FLOGI Request IOCB and the top-level callback function pointer
1252 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1253 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1254 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1255 *
1256 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1257 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1258 * will be stored into the context1 field of the IOCB for the completion
1259 * callback function to the FLOGI ELS command.
1260 *
1261 * Return code
1262 *   0 - successfully issued flogi iocb for @vport
1263 *   1 - failed to issue flogi iocb for @vport
1264 **/
1265static int
1266lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1267		     uint8_t retry)
1268{
1269	struct lpfc_hba  *phba = vport->phba;
1270	struct serv_parm *sp;
1271	IOCB_t *icmd;
1272	struct lpfc_iocbq *elsiocb;
1273	struct lpfc_iocbq defer_flogi_acc;
1274	uint8_t *pcmd;
1275	uint16_t cmdsize;
1276	uint32_t tmo, did;
1277	int rc;
1278
1279	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1280	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1281				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1282
1283	if (!elsiocb)
1284		return 1;
1285
1286	icmd = &elsiocb->iocb;
1287	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1288
1289	/* For FLOGI request, remainder of payload is service parameters */
1290	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1291	pcmd += sizeof(uint32_t);
1292	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1293	sp = (struct serv_parm *) pcmd;
1294
1295	/* Setup CSPs accordingly for Fabric */
1296	sp->cmn.e_d_tov = 0;
1297	sp->cmn.w2.r_a_tov = 0;
1298	sp->cmn.virtual_fabric_support = 0;
1299	sp->cls1.classValid = 0;
1300	if (sp->cmn.fcphLow < FC_PH3)
1301		sp->cmn.fcphLow = FC_PH3;
1302	if (sp->cmn.fcphHigh < FC_PH3)
1303		sp->cmn.fcphHigh = FC_PH3;
1304
1305	if  (phba->sli_rev == LPFC_SLI_REV4) {
1306		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1307		    LPFC_SLI_INTF_IF_TYPE_0) {
1308			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1309			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
 1310			/* The CT field needs to be 3 (FCFI) for the FLOGI WQE */
1311			/* Set the fcfi to the fcfi we registered with */
1312			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1313		}
 1314		/* Can't do SLI4 class2 without support for sequence coalescing */
1315		sp->cls2.classValid = 0;
1316		sp->cls2.seqDelivery = 0;
1317	} else {
1318		/* Historical, setting sequential-delivery bit for SLI3 */
1319		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1320		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1321		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1322			sp->cmn.request_multiple_Nport = 1;
1323			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1324			icmd->ulpCt_h = 1;
1325			icmd->ulpCt_l = 0;
1326		} else
1327			sp->cmn.request_multiple_Nport = 0;
1328	}
1329
1330	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1331		icmd->un.elsreq64.myID = 0;
1332		icmd->un.elsreq64.fl = 1;
1333	}
1334
1335	tmo = phba->fc_ratov;
1336	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1337	lpfc_set_disctmo(vport);
1338	phba->fc_ratov = tmo;
1339
1340	phba->fc_stat.elsXmitFLOGI++;
1341	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1342
1343	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1344		"Issue FLOGI:     opt:x%x",
1345		phba->sli3_options, 0, 0);
1346
1347	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1348
1349	phba->hba_flag |= HBA_FLOGI_ISSUED;
1350
1351	/* Check for a deferred FLOGI ACC condition */
1352	if (phba->defer_flogi_acc_flag) {
1353		did = vport->fc_myDID;
1354		vport->fc_myDID = Fabric_DID;
1355
1356		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1357
1358		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
1359		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
1360						phba->defer_flogi_acc_ox_id;
1361
1362		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1363				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1364				 " ox_id: x%x, hba_flag x%x\n",
1365				 phba->defer_flogi_acc_rx_id,
1366				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1367
1368		/* Send deferred FLOGI ACC */
1369		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1370				 ndlp, NULL);
1371
1372		phba->defer_flogi_acc_flag = false;
1373
1374		vport->fc_myDID = did;
1375	}
1376
1377	if (rc == IOCB_ERROR) {
1378		lpfc_els_free_iocb(phba, elsiocb);
1379		return 1;
1380	}
1381	return 0;
1382}
1383
1384/**
1385 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1386 * @phba: pointer to lpfc hba data structure.
1387 *
1388 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1389 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 1390 * list and issues an abort IOCB command on each outstanding IOCB that
 1391 * contains an active Fabric_DID ndlp. Note that this function only issues
1392 * the abort IOCB command on all the outstanding IOCBs, thus when this
1393 * function returns, it does not guarantee all the IOCBs are actually aborted.
1394 *
1395 * Return code
 1396 *   0 - Successfully issued abort iocb on all outstanding flogis; -EIO if the ELS ring is not available
1397 **/
1398int
1399lpfc_els_abort_flogi(struct lpfc_hba *phba)
1400{
1401	struct lpfc_sli_ring *pring;
1402	struct lpfc_iocbq *iocb, *next_iocb;
1403	struct lpfc_nodelist *ndlp;
1404	IOCB_t *icmd;
1405
1406	/* Abort outstanding I/O on NPort <nlp_DID> */
1407	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1408			"0201 Abort outstanding I/O on NPort x%x\n",
1409			Fabric_DID);
1410
1411	pring = lpfc_phba_elsring(phba);
1412	if (unlikely(!pring))
1413		return -EIO;
1414
1415	/*
1416	 * Check the txcmplq for an iocb that matches the nport the driver is
1417	 * searching for.
1418	 */
1419	spin_lock_irq(&phba->hbalock);
1420	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1421		icmd = &iocb->iocb;
1422		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1423			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1424			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1425			    (ndlp->nlp_DID == Fabric_DID))
1426				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1427		}
1428	}
1429	spin_unlock_irq(&phba->hbalock);
1430
1431	return 0;
1432}
1433
1434/**
1435 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1436 * @vport: pointer to a host virtual N_Port data structure.
1437 *
1438 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1439 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 1440 * the @vport's ndlp list. If no such ndlp is found, it creates an ndlp
 1441 * and puts it into the @vport's ndlp list. If an inactive ndlp is found
 1442 * on the list, it is enabled and made active. The lpfc_issue_els_flogi() routine
1443 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1444 * @vport.
1445 *
1446 * Return code
1447 *   0 - failed to issue initial flogi for @vport
1448 *   1 - successfully issued initial flogi for @vport
1449 **/
1450int
1451lpfc_initial_flogi(struct lpfc_vport *vport)
1452{
1453	struct lpfc_nodelist *ndlp;
1454
1455	vport->port_state = LPFC_FLOGI;
1456	lpfc_set_disctmo(vport);
1457
1458	/* First look for the Fabric ndlp */
1459	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1460	if (!ndlp) {
1461		/* Cannot find existing Fabric ndlp, so allocate a new one */
1462		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1463		if (!ndlp)
1464			return 0;
1465		/* Set the node type */
1466		ndlp->nlp_type |= NLP_FABRIC;
1467		/* Put ndlp onto node list */
1468		lpfc_enqueue_node(vport, ndlp);
1469	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1470		/* re-setup ndlp without removing from node list */
1471		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1472		if (!ndlp)
1473			return 0;
1474	}
1475
1476	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1477		/* This decrement of reference count to node shall kick off
1478		 * the release of the node.
1479		 */
1480		lpfc_nlp_put(ndlp);
1481		return 0;
1482	}
1483	return 1;
1484}
1485
1486/**
1487 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1488 * @vport: pointer to a host virtual N_Port data structure.
1489 *
1490 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1491 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 1492 * the @vport's ndlp list. If no such ndlp is found, it creates an ndlp
 1493 * and puts it into the @vport's ndlp list. If an inactive ndlp is found
 1494 * on the list, it is enabled and made active. The lpfc_issue_els_fdisc() routine
1495 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1496 * @vport.
1497 *
1498 * Return code
1499 *   0 - failed to issue initial fdisc for @vport
1500 *   1 - successfully issued initial fdisc for @vport
1501 **/
1502int
1503lpfc_initial_fdisc(struct lpfc_vport *vport)
1504{
1505	struct lpfc_nodelist *ndlp;
1506
1507	/* First look for the Fabric ndlp */
1508	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1509	if (!ndlp) {
1510		/* Cannot find existing Fabric ndlp, so allocate a new one */
1511		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1512		if (!ndlp)
1513			return 0;
1514		/* Put ndlp onto node list */
1515		lpfc_enqueue_node(vport, ndlp);
1516	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1517		/* re-setup ndlp without removing from node list */
1518		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1519		if (!ndlp)
1520			return 0;
1521	}
1522
1523	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1524		/* decrement node reference count to trigger the release of
1525		 * the node.
1526		 */
1527		lpfc_nlp_put(ndlp);
1528		return 0;
1529	}
1530	return 1;
1531}
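
/*
 * Illustrative sketch, not part of the original file: note the inverted
 * return convention of lpfc_initial_flogi() and lpfc_initial_fdisc()
 * above (1 on success, 0 on failure), unlike most routines in this file,
 * which return 0 on success. A hypothetical caller kicking off fabric
 * discovery would therefore test for 0:
 */
static void
example_start_discovery(struct lpfc_vport *vport)
{
	if (!lpfc_initial_flogi(vport)) {
		/* The FLOGI could not be issued (no fabric ndlp could be
		 * set up, or the IOCB could not be prepared); discovery
		 * does not start.
		 */
	}
}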
1532
1533/**
1534 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1535 * @vport: pointer to a host virtual N_Port data structure.
1536 *
 1537 * This routine checks whether there are remaining Port Logins
1538 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1539 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1540 * to issue ELS PLOGIs up to the configured discover threads with the
 1541 * @vport (@vport->cfg_discovery_threads). The function also decrements
 1542 * the @vport's num_disc_nodes by 1 if it is not already 0.
1543 **/
1544void
1545lpfc_more_plogi(struct lpfc_vport *vport)
1546{
1547	if (vport->num_disc_nodes)
1548		vport->num

(Large file: the listing is truncated here. See the repository link above for the full file.)