/drivers/scsi/lpfc/lpfc_els.c
http://github.com/mirrors/linux · C · 10152 lines · 6613 code · 1025 blank · 2514 comment · 1180 complexity · bff4b2cf65119e8010bb275c58f7b75d MD5 · raw file
Large files are truncated click here to view the full file
- /*******************************************************************
- * This file is part of the Emulex Linux Device Driver for *
- * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
- * Copyright (C) 2004-2016 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.broadcom.com *
- * Portions Copyright (C) 2004-2005 Christoph Hellwig *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
- *******************************************************************/
- /* See Fibre Channel protocol T11 FC-LS for details */
- #include <linux/blkdev.h>
- #include <linux/pci.h>
- #include <linux/slab.h>
- #include <linux/interrupt.h>
- #include <scsi/scsi.h>
- #include <scsi/scsi_device.h>
- #include <scsi/scsi_host.h>
- #include <scsi/scsi_transport_fc.h>
- #include <uapi/scsi/fc/fc_fs.h>
- #include <uapi/scsi/fc/fc_els.h>
- #include "lpfc_hw4.h"
- #include "lpfc_hw.h"
- #include "lpfc_sli.h"
- #include "lpfc_sli4.h"
- #include "lpfc_nl.h"
- #include "lpfc_disc.h"
- #include "lpfc_scsi.h"
- #include "lpfc.h"
- #include "lpfc_logmsg.h"
- #include "lpfc_crtn.h"
- #include "lpfc_vport.h"
- #include "lpfc_debugfs.h"
/* Forward declarations for routines referenced before their definitions */
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

/* Maximum number of retry attempts for an ELS command */
static int lpfc_max_els_tries = 3;
- /**
- * lpfc_els_chk_latt - Check host link attention event for a vport
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine checks whether there is an outstanding host link
- * attention event during the discovery process with the @vport. It is done
- * by reading the HBA's Host Attention (HA) register. If there is any host
- * link attention events during this @vport's discovery process, the @vport
- * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
- * be issued if the link state is not already in host link cleared state,
- * and a return code shall indicate whether the host link attention event
- * had happened.
- *
- * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
- * state in LPFC_VPORT_READY, the request for checking host link attention
- * event will be ignored and a return code shall indicate no host link
- * attention event had happened.
- *
- * Return codes
- * 0 - no host link attention event happened
- * 1 - host link attention event happened
- **/
- int
- lpfc_els_chk_latt(struct lpfc_vport *vport)
- {
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_hba *phba = vport->phba;
- uint32_t ha_copy;
- if (vport->port_state >= LPFC_VPORT_READY ||
- phba->link_state == LPFC_LINK_DOWN ||
- phba->sli_rev > LPFC_SLI_REV3)
- return 0;
- /* Read the HBA Host Attention Register */
- if (lpfc_readl(phba->HAregaddr, &ha_copy))
- return 1;
- if (!(ha_copy & HA_LATT))
- return 0;
- /* Pending Link Event during Discovery */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0237 Pending Link Event during "
- "Discovery: State x%x\n",
- phba->pport->port_state);
- /* CLEAR_LA should re-enable link attention events and
- * we should then immediately take a LATT event. The
- * LATT processing should call lpfc_linkdown() which
- * will cleanup any left over in-progress discovery
- * events.
- */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
- if (phba->link_state != LPFC_CLEAR_LA)
- lpfc_issue_clear_la(phba, vport);
- return 1;
- }
- /**
- * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
- * @vport: pointer to a host virtual N_Port data structure.
- * @expectRsp: flag indicating whether response is expected.
- * @cmdSize: size of the ELS command.
- * @retry: number of retries to the command IOCB when it fails.
- * @ndlp: pointer to a node-list data structure.
- * @did: destination identifier.
- * @elscmd: the ELS command code.
- *
- * This routine is used for allocating a lpfc-IOCB data structure from
- * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
- * passed into the routine for discovery state machine to issue an Extended
- * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
- * and preparation routine that is used by all the discovery state machine
- * routines and the ELS command-specific fields will be later set up by
- * the individual discovery machine routines after calling this routine
- * allocating and preparing a generic IOCB data structure. It fills in the
- * Buffer Descriptor Entries (BDEs), allocates buffers for both command
- * payload and response payload (if expected). The reference count on the
- * ndlp is incremented by 1 and the reference to the ndlp is put into
- * context1 of the IOCB data structure for this IOCB to hold the ndlp
- * reference for the command's callback function to access later.
- *
- * Return code
- * Pointer to the newly allocated/prepared els iocb data structure
- * NULL - when els iocb data structure allocation/preparation failed
- **/
- struct lpfc_iocbq *
- lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry,
- struct lpfc_nodelist *ndlp, uint32_t did,
- uint32_t elscmd)
- {
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *elsiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
- struct ulp_bde64 *bpl;
- IOCB_t *icmd;
- if (!lpfc_is_link_up(phba))
- return NULL;
- /* Allocate buffer for command iocb */
- elsiocb = lpfc_sli_get_iocbq(phba);
- if (elsiocb == NULL)
- return NULL;
- /*
- * If this command is for fabric controller and HBA running
- * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
- */
- if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
- switch (elscmd) {
- case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
- case ELS_CMD_FDISC:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
- case ELS_CMD_LOGO:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
- }
- else
- elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
- icmd = &elsiocb->iocb;
- /* fill in BDEs for command */
- /* Allocate buffer for command payload */
- pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (pcmd)
- pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
- if (!pcmd || !pcmd->virt)
- goto els_iocb_free_pcmb_exit;
- INIT_LIST_HEAD(&pcmd->list);
- /* Allocate buffer for response payload */
- if (expectRsp) {
- prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (prsp)
- prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
- &prsp->phys);
- if (!prsp || !prsp->virt)
- goto els_iocb_free_prsp_exit;
- INIT_LIST_HEAD(&prsp->list);
- } else
- prsp = NULL;
- /* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (pbuflist)
- pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
- &pbuflist->phys);
- if (!pbuflist || !pbuflist->virt)
- goto els_iocb_free_pbuf_exit;
- INIT_LIST_HEAD(&pbuflist->list);
- if (expectRsp) {
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
- icmd->un.elsreq64.remoteID = did; /* DID */
- icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
- if (elscmd == ELS_CMD_FLOGI)
- icmd->ulpTimeout = FF_DEF_RATOV * 2;
- else if (elscmd == ELS_CMD_LOGO)
- icmd->ulpTimeout = phba->fc_ratov;
- else
- icmd->ulpTimeout = phba->fc_ratov * 2;
- } else {
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
- icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
- }
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- /*
- * If we have NPIV enabled, we want to send ELS traffic by VPI.
- * For SLI4, since the driver controls VPIs we also want to include
- * all ELS pt2pt protocol traffic as well.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- ((phba->sli_rev == LPFC_SLI_REV4) &&
- (vport->fc_flag & FC_PT2PT))) {
- if (expectRsp) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
- }
- icmd->ulpCt_h = 0;
- /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
- if (elscmd == ELS_CMD_ECHO)
- icmd->ulpCt_l = 0; /* context = invalid RPI */
- else
- icmd->ulpCt_l = 1; /* context = VPI */
- }
- bpl = (struct ulp_bde64 *) pbuflist->virt;
- bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
- bpl->tus.f.bdeSize = cmdSize;
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- if (expectRsp) {
- bpl++;
- bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
- bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- }
- /* prevent preparing iocb with NULL ndlp reference */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto els_iocb_free_pbuf_exit;
- elsiocb->context2 = pcmd;
- elsiocb->context3 = pbuflist;
- elsiocb->retry = retry;
- elsiocb->vport = vport;
- elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
- if (prsp) {
- list_add(&prsp->list, &pcmd->list);
- }
- if (expectRsp) {
- /* Xmit ELS command <elsCmd> to remote NPORT <did> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0116 Xmit ELS command x%x to remote "
- "NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x\n",
- elscmd, did, elsiocb->iotag,
- vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag);
- } else {
- /* Xmit ELS response <elsCmd> to remote NPORT <did> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0117 Xmit ELS response x%x to remote "
- "NPORT x%x I/O tag: x%x, size: x%x "
- "port_state x%x rpi x%x fc_flag x%x\n",
- elscmd, ndlp->nlp_DID, elsiocb->iotag,
- cmdSize, vport->port_state,
- ndlp->nlp_rpi, vport->fc_flag);
- }
- return elsiocb;
- els_iocb_free_pbuf_exit:
- if (expectRsp)
- lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
- kfree(pbuflist);
- els_iocb_free_prsp_exit:
- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
- kfree(prsp);
- els_iocb_free_pcmb_exit:
- kfree(pcmd);
- lpfc_sli_release_iocbq(phba, elsiocb);
- return NULL;
- }
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * The local @err value records which step failed and is reported in the
 * "0249" error message; on any failure the vport is set to FC_VPORT_FAILED.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;	/* failure step indicator for the log message */

	sp = &phba->fc_fabparam;

	/* An active Fabric_DID node is a precondition */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	/* Step 1: CONFIG_LINK mailbox for this vport */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	/* Step 2: REG_LOGIN (RPI registration) mailbox for the fabric node.
	 * A fresh mailbox is allocated; the first one is owned by its
	 * completion handler once issued successfully.
	 */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	/* release the payload buffer referenced by ctx_buf before
	 * returning the mailbox to the pool (falls through below)
	 */
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}
- /**
- * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for SLI4 port only.
- *
- * Return code
- * 0 - successfully issued REG_VFI for @vport
- * A failure code otherwise.
- **/
- int
- lpfc_issue_reg_vfi(struct lpfc_vport *vport)
- {
- struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq = NULL;
- struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *dmabuf = NULL;
- int rc = 0;
- /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(phba->link_flag & LS_LOOPBACK_MODE) &&
- !(vport->fc_flag & FC_PT2PT)) {
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = -ENODEV;
- goto fail;
- }
- }
- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto fail;
- }
- /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
- if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
- rc = -ENOMEM;
- goto fail;
- }
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
- rc = -ENOMEM;
- goto fail;
- }
- memcpy(dmabuf->virt, &phba->fc_fabparam,
- sizeof(struct serv_parm));
- }
- vport->port_state = LPFC_FABRIC_CFG_LINK;
- if (dmabuf)
- lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
- else
- lpfc_reg_vfi(mboxq, vport, 0);
- mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
- mboxq->vport = vport;
- mboxq->ctx_buf = dmabuf;
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- rc = -ENXIO;
- goto fail;
- }
- return 0;
- fail:
- if (mboxq)
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- if (dmabuf->virt)
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0289 Issue Register VFI failed: Err %d\n", rc);
- return rc;
- }
- /**
- * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for SLI4 port only.
- *
- * Return code
- * 0 - successfully issued REG_VFI for @vport
- * A failure code otherwise.
- **/
- int
- lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
- {
- struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
- LPFC_MBOXQ_t *mboxq;
- int rc;
- mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2556 UNREG_VFI mbox allocation failed"
- "HBA state x%x\n", phba->pport->port_state);
- return -ENOMEM;
- }
- lpfc_unreg_vfi(mboxq, vport);
- mboxq->vport = vport;
- mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2557 UNREG_VFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
- mempool_free(mboxq, phba->mbox_mem_pool);
- return -EIO;
- }
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
- return 0;
- }
- /**
- * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
- * @vport: pointer to a host virtual N_Port data structure.
- * @sp: pointer to service parameter data structure.
- *
- * This routine is called from FLOGI/FDISC completion handler functions.
- * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
- * node nodename is changed in the completion service parameter else return
- * 0. This function also set flag in the vport data structure to delay
- * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
- * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
- * node nodename is changed in the completion service parameter.
- *
- * Return code
- * 0 - FCID and Fabric Nodename and Fabric portname is not changed.
- * 1 - FCID or Fabric Nodename or Fabric portname is changed.
- *
- **/
- static uint8_t
- lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
- struct serv_parm *sp)
- {
- struct lpfc_hba *phba = vport->phba;
- uint8_t fabric_param_changed = 0;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if ((vport->fc_prevDID != vport->fc_myDID) ||
- memcmp(&vport->fabric_portname, &sp->portName,
- sizeof(struct lpfc_name)) ||
- memcmp(&vport->fabric_nodename, &sp->nodeName,
- sizeof(struct lpfc_name)) ||
- (vport->vport_flag & FAWWPN_PARAM_CHG)) {
- fabric_param_changed = 1;
- vport->vport_flag &= ~FAWWPN_PARAM_CHG;
- }
- /*
- * Word 1 Bit 31 in common service parameter is overloaded.
- * Word 1 Bit 31 in FLOGI request is multiple NPort request
- * Word 1 Bit 31 in FLOGI response is clean address bit
- *
- * If fabric parameter is changed and clean address bit is
- * cleared delay nport discovery if
- * - vport->fc_prevDID != 0 (not initial discovery) OR
- * - lpfc_delay_discovery module parameter is set.
- */
- if (fabric_param_changed && !sp->cmn.clean_address_bit &&
- (vport->fc_prevDID || phba->cfg_delay_discovery)) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
- }
- return fabric_param_changed;
- }
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also check the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	/* Mark this port as fabric attached */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	/* Adopt E_D_TOV / R_A_TOV from the fabric service parameters */
	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	/* The FLOGI response word 4 carries the newly assigned N_Port ID */
	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;

	/* Record the fabric node's identity and supported classes */
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}
	}
	/* Remember the fabric identity for later change detection */
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid atleast for the physcial host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			/* only NPR nodes still marked for ADISC are affected */
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
			/*
			 * Driver needs to re-reg VPI in order for f/w
			 * to update the MAC address.
			 */
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
			lpfc_register_new_vport(phba, vport, ndlp);
			return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}
/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	/* Clear any fabric state and mark the port as point-to-point */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	/* WWPN comparison decides which side initiates the PLOGI */
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Reuse or create the node representing the remote side */
		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);

		/* Issue CONFIG_LINK to kick off the pt2pt PLOGI sequence */
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	/* context1 holds the Fabric ndlp referenced when FLOGI was issued */
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);
	/* FLOGI completed with an error status */
	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			/* Skip failover if this failure is a driver abort
			 * caused by a CVL event already accounted for.
			 */
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			    IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			/* Mark the current FCF failed, then try FLOGI on the
			 * next eligible FCF record.
			 */
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}
stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "2858 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;
		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		/* SLI4: (re)register the VFI when there is none, the DID
		 * changed, or the topology changed since the last FLOGI.
		 */
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
			phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}
			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	/* Success path: any pending CVL/LOGO indications are now stale */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);
	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	/* Service parameters follow the 4-byte ELS command code */
	sp = prsp->virt + sizeof(uint32_t);
	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);
	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			/* FCoE with the Fabric bit clear is invalid; log the
			 * FCF identity and terminate FCF discovery.
			 */
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_FIP | LOG_ELS,
					 "2831 FLOGI response with cleared Fabric "
					 "bit fcf_index 0x%x "
					 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
					 "Fabric Name "
					 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
					 phba->fcf.current_rec.fcf_indx,
					 phba->fcf.current_rec.switch_name[0],
					 phba->fcf.current_rec.switch_name[1],
					 phba->fcf.current_rec.switch_name[2],
					 phba->fcf.current_rec.switch_name[3],
					 phba->fcf.current_rec.switch_name[4],
					 phba->fcf.current_rec.switch_name[5],
					 phba->fcf.current_rec.switch_name[6],
					 phba->fcf.current_rec.switch_name[7],
					 phba->fcf.current_rec.fabric_name[0],
					 phba->fcf.current_rec.fabric_name[1],
					 phba->fcf.current_rec.fabric_name[2],
					 phba->fcf.current_rec.fabric_name[3],
					 phba->fcf.current_rec.fabric_name[4],
					 phba->fcf.current_rec.fabric_name[5],
					 phba->fcf.current_rec.fabric_name[6],
					 phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}
flogifail:
	/* Unrecoverable FLOGI failure: end FCF discovery, drop the node
	 * reference taken for this command, and fall back to loop discovery
	 * when the link itself is still usable.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);
	lpfc_nlp_put(ndlp);
	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);
		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	/* Release the command IOCB (also drops one ndlp reference) */
	lpfc_els_free_iocb(phba, cmdiocb);
}
- /**
- * lpfc_cmpl_els_link_down - Completion callback function for ELS command
- * aborted during a link down
- * @phba: pointer to lpfc hba data structure.
- * @cmdiocb: pointer to lpfc command iocb data structure.
- * @rspiocb: pointer to lpfc response iocb data structure.
- *
- */
- static void
- lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
- {
- IOCB_t *irsp;
- uint32_t *pcmd;
- uint32_t cmd;
- pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
- cmd = *pcmd;
- irsp = &rspiocb->iocb;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "6445 ELS completes after LINK_DOWN: "
- " Status %x/%x cmd x%x flg x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
- cmdiocb->iocb_flag);
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
- atomic_dec(&phba->fabric_iocb_count);
- }
- lpfc_els_free_iocb(phba, cmdiocb);
- }
/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	/* On-stack IOCB used only to carry the rx_id/ox_id of a FLOGI that
	 * was received earlier and whose ACC was deferred until now.
	 */
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;
	/* Payload is the 4-byte ELS command code plus service parameters */
	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);
	if (!elsiocb)
		return 1;
	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;
	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	/* Advertise at least FC-PH3 compliance */
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}
	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}
	/* Temporarily use the FLOGI discovery timeout while arming the
	 * discovery timer, then restore the configured R_A_TOV.
	 */
	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;
	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI:     opt:x%x",
			      phba->sli3_options, 0, 0);
	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	phba->hba_flag |= HBA_FLOGI_ISSUED;
	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* Respond as the fabric DID while sending the deferred ACC,
		 * then restore our own DID.
		 */
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;
		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);
		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;
	}
	/* NOTE(review): HBA_FLOGI_ISSUED is set and the deferred ACC is
	 * transmitted before rc is examined, so both happen even when
	 * lpfc_issue_fabric_iocb() failed — confirm this is intended.
	 */
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}
- /**
- * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
- * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
- * list and issues an abort IOCB commond on each outstanding IOCB that
- * contains a active Fabric_DID ndlp. Note that this function is to issue
- * the abort IOCB command on all the outstanding IOCBs, thus when this
- * function returns, it does not guarantee all the IOCBs are actually aborted.
- *
- * Return code
- * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
- **/
- int
- lpfc_els_abort_flogi(struct lpfc_hba *phba)
- {
- struct lpfc_sli_ring *pring;
- struct lpfc_iocbq *iocb, *next_iocb;
- struct lpfc_nodelist *ndlp;
- IOCB_t *icmd;
- /* Abort outstanding I/O on NPort <nlp_DID> */
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "0201 Abort outstanding I/O on NPort x%x\n",
- Fabric_DID);
- pring = lpfc_phba_elsring(phba);
- if (unlikely(!pring))
- return -EIO;
- /*
- * Check the txcmplq for an iocb that matches the nport the driver is
- * searching for.
- */
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
- ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_DID == Fabric_DID))
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
- }
- }
- spin_unlock_irq(&phba->hbalock);
- return 0;
- }
- /**
- * lpfc_initial_flogi - Issue an initial fabric login for a vport
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine issues an initial Fabric Login (FLOGI) for the @vport
- * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
- * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
- * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
- * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
- * is then invoked with the @vport and the ndlp to perform the FLOGI for the
- * @vport.
- *
- * Return code
- * 0 - failed to issue initial flogi for @vport
- * 1 - successfully issued initial flogi for @vport
- **/
- int
- lpfc_initial_flogi(struct lpfc_vport *vport)
- {
- struct lpfc_nodelist *ndlp;
- vport->port_state = LPFC_FLOGI;
- lpfc_set_disctmo(vport);
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
- /* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = lpfc_nlp_init(vport, Fabric_DID);
- if (!ndlp)
- return 0;
- /* Set the node type */
- ndlp->nlp_type |= NLP_FABRIC;
- /* Put ndlp onto node list */
- lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
- }
- if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
- /* This decrement of reference count to node shall kick off
- * the release of the node.
- */
- lpfc_nlp_put(ndlp);
- return 0;
- }
- return 1;
- }
- /**
- * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine issues an initial Fabric Discover (FDISC) for the @vport
- * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
- * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
- * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
- * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
- * is then invoked with the @vport and the ndlp to perform the FDISC for the
- * @vport.
- *
- * Return code
- * 0 - failed to issue initial fdisc for @vport
- * 1 - successfully issued initial fdisc for @vport
- **/
- int
- lpfc_initial_fdisc(struct lpfc_vport *vport)
- {
- struct lpfc_nodelist *ndlp;
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
- /* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = lpfc_nlp_init(vport, Fabric_DID);
- if (!ndlp)
- return 0;
- /* Put ndlp onto node list */
- lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
- }
- if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
- /* decrement node reference count to trigger the release of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- return 0;
- }
- return 1;
- }
- /**
- * lpfc_more_plogi - Check and issue remaining plogis for a vport
- * @vport: pointer to a host virtual N_Port data structure.
- *
- * This routine checks whether there are more remaining Port Logins
- * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
- * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
- * to issue ELS PLOGIs up to the configured discover threads with the
- * @vport (@vport->cfg_discovery_threads). The function also decrement
- * the @vport's num_disc_node by 1 if it is not already 0.
- **/
- void
- lpfc_more_plogi(struct lpfc_vport *vport)
- {
- if (vport->num_disc_nodes)
- vport->num…