PageRenderTime 123ms CodeModel.GetById 21ms app.highlight 80ms RepoModel.GetById 1ms app.codeStats 2ms

/drivers/scsi/qla2xxx/qla_os.c

http://github.com/mirrors/linux
C | 7956 lines | 6323 code | 1033 blank | 600 comment | 1011 complexity | 69290e65b10cef3fe76a68cfae7f6121 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2014 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8
   9#include <linux/moduleparam.h>
  10#include <linux/vmalloc.h>
  11#include <linux/delay.h>
  12#include <linux/kthread.h>
  13#include <linux/mutex.h>
  14#include <linux/kobject.h>
  15#include <linux/slab.h>
  16#include <linux/blk-mq-pci.h>
  17#include <linux/refcount.h>
  18
  19#include <scsi/scsi_tcq.h>
  20#include <scsi/scsicam.h>
  21#include <scsi/scsi_transport.h>
  22#include <scsi/scsi_transport_fc.h>
  23
  24#include "qla_target.h"
  25
/*
 * Driver version
 */
char qla2x00_version_str[40];

/* Major number assigned to the apidev character device at module init. */
static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;
  46
/*
 * Module parameters.  All are visible under
 * /sys/module/qla2xxx/parameters/; parameters registered with a
 * writable mode (S_IWUSR / 0644) may be changed at runtime, the rest
 * only at load time.
 */
static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan.  This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization.  Memory allocation requirements "
		"vary by ISP type.  Default is 1 - allocate memory.");

/* Debug mask; also reachable via the "logging" alias below. */
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

/* FDMI registration control; also reachable via the "fdmi" alias. */
int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI registrations. "
		"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		"  0 -- No DIF Support\n"
		"  1 -- Enable DIF for all types\n"
		"  2 -- Enable DIF for all types, except Type 0.\n");

/* NVMe-over-FC defaults on only when the kernel has FC-NVMe support. */
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
    "Enables NVME support. "
    "0 - no NVMe.  Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		"  0 -- Error isolation disabled\n"
		"  1 -- Error isolation enabled only for DIX Type 0\n"
		"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support.");

/* Firmware load source; also reachable via the "fwload" alias. */
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:.\n"
		" 2 -- load firmware via the request_firmware() (hotplug).\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst."
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
		 "Enable target reset."
		 "Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
		"Enables GFF_ID checks of port type. "
		"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
		"Default is 1 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour.\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
		"Defines the maximum LU number to register with the SCSI "
		"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
		"Set the Minidump driver capture mask level. "
		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
		"Enable/disable MiniDump. "
		"0 - MiniDump disabled. "
		"1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
		 "Number of extended Logins. "
		 "0 (Default)- Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
		"Allow FW to hold status IOCB until ABTS rsp received. "
		"0 (Default) Do not set fw option. "
		"1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
		"0 (Default). Do not move IOCBs"
		"1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
		 "Detect SFP range and set appropriate distance.\n"
		 "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
		 " Default is 1, enable MSI-X interrupt mechanism.\n"
		 " 0 -- enable traditional pin-based mechanism.\n"
		 " 1 -- enable MSI-X interrupt mechanism.\n"
		 " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
		 "Reserve 1/2 of emergency exchanges for ELS.\n"
		 " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
		 "Override DIF/DIX protection capabilities mask\n"
		 "Default is 0 which sets protection mask based on "
		 "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
		 "  0 -- Let HBA firmware decide\n"
		 "  1 -- Force T10 CRC\n"
		 "  2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
    "Force using internal buffers for DIF information\n"
    "0 (Default). Based on check.\n"
    "1 Force using internal buffers\n");

/* SmartSAN FDMI attributes; also reachable via the "smartsan" alias. */
int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
		"Send SmartSAN Management Attributes for FDMI Registration."
		" Default is 0 - No SmartSAN registration,"
		" 1 - Register SmartSAN Management Attributes.");

/* RDP ELS response control; also reachable via the "rdpenable" alias. */
int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
		"Enables RDP responses. "
		"0 - no RDP responses (default). "
		"1 - provide RDP responses.");
 321
/* Forward declarations for helpers defined later in this file. */
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);


/* FC transport templates registered at module init (vport one is shared). */
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */
 335
/*
 * qla2x00_start_timer() - Arm the per-host heartbeat timer.
 * @vha: SCSI driver HA context
 * @interval: expiry interval in seconds
 *
 * Initializes vha->timer to call qla2x00_timer() after @interval seconds
 * and marks the timer active for the teardown path.
 */
__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	timer_setup(&vha->timer, qla2x00_timer, 0);
	vha->timer.expires = jiffies + interval * HZ;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}
 344
/*
 * qla2x00_restart_timer() - Re-arm an already-initialized host timer.
 * @vha: SCSI driver HA context
 * @interval: expiry interval in seconds
 *
 * Skips re-arming when the device has been marked failed so a dead
 * adapter stops generating timer callbacks.
 */
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}
 357
/*
 * qla2x00_stop_timer() - Cancel the host timer and wait for any running
 * callback to finish, then mark it inactive.
 * @vha: SCSI driver HA context
 */
static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}
 364
/* DPC (deferred procedure call) kernel thread body. */
static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

/* Memory/queue setup and teardown helpers defined later in this file. */
static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
	struct qla_qpair *qpair);
 376/* -------------------------------------------------------------------------- */
/*
 * qla_init_base_qpair() - Wire up the pre-allocated base queue pair.
 * @vha: SCSI driver HA context
 * @req: request queue 0
 * @rsp: response queue 0
 *
 * Links ha->base_qpair to the base request/response queues, the hardware
 * lock, and MSI-X entry, and inherits module-parameter policy
 * (class 2 support).  Caller must have allocated ha->base_qpair.
 */
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;

	rsp->qpair = ha->base_qpair;
	rsp->req = req;
	ha->base_qpair->hw = ha;
	ha->base_qpair->req = req;
	ha->base_qpair->rsp = rsp;
	ha->base_qpair->vha = vha;
	/* Base qpair serializes on the global hardware lock. */
	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
	ha->base_qpair->srb_mempool = ha->srb_mempool;
	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
	ha->base_qpair->enable_class_2 = ql2xenableclass2;
	/* init qpair to this cpu. Will adjust at run time. */
	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	ha->base_qpair->pdev = ha->pdev;

	/* Newer ISPs use a different IOCB kick-off routine. */
	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
 401
/*
 * qla2x00_alloc_queues() - Allocate queue bookkeeping for an HBA.
 * @ha: HA context
 * @req: request queue 0
 * @rsp: response queue 0
 *
 * Allocates the request/response queue pointer arrays, the base queue
 * pair, and (when multiqueue or NVMe is enabled) the qpair pointer map,
 * then records queue 0 in the qid bitmaps.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (previously
 * allocated resources are unwound via the goto chain).
 */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	/* Only multiqueue/NVMe configurations need the qpair map. */
	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
			GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}
 464
 465static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 466{
 467	if (IS_QLAFX00(ha)) {
 468		if (req && req->ring_fx00)
 469			dma_free_coherent(&ha->pdev->dev,
 470			    (req->length_fx00 + 1) * sizeof(request_t),
 471			    req->ring_fx00, req->dma_fx00);
 472	} else if (req && req->ring)
 473		dma_free_coherent(&ha->pdev->dev,
 474		(req->length + 1) * sizeof(request_t),
 475		req->ring, req->dma);
 476
 477	if (req)
 478		kfree(req->outstanding_cmds);
 479
 480	kfree(req);
 481}
 482
 483static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 484{
 485	if (IS_QLAFX00(ha)) {
 486		if (rsp && rsp->ring_fx00)
 487			dma_free_coherent(&ha->pdev->dev,
 488			    (rsp->length_fx00 + 1) * sizeof(request_t),
 489			    rsp->ring_fx00, rsp->dma_fx00);
 490	} else if (rsp && rsp->ring) {
 491		dma_free_coherent(&ha->pdev->dev,
 492		(rsp->length + 1) * sizeof(response_t),
 493		rsp->ring, rsp->dma);
 494	}
 495	kfree(rsp);
 496}
 497
 498static void qla2x00_free_queues(struct qla_hw_data *ha)
 499{
 500	struct req_que *req;
 501	struct rsp_que *rsp;
 502	int cnt;
 503	unsigned long flags;
 504
 505	if (ha->queue_pair_map) {
 506		kfree(ha->queue_pair_map);
 507		ha->queue_pair_map = NULL;
 508	}
 509	if (ha->base_qpair) {
 510		kfree(ha->base_qpair);
 511		ha->base_qpair = NULL;
 512	}
 513
 514	spin_lock_irqsave(&ha->hardware_lock, flags);
 515	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 516		if (!test_bit(cnt, ha->req_qid_map))
 517			continue;
 518
 519		req = ha->req_q_map[cnt];
 520		clear_bit(cnt, ha->req_qid_map);
 521		ha->req_q_map[cnt] = NULL;
 522
 523		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 524		qla2x00_free_req_que(ha, req);
 525		spin_lock_irqsave(&ha->hardware_lock, flags);
 526	}
 527	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 528
 529	kfree(ha->req_q_map);
 530	ha->req_q_map = NULL;
 531
 532
 533	spin_lock_irqsave(&ha->hardware_lock, flags);
 534	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
 535		if (!test_bit(cnt, ha->rsp_qid_map))
 536			continue;
 537
 538		rsp = ha->rsp_q_map[cnt];
 539		clear_bit(cnt, ha->rsp_qid_map);
 540		ha->rsp_q_map[cnt] =  NULL;
 541		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 542		qla2x00_free_rsp_que(ha, rsp);
 543		spin_lock_irqsave(&ha->hardware_lock, flags);
 544	}
 545	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 546
 547	kfree(ha->rsp_q_map);
 548	ha->rsp_q_map = NULL;
 549}
 550
 551static char *
 552qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 553{
 554	struct qla_hw_data *ha = vha->hw;
 555	static const char *const pci_bus_modes[] = {
 556		"33", "66", "100", "133",
 557	};
 558	uint16_t pci_bus;
 559
 560	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
 561	if (pci_bus) {
 562		snprintf(str, str_len, "PCI-X (%s MHz)",
 563			 pci_bus_modes[pci_bus]);
 564	} else {
 565		pci_bus = (ha->pci_attr & BIT_8) >> 8;
 566		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
 567	}
 568
 569	return str;
 570}
 571
 572static char *
 573qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 574{
 575	static const char *const pci_bus_modes[] = {
 576		"33", "66", "100", "133",
 577	};
 578	struct qla_hw_data *ha = vha->hw;
 579	uint32_t pci_bus;
 580
 581	if (pci_is_pcie(ha->pdev)) {
 582		uint32_t lstat, lspeed, lwidth;
 583		const char *speed_str;
 584
 585		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
 586		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
 587		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 588
 589		switch (lspeed) {
 590		case 1:
 591			speed_str = "2.5GT/s";
 592			break;
 593		case 2:
 594			speed_str = "5.0GT/s";
 595			break;
 596		case 3:
 597			speed_str = "8.0GT/s";
 598			break;
 599		case 4:
 600			speed_str = "16.0GT/s";
 601			break;
 602		default:
 603			speed_str = "<unknown>";
 604			break;
 605		}
 606		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
 607
 608		return str;
 609	}
 610
 611	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
 612	if (pci_bus == 0 || pci_bus == 8)
 613		snprintf(str, str_len, "PCI (%s MHz)",
 614			 pci_bus_modes[pci_bus >> 3]);
 615	else
 616		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
 617			 pci_bus & 4 ? 2 : 1,
 618			 pci_bus_modes[pci_bus & 3]);
 619
 620	return str;
 621}
 622
 623static char *
 624qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 625{
 626	char un_str[10];
 627	struct qla_hw_data *ha = vha->hw;
 628
 629	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
 630	    ha->fw_minor_version, ha->fw_subminor_version);
 631
 632	if (ha->fw_attributes & BIT_9) {
 633		strcat(str, "FLX");
 634		return (str);
 635	}
 636
 637	switch (ha->fw_attributes & 0xFF) {
 638	case 0x7:
 639		strcat(str, "EF");
 640		break;
 641	case 0x17:
 642		strcat(str, "TP");
 643		break;
 644	case 0x37:
 645		strcat(str, "IP");
 646		break;
 647	case 0x77:
 648		strcat(str, "VI");
 649		break;
 650	default:
 651		sprintf(un_str, "(%x)", ha->fw_attributes);
 652		strcat(str, un_str);
 653		break;
 654	}
 655	if (ha->fw_attributes & 0x100)
 656		strcat(str, "X");
 657
 658	return (str);
 659}
 660
/*
 * qla24xx_fw_version_str() - Format "maj.min.sub (attributes)" for
 * ISP24xx-and-later firmware.
 * @vha: SCSI driver HA context
 * @str: destination buffer
 * @size: size of @str in bytes
 *
 * Returns @str.
 */
static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}
 670
 671void qla2x00_sp_free_dma(srb_t *sp)
 672{
 673	struct qla_hw_data *ha = sp->vha->hw;
 674	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 675
 676	if (sp->flags & SRB_DMA_VALID) {
 677		scsi_dma_unmap(cmd);
 678		sp->flags &= ~SRB_DMA_VALID;
 679	}
 680
 681	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
 682		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 683		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 684		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 685	}
 686
 687	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 688		/* List assured to be having elements */
 689		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
 690		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 691	}
 692
 693	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
 694		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
 695
 696		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
 697		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 698	}
 699
 700	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
 701		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
 702
 703		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 704		    ctx1->fcp_cmnd_dma);
 705		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 706		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 707		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 708		mempool_free(ctx1, ha->ctx_mempool);
 709	}
 710}
 711
/*
 * qla2x00_sp_compl() - Complete a SCSI command back to the midlayer.
 * @sp: SRB whose command finished
 * @res: SCSI result to report
 *
 * Frees the SRB's DMA resources, posts @res via scsi_done, and wakes any
 * waiter (e.g. the error handler) blocked on sp->comp.  sp->comp is
 * sampled before sp->free() since the SRB must not be touched afterwards.
 */
void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}
 724
/*
 * qla2xxx_qpair_sp_free_dma() - Release all DMA resources of a
 * queue-pair SRB.
 * @sp: SRB being torn down
 *
 * Queue-pair counterpart of qla2x00_sp_free_dma(); additionally handles
 * the DIF bundling lists (SRB_DIF_BUNDL_DMA_VALID).  Each flag is
 * cleared as its resource is released so re-entry cannot double-free.
 */
void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		/* Return DIF bundling DMA handles to their pool. */
		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		/* Non-zero counters here indicate a leak/accounting bug. */
		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		/* Return the borrowed DSDs to the global free list. */
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}
}
 802
/*
 * qla2xxx_qpair_sp_compl() - Complete a queue-pair SCSI command back to
 * the midlayer.
 * @sp: SRB whose command finished
 * @res: SCSI result to report
 *
 * Queue-pair counterpart of qla2x00_sp_compl(): frees the SRB's DMA
 * resources, posts @res via scsi_done, and wakes any waiter on sp->comp
 * (sampled before sp->free() since the SRB must not be touched after).
 */
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}
 815
/*
 * qla2xxx_queuecommand() - scsi_host_template .queuecommand entry point.
 * @host: SCSI host the command was queued to
 * @cmd: SCSI command from the midlayer
 *
 * Builds an SRB for @cmd and submits it to the ISP via start_scsi().
 * When multiqueue is enabled, the command is redirected to the qpair
 * mapped to its blk-mq hardware queue.  Returns 0 when the command is
 * accepted or completed immediately (via scsi_done), or
 * SCSI_MLQUEUE_TARGET_BUSY to ask the midlayer to retry later.
 */
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	/* Fail fast while unloading or when the rport is already gone. */
	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	/* Multiqueue: route to the qpair backing this blk-mq hw queue. */
	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(cmd->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	/* Reject protection-enabled commands if DIF/DIX is unsupported. */
	if (!vha->flags.difdix_supported &&
		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
			ql_dbg(ql_dbg_io, vha, 0x3004,
			    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
			    cmd);
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	/* SRB lives in the command's per-command private area. */
	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;

	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}
 937
/* For MQ supported I/O */
/*
 * qla2xxx_mqueuecommand() - Submit a SCSI command on a specific qpair.
 * @host: SCSI host the command was queued to
 * @cmd: SCSI command from the midlayer
 * @qpair: queue pair selected from the blk-mq hardware queue mapping
 *
 * Multiqueue fast path invoked from qla2xxx_queuecommand().  Mirrors its
 * checks (rport readiness, fcport state, FCP_RSP retry delay), then
 * submits via start_scsi_mq().  Returns 0 when the command is accepted
 * or completed immediately, or SCSI_MLQUEUE_TARGET_BUSY to retry later.
 */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	/* A missing rport is treated as an offline port. */
	rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	/* SRB lives in the command's per-command private area. */
	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		/* Interface errors complete the command; others retry. */
		if (rval == QLA_INTERFACE_ERROR)
			goto qc24_free_sp_fail_command;
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_free_sp_fail_command:
	sp->free(sp);
	CMD_SP(cmd) = NULL;
	qla2xxx_rel_qpair_sp(sp->qpair, sp);

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}
1025
1026/*
1027 * qla2x00_eh_wait_on_command
1028 *    Waits for the command to be returned by the Firmware for some
1029 *    max time.
1030 *
1031 * Input:
1032 *    cmd = Scsi Command to wait on.
1033 *
1034 * Return:
1035 *    Completed in time : QLA_SUCCESS
1036 *    Did not complete in time : QLA_FUNCTION_FAILED
1037 */
1038static int
1039qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
1040{
1041#define ABORT_POLLING_PERIOD	1000
1042#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
1043	unsigned long wait_iter = ABORT_WAIT_ITER;
1044	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1045	struct qla_hw_data *ha = vha->hw;
1046	int ret = QLA_SUCCESS;
1047
1048	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
1049		ql_dbg(ql_dbg_taskm, vha, 0x8005,
1050		    "Return:eh_wait.\n");
1051		return ret;
1052	}
1053
1054	while (CMD_SP(cmd) && wait_iter--) {
1055		msleep(ABORT_POLLING_PERIOD);
1056	}
1057	if (CMD_SP(cmd))
1058		ret = QLA_FUNCTION_FAILED;
1059
1060	return ret;
1061}
1062
1063/*
1064 * qla2x00_wait_for_hba_online
1065 *    Wait till the HBA is online after going through
1066 *    <= MAX_RETRIES_OF_ISP_ABORT  or
1067 *    finally HBA is disabled ie marked offline
1068 *
1069 * Input:
1070 *     ha - pointer to host adapter structure
1071 *
1072 * Note:
1073 *    Does context switching-Release SPIN_LOCK
1074 *    (if any) before calling this routine.
1075 *
1076 * Return:
1077 *    Success (Adapter is online) : 0
1078 *    Failed  (Adapter is offline/disabled) : 1
1079 */
1080int
1081qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1082{
1083	int		return_status;
1084	unsigned long	wait_online;
1085	struct qla_hw_data *ha = vha->hw;
1086	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1087
1088	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1089	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1090	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1091	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1092	    ha->dpc_active) && time_before(jiffies, wait_online)) {
1093
1094		msleep(1000);
1095	}
1096	if (base_vha->flags.online)
1097		return_status = QLA_SUCCESS;
1098	else
1099		return_status = QLA_FUNCTION_FAILED;
1100
1101	return (return_status);
1102}
1103
1104static inline int test_fcport_count(scsi_qla_host_t *vha)
1105{
1106	struct qla_hw_data *ha = vha->hw;
1107	unsigned long flags;
1108	int res;
1109
1110	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1111	ql_dbg(ql_dbg_init, vha, 0x00ec,
1112	    "tgt %p, fcport_count=%d\n",
1113	    vha, vha->fcport_count);
1114	res = (vha->fcport_count == 0);
1115	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1116
1117	return res;
1118}
1119
1120/*
1121 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
1122 * it has dependency on UNLOADING flag to stop device discovery
1123 */
1124void
1125qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
1126{
1127	u8 i;
1128
1129	qla2x00_mark_all_devices_lost(vha);
1130
1131	for (i = 0; i < 10; i++) {
1132		if (wait_event_timeout(vha->fcport_waitQ,
1133		    test_fcport_count(vha), HZ) > 0)
1134			break;
1135	}
1136
1137	flush_workqueue(vha->hw->wq);
1138}
1139
1140/*
1141 * qla2x00_wait_for_hba_ready
1142 * Wait till the HBA is ready before doing driver unload
1143 *
1144 * Input:
1145 *     ha - pointer to host adapter structure
1146 *
1147 * Note:
1148 *    Does context switching-Release SPIN_LOCK
1149 *    (if any) before calling this routine.
1150 *
1151 */
1152static void
1153qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
1154{
1155	struct qla_hw_data *ha = vha->hw;
1156	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1157
1158	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
1159		ha->flags.mbox_busy) ||
1160	       test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
1161	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
1162		if (test_bit(UNLOADING, &base_vha->dpc_flags))
1163			break;
1164		msleep(1000);
1165	}
1166}
1167
1168int
1169qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
1170{
1171	int		return_status;
1172	unsigned long	wait_reset;
1173	struct qla_hw_data *ha = vha->hw;
1174	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1175
1176	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1177	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1178	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1179	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1180	    ha->dpc_active) && time_before(jiffies, wait_reset)) {
1181
1182		msleep(1000);
1183
1184		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1185		    ha->flags.chip_reset_done)
1186			break;
1187	}
1188	if (ha->flags.chip_reset_done)
1189		return_status = QLA_SUCCESS;
1190	else
1191		return_status = QLA_FUNCTION_FAILED;
1192
1193	return return_status;
1194}
1195
1196#define ISP_REG_DISCONNECT 0xffffffffU
1197/**************************************************************************
1198* qla2x00_isp_reg_stat
1199*
1200* Description:
1201*	Read the host status register of ISP before aborting the command.
1202*
1203* Input:
1204*	ha = pointer to host adapter structure.
1205*
1206*
1207* Returns:
1208*	Either true or false.
1209*
1210* Note:	Return true if there is register disconnect.
1211**************************************************************************/
1212static inline
1213uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1214{
1215	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1216	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1217
1218	if (IS_P3P_TYPE(ha))
1219		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1220	else
1221		return ((RD_REG_DWORD(&reg->host_status)) ==
1222			ISP_REG_DISCONNECT);
1223}
1224
1225/**************************************************************************
1226* qla2xxx_eh_abort
1227*
1228* Description:
1229*    The abort function will abort the specified command.
1230*
1231* Input:
1232*    cmd = Linux SCSI command packet to be aborted.
1233*
1234* Returns:
1235*    Either SUCCESS or FAILED.
1236*
1237* Note:
1238*    Only return FAILED if command not returned by firmware.
1239**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	/* Let the FC transport settle first; propagate a non-zero result. */
	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	/* The srb lives in the midlayer command's private area. */
	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	/* Deleted port or no qpair: report success without issuing an abort. */
	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return SUCCESS;

	/* Publish the completion so the normal done path can wake us up. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);


	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original Command/sp from FW. Let the
	 * original command call scsi_done. In return, he will wakeup
	 * this sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	       "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

	/* Wait for the command completion. */
	/* Timeout is 4 * R_A_TOV in jiffies (r_a_tov/10 is seconds). */
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		/* wait_for_completion_timeout() returns 0 on timeout. */
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}
1320
1321/*
1322 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
1323 */
1324int
1325qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
1326	uint64_t l, enum nexus_wait_type type)
1327{
1328	int cnt, match, status;
1329	unsigned long flags;
1330	struct qla_hw_data *ha = vha->hw;
1331	struct req_que *req;
1332	srb_t *sp;
1333	struct scsi_cmnd *cmd;
1334
1335	status = QLA_SUCCESS;
1336
1337	spin_lock_irqsave(&ha->hardware_lock, flags);
1338	req = vha->req;
1339	for (cnt = 1; status == QLA_SUCCESS &&
1340		cnt < req->num_outstanding_cmds; cnt++) {
1341		sp = req->outstanding_cmds[cnt];
1342		if (!sp)
1343			continue;
1344		if (sp->type != SRB_SCSI_CMD)
1345			continue;
1346		if (vha->vp_idx != sp->vha->vp_idx)
1347			continue;
1348		match = 0;
1349		cmd = GET_CMD_SP(sp);
1350		switch (type) {
1351		case WAIT_HOST:
1352			match = 1;
1353			break;
1354		case WAIT_TARGET:
1355			match = cmd->device->id == t;
1356			break;
1357		case WAIT_LUN:
1358			match = (cmd->device->id == t &&
1359				cmd->device->lun == l);
1360			break;
1361		}
1362		if (!match)
1363			continue;
1364
1365		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1366		status = qla2x00_eh_wait_on_command(cmd);
1367		spin_lock_irqsave(&ha->hardware_lock, flags);
1368	}
1369	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1370
1371	return status;
1372}
1373
/*
 * Failure-step descriptions indexed by the 'err' progress counter in
 * __qla2xxx_eh_generic_reset(); that function uses indices 0, 2 and 3.
 */
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};
1380
/*
 * Common body of the device/target reset EH handlers: waits for the HBA
 * to come online, issues @do_reset against the command's fcport, then
 * drains pending commands on the nexus.  'err' records how far we got
 * and indexes reset_errors[] in the failure message.
 */
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	/* Let the FC transport settle first; propagate a non-zero result. */
	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	/* Port already being torn down: nothing left to reset. */
	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, 1)
		!= QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}
1438
1439static int
1440qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1441{
1442	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1443	struct qla_hw_data *ha = vha->hw;
1444
1445	if (qla2x00_isp_reg_stat(ha)) {
1446		ql_log(ql_log_info, vha, 0x803e,
1447		    "PCI/Register disconnect, exiting.\n");
1448		return FAILED;
1449	}
1450
1451	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
1452	    ha->isp_ops->lun_reset);
1453}
1454
1455static int
1456qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1457{
1458	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1459	struct qla_hw_data *ha = vha->hw;
1460
1461	if (qla2x00_isp_reg_stat(ha)) {
1462		ql_log(ql_log_info, vha, 0x803f,
1463		    "PCI/Register disconnect, exiting.\n");
1464		return FAILED;
1465	}
1466
1467	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
1468	    ha->isp_ops->target_reset);
1469}
1470
1471/**************************************************************************
1472* qla2xxx_eh_bus_reset
1473*
1474* Description:
1475*    The bus reset function will reset the bus and abort any executing
1476*    commands.
1477*
1478* Input:
1479*    cmd = Linux SCSI command packet of the command that cause the
1480*          bus reset.
1481*
1482* Returns:
1483*    SUCCESS/FAILURE (defined as macro in scsi.h).
1484*
1485**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	/* Let the FC transport settle first; propagate a non-zero result. */
	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	/* A downed chip cannot service a loop reset. */
	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}
1547
1548/**************************************************************************
1549* qla2xxx_eh_host_reset
1550*
1551* Description:
1552*    The reset function will reset the Adapter.
1553*
1554* Input:
1555*      cmd = Linux SCSI command packet of the command that cause the
1556*            adapter reset.
1557*
1558* Returns:
1559*      Either SUCCESS or FAILED.
1560*
1561* Note:
1562**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		/* Registers unreachable: disable the board instead of resetting. */
		schedule_work(&ha->board_disable);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active.  Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		/* Virtual port: abort only that vport's ISP context. */
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		/* Full ISP abort on the physical port. */
		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
		QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}
1635
1636/*
1637* qla2x00_loop_reset
1638*      Issue loop reset.
1639*
1640* Input:
1641*      ha = adapter block pointer.
1642*
1643* Returns:
1644*      0 = success
1645*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	/* Optionally reset every target port first (module param + FW flag). */
	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			/* Per-target failures are logged but not fatal. */
			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}


	/* Full login LIP: take the loop down and rediscover everything. */
	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	/* Step failures above are only logged; always report success. */
	return QLA_SUCCESS;
}
1695
1696/*
1697 * The caller must ensure that no completion interrupts will happen
1698 * while this function is in progress.
1699 */
1700static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1701			      unsigned long *flags)
1702	__releases(qp->qp_lock_ptr)
1703	__acquires(qp->qp_lock_ptr)
1704{
1705	DECLARE_COMPLETION_ONSTACK(comp);
1706	scsi_qla_host_t *vha = qp->vha;
1707	struct qla_hw_data *ha = vha->hw;
1708	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1709	int rval;
1710	bool ret_cmd;
1711	uint32_t ratov_j;
1712
1713	lockdep_assert_held(qp->qp_lock_ptr);
1714
1715	if (qla2x00_chip_is_down(vha)) {
1716		sp->done(sp, res);
1717		return;
1718	}
1719
1720	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
1721	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
1722	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1723	     !qla2x00_isp_reg_stat(ha))) {
1724		if (sp->comp) {
1725			sp->done(sp, res);
1726			return;
1727		}
1728
1729		sp->comp = &comp;
1730		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1731
1732		rval = ha->isp_ops->abort_command(sp);
1733		/* Wait for command completion. */
1734		ret_cmd = false;
1735		ratov_j = ha->r_a_tov/10 * 4 * 1000;
1736		ratov_j = msecs_to_jiffies(ratov_j);
1737		switch (rval) {
1738		case QLA_SUCCESS:
1739			if (wait_for_completion_timeout(&comp, ratov_j)) {
1740				ql_dbg(ql_dbg_taskm, vha, 0xffff,
1741				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1742				    __func__, ha->r_a_tov/10);
1743				ret_cmd = true;
1744			}
1745			/* else FW return SP to driver */
1746			break;
1747		default:
1748			ret_cmd = true;
1749			break;
1750		}
1751
1752		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1753		if (ret_cmd && blk_mq_request_started(cmd->request))
1754			sp->done(sp, res);
1755	} else {
1756		sp->done(sp, res);
1757	}
1758}
1759
1760/*
1761 * The caller must ensure that no completion interrupts will happen
1762 * while this function is in progress.
1763 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	/* Nothing to abort before the request queues exist. */
	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	/* Outstanding-command handles start at index 1. */
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				/* May temporarily drop and retake the qpair lock. */
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				/* Target-mode command: just mark it aborted. */
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			/* Drop the handle; the srb is no longer outstanding. */
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
1809
1810/*
1811 * The caller must ensure that no completion interrupts will happen
1812 * while this function is in progress.
1813 */
1814void
1815qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1816{
1817	int que;
1818	struct qla_hw_data *ha = vha->hw;
1819
1820	/* Continue only if initialization complete. */
1821	if (!ha->base_qpair)
1822		return;
1823	__qla2x00_abort_all_cmds(ha->base_qpair, res);
1824
1825	if (!ha->queue_pair_map)
1826		return;
1827	for (que = 0; que < ha->max_qpairs; que++) {
1828		if (!ha->queue_pair_map[que])
1829			continue;
1830
1831		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
1832	}
1833}
1834
1835static int
1836qla2xxx_slave_alloc(struct scsi_device *sdev)
1837{
1838	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1839
1840	if (!rport || fc_remote_port_chkready(rport))
1841		return -ENXIO;
1842
1843	sdev->hostdata = *(fc_port_t **)rport->dd_data;
1844
1845	return 0;
1846}
1847
1848static int
1849qla2xxx_slave_configure(struct scsi_device *sdev)
1850{
1851	scsi_qla_host_t *vha = shost_priv(sdev->host);
1852	struct req_que *req = vha->req;
1853
1854	if (IS_T10_PI_CAPABLE(vha->hw))
1855		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1856
1857	scsi_change_queue_depth(sdev, req->max_q_depth);
1858	return 0;
1859}
1860
static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	/* Drop the fc_port pointer cached by qla2xxx_slave_alloc(). */
	sdev->hostdata = NULL;
}
1866
1867/**
1868 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1869 * @ha: HA context
1870 *
1871 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
1872 * supported addressing method.
1873 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			/* Switch request building to the 64-bit IOCB variants. */
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	/* Fall back to 32-bit masks for both streaming and coherent DMA. */
	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}
1895
1896static void
1897qla2x00_enable_intrs(struct qla_hw_data *ha)
1898{
1899	unsigned long flags = 0;
1900	struct device_reg_2xxx _

Large files files are truncated, but you can click here to view the full file