
/drivers/scsi/qla2xxx/qla_mid.c

http://github.com/mirrors/linux
   1/*
   2 * QLogic Fibre Channel HBA Driver
   3 * Copyright (c)  2003-2014 QLogic Corporation
   4 *
   5 * See LICENSE.qla2xxx for copyright and licensing details.
   6 */
   7#include "qla_def.h"
   8#include "qla_gbl.h"
   9#include "qla_target.h"
  10
  11#include <linux/moduleparam.h>
  12#include <linux/vmalloc.h>
  13#include <linux/slab.h>
  14#include <linux/list.h>
  15
  16#include <scsi/scsi_tcq.h>
  17#include <scsi/scsicam.h>
  18#include <linux/delay.h>
  19
  20void
  21qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
  22{
  23	if (vha->vp_idx && vha->timer_active) {
  24		del_timer_sync(&vha->timer);
  25		vha->timer_active = 0;
  26	}
  27}
  28
  29static uint32_t
  30qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
  31{
  32	uint32_t vp_id;
  33	struct qla_hw_data *ha = vha->hw;
  34	unsigned long flags;
  35
   36	/* Find an empty slot and assign a vp_id */
  37	mutex_lock(&ha->vport_lock);
  38	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
  39	if (vp_id > ha->max_npiv_vports) {
  40		ql_dbg(ql_dbg_vport, vha, 0xa000,
  41		    "vp_id %d is bigger than max-supported %d.\n",
  42		    vp_id, ha->max_npiv_vports);
  43		mutex_unlock(&ha->vport_lock);
  44		return vp_id;
  45	}
  46
  47	set_bit(vp_id, ha->vp_idx_map);
  48	ha->num_vhosts++;
  49	vha->vp_idx = vp_id;
  50
  51	spin_lock_irqsave(&ha->vport_slock, flags);
  52	list_add_tail(&vha->list, &ha->vp_list);
  53	spin_unlock_irqrestore(&ha->vport_slock, flags);
  54
  55	spin_lock_irqsave(&ha->hardware_lock, flags);
  56	qlt_update_vp_map(vha, SET_VP_IDX);
  57	spin_unlock_irqrestore(&ha->hardware_lock, flags);
  58
  59	mutex_unlock(&ha->vport_lock);
  60	return vp_id;
  61}
  62
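A failed allocation above is signalled by a returned index greater than ha->max_npiv_vports rather than by a negative value; that is how qla24xx_create_vhost() later in this file checks it. A minimal caller sketch, assuming the driver's internal headers (qla_def.h) and <linux/errno.h> are available; the function name and the -ENOSPC mapping are illustrative, not part of the driver:

	/* Illustrative caller; only the out-of-range check mirrors the driver. */
	static int example_bind_vp_id(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;

		vha->vp_idx = qla24xx_allocate_vp_id(vha);
		if (vha->vp_idx > ha->max_npiv_vports)
			return -ENOSPC;	/* no free slot in vp_idx_map */
		return 0;
	}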
  63void
  64qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
  65{
  66	uint16_t vp_id;
  67	struct qla_hw_data *ha = vha->hw;
  68	unsigned long flags = 0;
  69	u8 i;
  70
  71	mutex_lock(&ha->vport_lock);
  72	/*
   73	 * Wait for all pending activities to finish before removing the vport
   74	 * from the list.
   75	 * The lock needs to be held for safe removal from the list (it
   76	 * ensures no active vp_list traversal while the vport is removed
   77	 * from the queue).
  78	 */
  79	for (i = 0; i < 10; i++) {
  80		if (wait_event_timeout(vha->vref_waitq,
  81		    !atomic_read(&vha->vref_count), HZ) > 0)
  82			break;
  83	}
  84
  85	spin_lock_irqsave(&ha->vport_slock, flags);
  86	if (atomic_read(&vha->vref_count)) {
  87		ql_dbg(ql_dbg_vport, vha, 0xfffa,
  88		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
  89		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
  90	}
  91	list_del(&vha->list);
  92	qlt_update_vp_map(vha, RESET_VP_IDX);
  93	spin_unlock_irqrestore(&ha->vport_slock, flags);
  94
  95	vp_id = vha->vp_idx;
  96	ha->num_vhosts--;
  97	clear_bit(vp_id, ha->vp_idx_map);
  98
  99	mutex_unlock(&ha->vport_lock);
 100}
 101
 102static scsi_qla_host_t *
 103qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 104{
 105	scsi_qla_host_t *vha;
 106	struct scsi_qla_host *tvha;
 107	unsigned long flags;
 108
 109	spin_lock_irqsave(&ha->vport_slock, flags);
 110	/* Locate matching device in database. */
 111	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
 112		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
 113			spin_unlock_irqrestore(&ha->vport_slock, flags);
 114			return vha;
 115		}
 116	}
 117	spin_unlock_irqrestore(&ha->vport_slock, flags);
 118	return NULL;
 119}
 120
 121/*
 122 * qla2x00_mark_vp_devices_dead
 123 *	Updates fcport state when device goes offline.
 124 *
 125 * Input:
  126 *	vha = virtual host adapter block pointer
  127 *	      (fcport entries are taken from vha->vp_fcports).
 128 *
 129 * Return:
 130 *	None.
 131 *
 132 * Context:
 133 */
 134static void
 135qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 136{
 137	/*
 138	 * !!! NOTE !!!
  139	 * If this function is called from contexts other than vp create,
  140	 * disable or delete, make sure it is synchronized with the
 141	 * delete thread.
 142	 */
 143	fc_port_t *fcport;
 144
 145	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 146		ql_dbg(ql_dbg_vport, vha, 0xa001,
 147		    "Marking port dead, loop_id=0x%04x : %x.\n",
 148		    fcport->loop_id, fcport->vha->vp_idx);
 149
 150		qla2x00_mark_device_lost(vha, fcport, 0);
 151		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
 152	}
 153}
 154
 155int
 156qla24xx_disable_vp(scsi_qla_host_t *vha)
 157{
 158	unsigned long flags;
 159	int ret = QLA_SUCCESS;
 160	fc_port_t *fcport;
 161
 162	if (vha->hw->flags.fw_started)
 163		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 164
 165	atomic_set(&vha->loop_state, LOOP_DOWN);
 166	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 167	list_for_each_entry(fcport, &vha->vp_fcports, list)
 168		fcport->logout_on_delete = 0;
 169
 170	qla2x00_mark_all_devices_lost(vha);
 171
 172	/* Remove port id from vp target map */
 173	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
 174	qlt_update_vp_map(vha, RESET_AL_PA);
 175	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 176
 177	qla2x00_mark_vp_devices_dead(vha);
 178	atomic_set(&vha->vp_state, VP_FAILED);
 179	vha->flags.management_server_logged_in = 0;
 180	if (ret == QLA_SUCCESS) {
 181		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
 182	} else {
 183		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 184		return -1;
 185	}
 186	return 0;
 187}
 188
 189int
 190qla24xx_enable_vp(scsi_qla_host_t *vha)
 191{
 192	int ret;
 193	struct qla_hw_data *ha = vha->hw;
 194	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 195
 196	/* Check if physical ha port is Up */
 197	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
 198		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
 199		!(ha->current_topology & ISP_CFG_F)) {
 200		vha->vp_err_state =  VP_ERR_PORTDWN;
 201		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
 202		ql_dbg(ql_dbg_taskm, vha, 0x800b,
 203		    "%s skip enable. loop_state %x topo %x\n",
 204		    __func__, base_vha->loop_state.counter,
 205		    ha->current_topology);
 206
 207		goto enable_failed;
 208	}
 209
 210	/* Initialize the new vport unless it is a persistent port */
 211	mutex_lock(&ha->vport_lock);
 212	ret = qla24xx_modify_vp_config(vha);
 213	mutex_unlock(&ha->vport_lock);
 214
 215	if (ret != QLA_SUCCESS) {
 216		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 217		goto enable_failed;
 218	}
 219
 220	ql_dbg(ql_dbg_taskm, vha, 0x801a,
 221	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
 222	return 0;
 223
 224enable_failed:
 225	ql_dbg(ql_dbg_taskm, vha, 0x801b,
 226	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
 227	return 1;
 228}
 229
 230static void
 231qla24xx_configure_vp(scsi_qla_host_t *vha)
 232{
 233	struct fc_vport *fc_vport;
 234	int ret;
 235
 236	fc_vport = vha->fc_vport;
 237
 238	ql_dbg(ql_dbg_vport, vha, 0xa002,
 239	    "%s: change request #3.\n", __func__);
 240	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
 241	if (ret != QLA_SUCCESS) {
 242		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
 243		    "receiving of RSCN requests: 0x%x.\n", ret);
 244		return;
 245	} else {
 246		/* Corresponds to SCR enabled */
 247		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
 248	}
 249
 250	vha->flags.online = 1;
 251	if (qla24xx_configure_vhba(vha))
 252		return;
 253
 254	atomic_set(&vha->vp_state, VP_ACTIVE);
 255	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
 256}
 257
 258void
 259qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 260{
 261	scsi_qla_host_t *vha;
 262	struct qla_hw_data *ha = rsp->hw;
 263	int i = 0;
 264	unsigned long flags;
 265
 266	spin_lock_irqsave(&ha->vport_slock, flags);
 267	list_for_each_entry(vha, &ha->vp_list, list) {
 268		if (vha->vp_idx) {
 269			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
 270				continue;
 271
 272			atomic_inc(&vha->vref_count);
 273			spin_unlock_irqrestore(&ha->vport_slock, flags);
 274
 275			switch (mb[0]) {
 276			case MBA_LIP_OCCURRED:
 277			case MBA_LOOP_UP:
 278			case MBA_LOOP_DOWN:
 279			case MBA_LIP_RESET:
 280			case MBA_POINT_TO_POINT:
 281			case MBA_CHG_IN_CONNECTION:
 282				ql_dbg(ql_dbg_async, vha, 0x5024,
 283				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
 284				    i, *mb, vha);
 285				qla2x00_async_event(vha, rsp, mb);
 286				break;
 287			case MBA_PORT_UPDATE:
 288			case MBA_RSCN_UPDATE:
 289				if ((mb[3] & 0xff) == vha->vp_idx) {
 290					ql_dbg(ql_dbg_async, vha, 0x5024,
 291					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
 292					    i, *mb, vha);
 293					qla2x00_async_event(vha, rsp, mb);
 294				}
 295				break;
 296			}
 297
 298			spin_lock_irqsave(&ha->vport_slock, flags);
 299			atomic_dec(&vha->vref_count);
 300			wake_up(&vha->vref_waitq);
 301		}
 302		i++;
 303	}
 304	spin_unlock_irqrestore(&ha->vport_slock, flags);
 305}
 306
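qla2x00_alert_all_vps() above and qla2x00_do_dpc_all_vps() below share the same traversal discipline: take vport_slock, take a reference on the vport via vref_count, drop the lock for the per-vport call, then retake the lock, drop the reference and wake vref_waitq so qla24xx_deallocate_vp_id() can make progress. A condensed sketch of that pattern, assuming the driver's internal headers; example_for_each_vp() and do_work() are illustrative names:

	/* Condensed sketch of the vref_count get/put pattern used in this
	 * file; do_work() stands in for the per-vport call. */
	static void example_for_each_vp(struct qla_hw_data *ha,
					void (*do_work)(scsi_qla_host_t *))
	{
		scsi_qla_host_t *vp;
		unsigned long flags;

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (!vp->vp_idx)
				continue;
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			do_work(vp);	/* runs without vport_slock held */

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
			wake_up(&vp->vref_waitq);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}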
 307int
 308qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 309{
 310	fc_port_t *fcport;
 311
 312	/*
  313	 * To exclusively reset the vport, we need to log it out first.
  314	 * Note: this control_vp can fail if an ISP reset has already been
  315	 * issued; this is expected, as the vp would already be
  316	 * logged out due to the ISP reset.
 317	 */
 318	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
 319		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 320		list_for_each_entry(fcport, &vha->vp_fcports, list)
 321			fcport->logout_on_delete = 0;
 322	}
 323
 324	/*
  325	 * The physical port will do most of the abort and recovery work. We
  326	 * can just treat it as a loop down.
 327	 */
 328	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 329		atomic_set(&vha->loop_state, LOOP_DOWN);
 330		qla2x00_mark_all_devices_lost(vha);
 331	} else {
 332		if (!atomic_read(&vha->loop_down_timer))
 333			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 334	}
 335
 336	ql_dbg(ql_dbg_taskm, vha, 0x801d,
 337	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
 338
 339	return qla24xx_enable_vp(vha);
 340}
 341
 342static int
 343qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 344{
 345	struct qla_hw_data *ha = vha->hw;
 346	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 347
 348	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
 349	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
 350
 351	/* Check if Fw is ready to configure VP first */
 352	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
 353		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
  354			/* VP acquired. Complete port configuration. */
 355			ql_dbg(ql_dbg_dpc, vha, 0x4014,
 356			    "Configure VP scheduled.\n");
 357			qla24xx_configure_vp(vha);
 358			ql_dbg(ql_dbg_dpc, vha, 0x4015,
 359			    "Configure VP end.\n");
 360			return 0;
 361		}
 362	}
 363
 364	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
 365		if (atomic_read(&vha->loop_state) == LOOP_READY) {
 366			qla24xx_process_purex_list(&vha->purex_list);
 367			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
 368		}
 369	}
 370
 371	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
 372		ql_dbg(ql_dbg_dpc, vha, 0x4016,
 373		    "FCPort update scheduled.\n");
 374		qla2x00_update_fcports(vha);
 375		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
 376		ql_dbg(ql_dbg_dpc, vha, 0x4017,
 377		    "FCPort update end.\n");
 378	}
 379
 380	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
 381	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
 382	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
 383
 384		if (!vha->relogin_jif ||
 385		    time_after_eq(jiffies, vha->relogin_jif)) {
 386			vha->relogin_jif = jiffies + HZ;
 387			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 388
 389			ql_dbg(ql_dbg_dpc, vha, 0x4018,
 390			    "Relogin needed scheduled.\n");
 391			qla24xx_post_relogin_work(vha);
 392		}
 393	}
 394
 395	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
 396	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
 397		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
 398	}
 399
 400	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
 401		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
 402			ql_dbg(ql_dbg_dpc, vha, 0x401a,
 403			    "Loop resync scheduled.\n");
 404			qla2x00_loop_resync(vha);
 405			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
 406			ql_dbg(ql_dbg_dpc, vha, 0x401b,
 407			    "Loop resync end.\n");
 408		}
 409	}
 410
 411	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
 412	    "Exiting %s.\n", __func__);
 413	return 0;
 414}
 415
 416void
 417qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 418{
 419	struct qla_hw_data *ha = vha->hw;
 420	scsi_qla_host_t *vp;
 421	unsigned long flags = 0;
 422
 423	if (vha->vp_idx)
 424		return;
 425	if (list_empty(&ha->vp_list))
 426		return;
 427
 428	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 429
 430	if (!(ha->current_topology & ISP_CFG_F))
 431		return;
 432
 433	spin_lock_irqsave(&ha->vport_slock, flags);
 434	list_for_each_entry(vp, &ha->vp_list, list) {
 435		if (vp->vp_idx) {
 436			atomic_inc(&vp->vref_count);
 437			spin_unlock_irqrestore(&ha->vport_slock, flags);
 438
 439			qla2x00_do_dpc_vp(vp);
 440
 441			spin_lock_irqsave(&ha->vport_slock, flags);
 442			atomic_dec(&vp->vref_count);
 443		}
 444	}
 445	spin_unlock_irqrestore(&ha->vport_slock, flags);
 446}
 447
 448int
 449qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
 450{
 451	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
 452	struct qla_hw_data *ha = base_vha->hw;
 453	scsi_qla_host_t *vha;
 454	uint8_t port_name[WWN_SIZE];
 455
 456	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
 457		return VPCERR_UNSUPPORTED;
 458
  459	/* Check that the F/W and H/W support NPIV */
 460	if (!ha->flags.npiv_supported)
 461		return VPCERR_UNSUPPORTED;
 462
  463	/* Check whether an NPIV-capable switch is present */
 464	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
 465		return VPCERR_NO_FABRIC_SUPP;
 466
  467	/* Check for a unique WWPN */
 468	u64_to_wwn(fc_vport->port_name, port_name);
 469	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
 470		return VPCERR_BAD_WWN;
 471	vha = qla24xx_find_vhost_by_name(ha, port_name);
 472	if (vha)
 473		return VPCERR_BAD_WWN;
 474
  475	/* Check against the maximum number of supported NPIV vports */
  476	if (ha->num_vhosts > ha->max_npiv_vports) {
  477		ql_dbg(ql_dbg_vport, vha, 0xa004,
  478		    "num_vhosts %u is bigger "
  479		    "than max_npiv_vports %u.\n",
 480		    ha->num_vhosts, ha->max_npiv_vports);
 481		return VPCERR_UNSUPPORTED;
 482	}
 483	return 0;
 484}
 485
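The sanity check above returns 0 when a vport create request can proceed, or a VPCERR_* code otherwise, and is meant to run before qla24xx_create_vhost(). A sketch of that ordering, assuming the fc_vport comes from the FC transport; the wrapper name and the -ENOMEM mapping are illustrative, the real transport glue lives outside this file:

	/* Illustrative ordering only. */
	static int example_vport_create(struct fc_vport *fc_vport)
	{
		scsi_qla_host_t *vha;
		int err;

		err = qla24xx_vport_create_req_sanity_check(fc_vport);
		if (err)
			return err;		/* VPCERR_* code */

		vha = qla24xx_create_vhost(fc_vport);
		if (!vha)
			return -ENOMEM;		/* illustrative mapping */
		return 0;
	}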
 486scsi_qla_host_t *
 487qla24xx_create_vhost(struct fc_vport *fc_vport)
 488{
 489	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
 490	struct qla_hw_data *ha = base_vha->hw;
 491	scsi_qla_host_t *vha;
 492	struct scsi_host_template *sht = &qla2xxx_driver_template;
 493	struct Scsi_Host *host;
 494
 495	vha = qla2x00_create_host(sht, ha);
 496	if (!vha) {
 497		ql_log(ql_log_warn, vha, 0xa005,
 498		    "scsi_host_alloc() failed for vport.\n");
  499		return NULL;
 500	}
 501
 502	host = vha->host;
 503	fc_vport->dd_data = vha;
 504	/* New host info */
 505	u64_to_wwn(fc_vport->node_name, vha->node_name);
 506	u64_to_wwn(fc_vport->port_name, vha->port_name);
 507
 508	vha->fc_vport = fc_vport;
 509	vha->device_flags = 0;
 510	vha->vp_idx = qla24xx_allocate_vp_id(vha);
 511	if (vha->vp_idx > ha->max_npiv_vports) {
 512		ql_dbg(ql_dbg_vport, vha, 0xa006,
 513		    "Couldn't allocate vp_id.\n");
 514		goto create_vhost_failed;
 515	}
 516	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
 517
 518	vha->dpc_flags = 0L;
 519	ha->dpc_active = 0;
 520	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 521	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
 522
 523	/*
  524	 * Set VP_SCR_NEEDED so that a parent's RSCN is not processed for the
  525	 * vport before its SCR is complete.
 526	 */
 527	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
 528	atomic_set(&vha->loop_state, LOOP_DOWN);
 529	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 530
 531	qla2x00_start_timer(vha, WATCH_INTERVAL);
 532
 533	vha->req = base_vha->req;
 534	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
 535	host->can_queue = base_vha->req->length + 128;
 536	host->cmd_per_lun = 3;
 537	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
 538		host->max_cmd_len = 32;
 539	else
 540		host->max_cmd_len = MAX_CMDSZ;
 541	host->max_channel = MAX_BUSES - 1;
 542	host->max_lun = ql2xmaxlun;
 543	host->unique_id = host->host_no;
 544	host->max_id = ha->max_fibre_devices;
 545	host->transportt = qla2xxx_transport_vport_template;
 546
 547	ql_dbg(ql_dbg_vport, vha, 0xa007,
 548	    "Detect vport hba %ld at address = %p.\n",
 549	    vha->host_no, vha);
 550
 551	vha->flags.init_done = 1;
 552
 553	mutex_lock(&ha->vport_lock);
 554	set_bit(vha->vp_idx, ha->vp_idx_map);
 555	ha->cur_vport_count++;
 556	mutex_unlock(&ha->vport_lock);
 557
 558	return vha;
 559
 560create_vhost_failed:
 561	return NULL;
 562}
 563
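qla24xx_create_vhost() stores the new vha in fc_vport->dd_data, so later transport callbacks can recover the vport from the fc_vport alone. A minimal sketch of that lookup, paired with the enable/disable helpers earlier in this file; the callback name and signature are illustrative:

	/* Illustrative callback; only the dd_data lookup and the calls to
	 * the existing enable/disable helpers mirror the driver. */
	static int example_vport_set_disabled(struct fc_vport *fc_vport,
					      bool disable)
	{
		scsi_qla_host_t *vha = fc_vport->dd_data;

		return disable ? qla24xx_disable_vp(vha) : qla24xx_enable_vp(vha);
	}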
 564static void
 565qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
 566{
 567	struct qla_hw_data *ha = vha->hw;
 568	uint16_t que_id = req->id;
 569
 570	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
 571		sizeof(request_t), req->ring, req->dma);
 572	req->ring = NULL;
 573	req->dma = 0;
 574	if (que_id) {
 575		ha->req_q_map[que_id] = NULL;
 576		mutex_lock(&ha->vport_lock);
 577		clear_bit(que_id, ha->req_qid_map);
 578		mutex_unlock(&ha->vport_lock);
 579	}
 580	kfree(req->outstanding_cmds);
 581	kfree(req);
 582	req = NULL;
 583}
 584
 585static void
 586qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 587{
 588	struct qla_hw_data *ha = vha->hw;
 589	uint16_t que_id = rsp->id;
 590
 591	if (rsp->msix && rsp->msix->have_irq) {
 592		free_irq(rsp->msix->vector, rsp->msix->handle);
 593		rsp->msix->have_irq = 0;
 594		rsp->msix->in_use = 0;
 595		rsp->msix->handle = NULL;
 596	}
 597	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
 598		sizeof(response_t), rsp->ring, rsp->dma);
 599	rsp->ring = NULL;
 600	rsp->dma = 0;
 601	if (que_id) {
 602		ha->rsp_q_map[que_id] = NULL;
 603		mutex_lock(&ha->vport_lock);
 604		clear_bit(que_id, ha->rsp_qid_map);
 605		mutex_unlock(&ha->vport_lock);
 606	}
 607	kfree(rsp);
 608	rsp = NULL;
 609}
 610
 611int
 612qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
 613{
 614	int ret = QLA_SUCCESS;
 615
 616	if (req && vha->flags.qpairs_req_created) {
 617		req->options |= BIT_0;
 618		ret = qla25xx_init_req_que(vha, req);
 619		if (ret != QLA_SUCCESS)
 620			return QLA_FUNCTION_FAILED;
 621
 622		qla25xx_free_req_que(vha, req);
 623	}
 624
 625	return ret;
 626}
 627
 628int
 629qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 630{
 631	int ret = QLA_SUCCESS;
 632
 633	if (rsp && vha->flags.qpairs_rsp_created) {
 634		rsp->options |= BIT_0;
 635		ret = qla25xx_init_rsp_que(vha, rsp);
 636		if (ret != QLA_SUCCESS)
 637			return QLA_FUNCTION_FAILED;
 638
 639		qla25xx_free_rsp_que(vha, rsp);
 640	}
 641
 642	return ret;
 643}
 644
 645/* Delete all queues for a given vhost */
 646int
 647qla25xx_delete_queues(struct scsi_qla_host *vha)
 648{
 649	int cnt, ret = 0;
 650	struct req_que *req = NULL;
 651	struct rsp_que *rsp = NULL;
 652	struct qla_hw_data *ha = vha->hw;
 653	struct qla_qpair *qpair, *tqpair;
 654
 655	if (ql2xmqsupport || ql2xnvmeenable) {
 656		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
 657		    qp_list_elem)
 658			qla2xxx_delete_qpair(vha, qpair);
 659	} else {
 660		/* Delete request queues */
 661		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
 662			req = ha->req_q_map[cnt];
 663			if (req && test_bit(cnt, ha->req_qid_map)) {
 664				ret = qla25xx_delete_req_que(vha, req);
 665				if (ret != QLA_SUCCESS) {
 666					ql_log(ql_log_warn, vha, 0x00ea,
 667					    "Couldn't delete req que %d.\n",
 668					    req->id);
 669					return ret;
 670				}
 671			}
 672		}
 673
 674		/* Delete response queues */
 675		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
 676			rsp = ha->rsp_q_map[cnt];
 677			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
 678				ret = qla25xx_delete_rsp_que(vha, rsp);
 679				if (ret != QLA_SUCCESS) {
 680					ql_log(ql_log_warn, vha, 0x00eb,
 681					    "Couldn't delete rsp que %d.\n",
 682					    rsp->id);
 683					return ret;
 684				}
 685			}
 686		}
 687	}
 688
 689	return ret;
 690}
 691
 692int
 693qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 694    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
 695{
 696	int ret = 0;
 697	struct req_que *req = NULL;
 698	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 699	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
 700	uint16_t que_id = 0;
 701	device_reg_t *reg;
 702	uint32_t cnt;
 703
 704	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
 705	if (req == NULL) {
 706		ql_log(ql_log_fatal, base_vha, 0x00d9,
 707		    "Failed to allocate memory for request queue.\n");
 708		goto failed;
 709	}
 710
 711	req->length = REQUEST_ENTRY_CNT_24XX;
 712	req->ring = dma_alloc_coherent(&ha->pdev->dev,
 713			(req->length + 1) * sizeof(request_t),
 714			&req->dma, GFP_KERNEL);
 715	if (req->ring == NULL) {
 716		ql_log(ql_log_fatal, base_vha, 0x00da,
 717		    "Failed to allocate memory for request_ring.\n");
 718		goto que_failed;
 719	}
 720
 721	ret = qla2x00_alloc_outstanding_cmds(ha, req);
 722	if (ret != QLA_SUCCESS)
 723		goto que_failed;
 724
 725	mutex_lock(&ha->mq_lock);
 726	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
 727	if (que_id >= ha->max_req_queues) {
 728		mutex_unlock(&ha->mq_lock);
 729		ql_log(ql_log_warn, base_vha, 0x00db,
 730		    "No resources to create additional request queue.\n");
 731		goto que_failed;
 732	}
 733	set_bit(que_id, ha->req_qid_map);
 734	ha->req_q_map[que_id] = req;
 735	req->rid = rid;
 736	req->vp_idx = vp_idx;
 737	req->qos = qos;
 738
 739	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
 740	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
 741	    que_id, req->rid, req->vp_idx, req->qos);
 742	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
 743	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
 744	    que_id, req->rid, req->vp_idx, req->qos);
 745	if (rsp_que < 0)
 746		req->rsp = NULL;
 747	else
 748		req->rsp = ha->rsp_q_map[rsp_que];
 749	/* Use alternate PCI bus number */
 750	if (MSB(req->rid))
 751		options |= BIT_4;
 752	/* Use alternate PCI devfn */
 753	if (LSB(req->rid))
 754		options |= BIT_5;
 755	req->options = options;
 756
 757	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
 758	    "options=0x%x.\n", req->options);
 759	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
 760	    "options=0x%x.\n", req->options);
 761	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
 762		req->outstanding_cmds[cnt] = NULL;
 763	req->current_outstanding_cmd = 1;
 764
 765	req->ring_ptr = req->ring;
 766	req->ring_index = 0;
 767	req->cnt = req->length;
 768	req->id = que_id;
 769	reg = ISP_QUE_REG(ha, que_id);
 770	req->req_q_in = &reg->isp25mq.req_q_in;
 771	req->req_q_out = &reg->isp25mq.req_q_out;
 772	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
 773	req->out_ptr = (void *)(req->ring + req->length);
 774	mutex_unlock(&ha->mq_lock);
 775	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
 776	    "ring_ptr=%p ring_index=%d, "
 777	    "cnt=%d id=%d max_q_depth=%d.\n",
 778	    req->ring_ptr, req->ring_index,
 779	    req->cnt, req->id, req->max_q_depth);
 780	ql_dbg(ql_dbg_init, base_vha, 0x00de,
 781	    "ring_ptr=%p ring_index=%d, "
 782	    "cnt=%d id=%d max_q_depth=%d.\n",
 783	    req->ring_ptr, req->ring_index, req->cnt,
 784	    req->id, req->max_q_depth);
 785
 786	if (startqp) {
 787		ret = qla25xx_init_req_que(base_vha, req);
 788		if (ret != QLA_SUCCESS) {
 789			ql_log(ql_log_fatal, base_vha, 0x00df,
 790			    "%s failed.\n", __func__);
 791			mutex_lock(&ha->mq_lock);
 792			clear_bit(que_id, ha->req_qid_map);
 793			mutex_unlock(&ha->mq_lock);
 794			goto que_failed;
 795		}
 796		vha->flags.qpairs_req_created = 1;
 797	}
 798
 799	return req->id;
 800
 801que_failed:
 802	qla25xx_free_req_que(base_vha, req);
 803failed:
 804	return 0;
 805}
 806
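Both queue-creation helpers in this file return the id of the new queue on success and 0 on failure (queue id 0 is reserved for the base queue, so 0 is unambiguous). A caller fragment checking that convention; the surrounding variables (ha, vha, options, vp_idx, rsp_que_id, qos) are assumed to exist, and the debug code 0xffff is illustrative:

	/* Illustrative caller fragment; zero means the queue was not created. */
	int que_id = qla25xx_create_req_que(ha, options, vp_idx, 0,
					    rsp_que_id, qos, true);
	if (!que_id)
		ql_log(ql_log_warn, vha, 0xffff,
		    "Failed to create additional request queue.\n");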
 807static void qla_do_work(struct work_struct *work)
 808{
 809	unsigned long flags;
 810	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
 811	struct scsi_qla_host *vha;
 812	struct qla_hw_data *ha = qpair->hw;
 813
 814	spin_lock_irqsave(&qpair->qp_lock, flags);
 815	vha = pci_get_drvdata(ha->pdev);
 816	qla24xx_process_response_queue(vha, qpair->rsp);
 817	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 818
 819}
 820
 821/* create response queue */
 822int
 823qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 824    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
 825{
 826	int ret = 0;
 827	struct rsp_que *rsp = NULL;
 828	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 829	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
 830	uint16_t que_id = 0;
 831	device_reg_t *reg;
 832
 833	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
 834	if (rsp == NULL) {
 835		ql_log(ql_log_warn, base_vha, 0x0066,
 836		    "Failed to allocate memory for response queue.\n");
 837		goto failed;
 838	}
 839
 840	rsp->length = RESPONSE_ENTRY_CNT_MQ;
 841	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
 842			(rsp->length + 1) * sizeof(response_t),
 843			&rsp->dma, GFP_KERNEL);
 844	if (rsp->ring == NULL) {
 845		ql_log(ql_log_warn, base_vha, 0x00e1,
 846		    "Failed to allocate memory for response ring.\n");
 847		goto que_failed;
 848	}
 849
 850	mutex_lock(&ha->mq_lock);
 851	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
 852	if (que_id >= ha->max_rsp_queues) {
 853		mutex_unlock(&ha->mq_lock);
 854		ql_log(ql_log_warn, base_vha, 0x00e2,
  855		    "No resources to create additional response queue.\n");
 856		goto que_failed;
 857	}
 858	set_bit(que_id, ha->rsp_qid_map);
 859
 860	rsp->msix = qpair->msix;
 861
 862	ha->rsp_q_map[que_id] = rsp;
 863	rsp->rid = rid;
 864	rsp->vp_idx = vp_idx;
 865	rsp->hw = ha;
 866	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
 867	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
 868	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
 869	/* Use alternate PCI bus number */
 870	if (MSB(rsp->rid))
 871		options |= BIT_4;
 872	/* Use alternate PCI devfn */
 873	if (LSB(rsp->rid))
 874		options |= BIT_5;
  875	/* Enable MSIX handshake mode for adapters that are not NACK capable */
 876	if (!IS_MSIX_NACK_CAPABLE(ha))
 877		options |= BIT_6;
 878
 879	/* Set option to indicate response queue creation */
 880	options |= BIT_1;
 881
 882	rsp->options = options;
 883	rsp->id = que_id;
 884	reg = ISP_QUE_REG(ha, que_id);
 885	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
 886	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
 887	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
 888	mutex_unlock(&ha->mq_lock);
 889	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
 890	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
 891	    rsp->options, rsp->id, rsp->rsp_q_in,
 892	    rsp->rsp_q_out);
 893	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
 894	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
 895	    rsp->options, rsp->id, rsp->rsp_q_in,
 896	    rsp->rsp_q_out);
 897
 898	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
 899		ha->flags.disable_msix_handshake ?
 900		QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
 901	if (ret)
 902		goto que_failed;
 903
 904	if (startqp) {
 905		ret = qla25xx_init_rsp_que(base_vha, rsp);
 906		if (ret != QLA_SUCCESS) {
 907			ql_log(ql_log_fatal, base_vha, 0x00e7,
 908			    "%s failed.\n", __func__);
 909			mutex_lock(&ha->mq_lock);
 910			clear_bit(que_id, ha->rsp_qid_map);
 911			mutex_unlock(&ha->mq_lock);
 912			goto que_failed;
 913		}
 914		vha->flags.qpairs_rsp_created = 1;
 915	}
 916	rsp->req = NULL;
 917
 918	qla2x00_init_response_q_entries(rsp);
 919	if (qpair->hw->wq)
 920		INIT_WORK(&qpair->q_work, qla_do_work);
 921	return rsp->id;
 922
 923que_failed:
 924	qla25xx_free_rsp_que(base_vha, rsp);
 925failed:
 926	return 0;
 927}
 928
 929static void qla_ctrlvp_sp_done(srb_t *sp, int res)
 930{
 931	if (sp->comp)
 932		complete(sp->comp);
  933	/* Don't free sp here; let the caller do the free. */
 934}
 935
 936/**
  937 * qla24xx_control_vp() - Send a control command to a virtual port
  938 * @vha:	adapter block pointer
  939 * @cmd:	command type to be sent to control the virtual port
 940 *
 941 * Return:	qla2xxx local function return status code.
 942 */
 943int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 944{
 945	int rval = QLA_MEMORY_ALLOC_FAILED;
 946	struct qla_hw_data *ha = vha->hw;
 947	int	vp_index = vha->vp_idx;
 948	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 949	DECLARE_COMPLETION_ONSTACK(comp);
 950	srb_t *sp;
 951
 952	ql_dbg(ql_dbg_vport, vha, 0x10c1,
 953	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
 954
 955	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
 956		return QLA_PARAMETER_ERROR;
 957
 958	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
 959	if (!sp)
 960		return rval;
 961
 962	sp->type = SRB_CTRL_VP;
 963	sp->name = "ctrl_vp";
 964	sp->comp = &comp;
 965	sp->done = qla_ctrlvp_sp_done;
 966	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 967	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 968	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
 969	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
 970
 971	rval = qla2x00_start_sp(sp);
 972	if (rval != QLA_SUCCESS) {
 973		ql_dbg(ql_dbg_async, vha, 0xffff,
 974		    "%s: %s Failed submission. %x.\n",
 975		    __func__, sp->name, rval);
 976		goto done;
 977	}
 978
 979	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
 980	    sp->name, sp->handle);
 981
 982	wait_for_completion(&comp);
 983	sp->comp = NULL;
 984
 985	rval = sp->rc;
 986	switch (rval) {
 987	case QLA_FUNCTION_TIMEOUT:
 988		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
 989		    __func__, sp->name, rval);
 990		break;
 991	case QLA_SUCCESS:
 992		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
 993		    __func__, sp->name);
 994		break;
 995	default:
 996		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
 997		    __func__, sp->name, rval);
 998		break;
 999	}
1000done:
1001	sp->free(sp);
1002	return rval;
1003}
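For reference, the only command issued through qla24xx_control_vp() within this file is VCE_COMMAND_DISABLE_VPS_LOGO_ALL, sent from qla24xx_disable_vp() (when firmware is started) and from qla2x00_vp_abort_isp(). A usage sketch mirroring that call site; the error handling shown is illustrative:

	/* Usage sketch mirroring qla24xx_disable_vp() above. */
	if (vha->hw->flags.fw_started &&
	    qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL) !=
	    QLA_SUCCESS)
		ql_dbg(ql_dbg_vport, vha, 0xffff,
		    "Disable/logout of VP[%d] failed.\n", vha->vp_idx);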