PageRenderTime 89ms CodeModel.GetById 22ms app.highlight 56ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/scsi/be2iscsi/be_main.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
C | 4461 lines | 3901 code | 471 blank | 89 comment | 425 complexity | 9f338297faf2b91c3eafe7613b4ab8e4 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0

Large files are truncated, but you can click here to view the full file

   1/**
   2 * Copyright (C) 2005 - 2011 Emulex
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version 2
   7 * as published by the Free Software Foundation.  The full GNU General
   8 * Public License is included in this distribution in the file called COPYING.
   9 *
  10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
  11 *
  12 * Contact Information:
  13 * linux-drivers@emulex.com
  14 *
  15 * Emulex
  16 * 3333 Susan Street
  17 * Costa Mesa, CA 92626
  18 */
  19
  20#include <linux/reboot.h>
  21#include <linux/delay.h>
  22#include <linux/slab.h>
  23#include <linux/interrupt.h>
  24#include <linux/blkdev.h>
  25#include <linux/pci.h>
  26#include <linux/string.h>
  27#include <linux/kernel.h>
  28#include <linux/semaphore.h>
  29#include <linux/iscsi_boot_sysfs.h>
  30
  31#include <scsi/libiscsi.h>
  32#include <scsi/scsi_transport_iscsi.h>
  33#include <scsi/scsi_transport.h>
  34#include <scsi/scsi_cmnd.h>
  35#include <scsi/scsi_device.h>
  36#include <scsi/scsi_host.h>
  37#include <scsi/scsi.h>
  38#include "be_main.h"
  39#include "be_iscsi.h"
  40#include "be_mgmt.h"
  41
/* Module-wide tunables, settable at load time via module_param() below. */
static unsigned int be_iopoll_budget = 10;	/* blk-iopoll budget per poll cycle */
static unsigned int be_max_phys_size = 64;	/* max contiguous alloc, in KiB */
static unsigned int enable_msix = 1;		/* use MSI-X interrupts when nonzero */
static unsigned int gcrashmode = 0;		/* not referenced in this chunk */
static unsigned int num_hba = 0;		/* not referenced in this chunk */

/* NOTE(review): MODULE_DEVICE_TABLE is repeated after the PCI ID table
 * definition further down; confirm this early instance is intentional. */
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
/* NOTE(review): be_iopoll_budget/enable_msix are declared unsigned but
 * registered with module_param() as int - verify the intended type. */
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
				   "contiguous memory that can be allocated."
				   "Range is 16 - 128");
  58
  59static int beiscsi_slave_configure(struct scsi_device *sdev)
  60{
  61	blk_queue_max_segment_size(sdev->request_queue, 65536);
  62	return 0;
  63}
  64
/**
 * beiscsi_eh_abort - SCSI eh_abort_handler for be2iscsi.
 * @sc: the command to abort.
 *
 * Asks the adapter firmware to invalidate the ICD backing @sc, then hands
 * off to libiscsi's generic abort path (iscsi_eh_abort).
 *
 * Returns SUCCESS if the command already completed (raced with completion),
 * FAILED on allocation/firmware-submission errors, otherwise whatever
 * iscsi_eh_abort() returns.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* Under the session lock, verify the task is still in flight. */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	/* Firmware command payload must live in DMA-coherent memory. */
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for"
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		/* Block until the MCC completion for this tag is signalled. */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}
 136
 137static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 138{
 139	struct iscsi_task *abrt_task;
 140	struct beiscsi_io_task *abrt_io_task;
 141	struct iscsi_conn *conn;
 142	struct beiscsi_conn *beiscsi_conn;
 143	struct beiscsi_hba *phba;
 144	struct iscsi_session *session;
 145	struct iscsi_cls_session *cls_session;
 146	struct invalidate_command_table *inv_tbl;
 147	struct be_dma_mem nonemb_cmd;
 148	unsigned int cid, tag, i, num_invalidate;
 149	int rc = FAILED;
 150
 151	/* invalidate iocbs */
 152	cls_session = starget_to_session(scsi_target(sc->device));
 153	session = cls_session->dd_data;
 154	spin_lock_bh(&session->lock);
 155	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
 156		goto unlock;
 157
 158	conn = session->leadconn;
 159	beiscsi_conn = conn->dd_data;
 160	phba = beiscsi_conn->phba;
 161	cid = beiscsi_conn->beiscsi_conn_cid;
 162	inv_tbl = phba->inv_tbl;
 163	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
 164	num_invalidate = 0;
 165	for (i = 0; i < conn->session->cmds_max; i++) {
 166		abrt_task = conn->session->cmds[i];
 167		abrt_io_task = abrt_task->dd_data;
 168		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
 169			continue;
 170
 171		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
 172			continue;
 173
 174		inv_tbl->cid = cid;
 175		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
 176		num_invalidate++;
 177		inv_tbl++;
 178	}
 179	spin_unlock_bh(&session->lock);
 180	inv_tbl = phba->inv_tbl;
 181
 182	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
 183				sizeof(struct invalidate_commands_params_in),
 184				&nonemb_cmd.dma);
 185	if (nonemb_cmd.va == NULL) {
 186		SE_DEBUG(DBG_LVL_1,
 187			 "Failed to allocate memory for"
 188			 "mgmt_invalidate_icds\n");
 189		return FAILED;
 190	}
 191	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
 192	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
 193	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
 194				   cid, &nonemb_cmd);
 195	if (!tag) {
 196		shost_printk(KERN_WARNING, phba->shost,
 197			     "mgmt_invalidate_icds could not be"
 198			     " submitted\n");
 199		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 200				    nonemb_cmd.va, nonemb_cmd.dma);
 201		return FAILED;
 202	} else {
 203		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 204					 phba->ctrl.mcc_numtag[tag]);
 205		free_mcc_tag(&phba->ctrl, tag);
 206	}
 207	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 208			    nonemb_cmd.va, nonemb_cmd.dma);
 209	return iscsi_eh_device_reset(sc);
 210unlock:
 211	spin_unlock_bh(&session->lock);
 212	return rc;
 213}
 214
 215static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
 216{
 217	struct beiscsi_hba *phba = data;
 218	struct mgmt_session_info *boot_sess = &phba->boot_sess;
 219	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
 220	char *str = buf;
 221	int rc;
 222
 223	switch (type) {
 224	case ISCSI_BOOT_TGT_NAME:
 225		rc = sprintf(buf, "%.*s\n",
 226			    (int)strlen(boot_sess->target_name),
 227			    (char *)&boot_sess->target_name);
 228		break;
 229	case ISCSI_BOOT_TGT_IP_ADDR:
 230		if (boot_conn->dest_ipaddr.ip_type == 0x1)
 231			rc = sprintf(buf, "%pI4\n",
 232				(char *)&boot_conn->dest_ipaddr.ip_address);
 233		else
 234			rc = sprintf(str, "%pI6\n",
 235				(char *)&boot_conn->dest_ipaddr.ip_address);
 236		break;
 237	case ISCSI_BOOT_TGT_PORT:
 238		rc = sprintf(str, "%d\n", boot_conn->dest_port);
 239		break;
 240
 241	case ISCSI_BOOT_TGT_CHAP_NAME:
 242		rc = sprintf(str,  "%.*s\n",
 243			     boot_conn->negotiated_login_options.auth_data.chap.
 244			     target_chap_name_length,
 245			     (char *)&boot_conn->negotiated_login_options.
 246			     auth_data.chap.target_chap_name);
 247		break;
 248	case ISCSI_BOOT_TGT_CHAP_SECRET:
 249		rc = sprintf(str,  "%.*s\n",
 250			     boot_conn->negotiated_login_options.auth_data.chap.
 251			     target_secret_length,
 252			     (char *)&boot_conn->negotiated_login_options.
 253			     auth_data.chap.target_secret);
 254		break;
 255	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
 256		rc = sprintf(str,  "%.*s\n",
 257			     boot_conn->negotiated_login_options.auth_data.chap.
 258			     intr_chap_name_length,
 259			     (char *)&boot_conn->negotiated_login_options.
 260			     auth_data.chap.intr_chap_name);
 261		break;
 262	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
 263		rc = sprintf(str,  "%.*s\n",
 264			     boot_conn->negotiated_login_options.auth_data.chap.
 265			     intr_secret_length,
 266			     (char *)&boot_conn->negotiated_login_options.
 267			     auth_data.chap.intr_secret);
 268		break;
 269	case ISCSI_BOOT_TGT_FLAGS:
 270		rc = sprintf(str, "2\n");
 271		break;
 272	case ISCSI_BOOT_TGT_NIC_ASSOC:
 273		rc = sprintf(str, "0\n");
 274		break;
 275	default:
 276		rc = -ENOSYS;
 277		break;
 278	}
 279	return rc;
 280}
 281
 282static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
 283{
 284	struct beiscsi_hba *phba = data;
 285	char *str = buf;
 286	int rc;
 287
 288	switch (type) {
 289	case ISCSI_BOOT_INI_INITIATOR_NAME:
 290		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
 291		break;
 292	default:
 293		rc = -ENOSYS;
 294		break;
 295	}
 296	return rc;
 297}
 298
 299static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
 300{
 301	struct beiscsi_hba *phba = data;
 302	char *str = buf;
 303	int rc;
 304
 305	switch (type) {
 306	case ISCSI_BOOT_ETH_FLAGS:
 307		rc = sprintf(str, "2\n");
 308		break;
 309	case ISCSI_BOOT_ETH_INDEX:
 310		rc = sprintf(str, "0\n");
 311		break;
 312	case ISCSI_BOOT_ETH_MAC:
 313		rc  = beiscsi_get_macaddr(buf, phba);
 314		if (rc < 0) {
 315			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
 316			return rc;
 317		}
 318	break;
 319	default:
 320		rc = -ENOSYS;
 321		break;
 322	}
 323	return rc;
 324}
 325
 326
 327static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
 328{
 329	int rc;
 330
 331	switch (type) {
 332	case ISCSI_BOOT_TGT_NAME:
 333	case ISCSI_BOOT_TGT_IP_ADDR:
 334	case ISCSI_BOOT_TGT_PORT:
 335	case ISCSI_BOOT_TGT_CHAP_NAME:
 336	case ISCSI_BOOT_TGT_CHAP_SECRET:
 337	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
 338	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
 339	case ISCSI_BOOT_TGT_NIC_ASSOC:
 340	case ISCSI_BOOT_TGT_FLAGS:
 341		rc = S_IRUGO;
 342		break;
 343	default:
 344		rc = 0;
 345		break;
 346	}
 347	return rc;
 348}
 349
 350static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
 351{
 352	int rc;
 353
 354	switch (type) {
 355	case ISCSI_BOOT_INI_INITIATOR_NAME:
 356		rc = S_IRUGO;
 357		break;
 358	default:
 359		rc = 0;
 360		break;
 361	}
 362	return rc;
 363}
 364
 365
 366static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
 367{
 368	int rc;
 369
 370	switch (type) {
 371	case ISCSI_BOOT_ETH_FLAGS:
 372	case ISCSI_BOOT_ETH_MAC:
 373	case ISCSI_BOOT_ETH_INDEX:
 374		rc = S_IRUGO;
 375		break;
 376	default:
 377		rc = 0;
 378		break;
 379	}
 380	return rc;
 381}
 382
 383/*------------------- PCI Driver operations and data ----------------- */
/* PCI functions claimed by this driver (BE_*/OC_* IDs from be_main.h). */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 393
/*
 * SCSI host template. Generic command queueing comes from libiscsi;
 * abort and device-reset are overridden by the be2iscsi handlers above,
 * which invalidate firmware ICDs before deferring to libiscsi.
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

/* iSCSI transport handle; presumably assigned during module init,
 * which is not visible in this chunk. */
static struct scsi_transport_template *beiscsi_scsi_transport;
 414
/**
 * beiscsi_hba_alloc - allocate and register a Scsi_Host for one adapter.
 * @pcidev: PCI function backing the adapter.
 *
 * Allocates the iSCSI host with a struct beiscsi_hba as private data,
 * fills in the host limits, takes a reference on @pcidev and registers
 * the host with the midlayer.
 *
 * Returns the new beiscsi_hba, or NULL on any failure (all partial state
 * is released).
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	/* Host-wide limits advertised to the SCSI midlayer. */
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);	/* hold a device reference */
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	/* Undo the reference and the host allocation taken above. */
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
 448
/*
 * beiscsi_unmap_pci_function - tear down whichever BAR mappings
 * beiscsi_map_pci_bars() established. Pointers are NULLed so the
 * function is safe to call repeatedly / on partial setup.
 */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
 464
/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and config BARs.
 * @phba: driver instance receiving the mappings.
 * @pcidev: PCI function to map.
 *
 * BAR 2 holds the CSR block, BAR 4 the doorbells (only the first
 * 128 KiB are mapped), and the PCI config shadow is BAR 1 on GEN2
 * parts, BAR 0 otherwise.
 *
 * Returns 0 on success or -ENOMEM, in which case any mapping already
 * made is torn down again.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	/* BAR 2: control/status registers */
	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* BAR 4: doorbells; only the first 128 KiB are used */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* Config shadow BAR differs between adapter generations. */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
 505
/**
 * beiscsi_enable_pci - enable the PCI function and configure DMA.
 * @pcidev: PCI function to bring up.
 *
 * Enables the device, sets bus mastering and configures the coherent
 * DMA mask (64-bit preferred, 32-bit fallback).
 *
 * NOTE(review): only the *consistent* DMA mask is set here; the
 * streaming mask (pci_set_dma_mask) is never configured - confirm this
 * is intentional for this hardware.
 *
 * Returns 0 on success or a negative errno; the device is disabled
 * again if the DMA mask cannot be set.
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
 528
/**
 * be_ctrl_init - map BARs and allocate the mailbox used for FW commands.
 * @phba: driver instance.
 * @pdev: backing PCI function.
 *
 * The mailbox is allocated 16 bytes oversize so that both the virtual
 * and DMA addresses can be rounded up to a 16-byte boundary (presumably
 * a hardware alignment requirement - inferred from the +16/PTR_ALIGN
 * usage, not verified here). Also initializes the mailbox/MCC locks.
 *
 * Returns 0 on success or a negative errno; BAR mappings are released
 * if the mailbox allocation fails.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* Over-allocate by 16 bytes to allow alignment below. */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	/* Publish the 16-byte-aligned view that is handed to firmware. */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
 560
 561static void beiscsi_get_params(struct beiscsi_hba *phba)
 562{
 563	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
 564				    - (phba->fw_config.iscsi_cid_count
 565				    + BE2_TMFS
 566				    + BE2_NOPOUT_REQ));
 567	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
 568	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
 569	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
 570	phba->params.num_sge_per_io = BE2_SGE;
 571	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
 572	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
 573	phba->params.eq_timer = 64;
 574	phba->params.num_eq_entries =
 575	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
 576				    + BE2_TMFS) / 512) + 1) * 512;
 577	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
 578				? 1024 : phba->params.num_eq_entries;
 579	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
 580			     phba->params.num_eq_entries);
 581	phba->params.num_cq_entries =
 582	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
 583				    + BE2_TMFS) / 512) + 1) * 512;
 584	phba->params.wrbs_per_cxn = 256;
 585}
 586
 587static void hwi_ring_eq_db(struct beiscsi_hba *phba,
 588			   unsigned int id, unsigned int clr_interrupt,
 589			   unsigned int num_processed,
 590			   unsigned char rearm, unsigned char event)
 591{
 592	u32 val = 0;
 593	val |= id & DB_EQ_RING_ID_MASK;
 594	if (rearm)
 595		val |= 1 << DB_EQ_REARM_SHIFT;
 596	if (clr_interrupt)
 597		val |= 1 << DB_EQ_CLR_SHIFT;
 598	if (event)
 599		val |= 1 << DB_EQ_EVNT_SHIFT;
 600	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
 601	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
 602}
 603
/**
 * be_isr_mcc - MSI-X interrupt handler for the MCC event queue.
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj servicing the MCC EQ
 *
 * Drains every valid entry on the event queue. Entries carrying the MCC
 * CQ's resource id mark management-completion work pending, which is
 * deferred to the adapter workqueue. Consumed entries are acknowledged
 * and the EQ re-armed via the doorbell.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba =  pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	/* Consume valid EQ entries, clearing the valid bit as we go. */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			/* MCC completion pending; handled from work_cqs. */
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	/* Acknowledge consumed entries and re-arm the EQ. */
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
 650
/**
 * be_isr_msix - MSI-X interrupt handler for a per-CPU I/O event queue.
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for this vector
 *
 * When blk-iopoll is enabled, CQ processing is scheduled into softirq
 * context and the EQ doorbell is rung with rearm=0 (iopoll re-arms when
 * it finishes). Otherwise CQ work is pushed to the adapter workqueue
 * and the EQ is re-armed immediately.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		/* Schedule CQ processing in softirq; consume EQ entries. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		/* rearm=0: iopoll re-arms the EQ when polling completes. */
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		/* No iopoll: defer CQ processing to the workqueue. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
 710
/**
 * be_isr - legacy (INTx) interrupt handler.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * Shared-IRQ handler used when MSI-X is not enabled. Reads the CEV ISR
 * register to check whether this function raised the interrupt, then
 * walks EQ 0 and demultiplexes entries between MCC completions (matched
 * by the MCC CQ's resource id) and I/O completions.
 *
 * Returns IRQ_HANDLED if any work was found, IRQ_NONE otherwise (so the
 * shared-IRQ core can try other handlers).
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	/* Per-function ISR register: zero means the IRQ is not ours. */
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		/* MCC entries go to the workqueue, I/O entries to iopoll. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			/* Re-arm only if no I/O entries were scheduled to
			 * iopoll (iopoll re-arms the EQ itself). */
			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					      (num_ioeq_processed +
					       num_mcceq_processed) , 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		/* No iopoll: everything is deferred to the workqueue. */
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
 818
 819static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 820{
 821	struct pci_dev *pcidev = phba->pcidev;
 822	struct hwi_controller *phwi_ctrlr;
 823	struct hwi_context_memory *phwi_context;
 824	int ret, msix_vec, i, j;
 825	char desc[32];
 826
 827	phwi_ctrlr = phba->phwi_ctrlr;
 828	phwi_context = phwi_ctrlr->phwi_ctxt;
 829
 830	if (phba->msix_enabled) {
 831		for (i = 0; i < phba->num_cpus; i++) {
 832			sprintf(desc, "beiscsi_msix_%04x", i);
 833			msix_vec = phba->msix_entries[i].vector;
 834			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
 835					  &phwi_context->be_eq[i]);
 836			if (ret) {
 837				shost_printk(KERN_ERR, phba->shost,
 838					     "beiscsi_init_irqs-Failed to"
 839					     "register msix for i = %d\n", i);
 840				if (!i)
 841					return ret;
 842				goto free_msix_irqs;
 843			}
 844		}
 845		msix_vec = phba->msix_entries[i].vector;
 846		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
 847				  &phwi_context->be_eq[i]);
 848		if (ret) {
 849			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
 850				     "Failed to register beiscsi_msix_mcc\n");
 851			i++;
 852			goto free_msix_irqs;
 853		}
 854
 855	} else {
 856		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
 857				  "beiscsi", phba);
 858		if (ret) {
 859			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
 860				     "Failed to register irq\\n");
 861			return ret;
 862		}
 863	}
 864	return 0;
 865free_msix_irqs:
 866	for (j = i - 1; j == 0; j++)
 867		free_irq(msix_vec, &phwi_context->be_eq[j]);
 868	return ret;
 869}
 870
 871static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 872			   unsigned int id, unsigned int num_processed,
 873			   unsigned char rearm, unsigned char event)
 874{
 875	u32 val = 0;
 876	val |= id & DB_CQ_RING_ID_MASK;
 877	if (rearm)
 878		val |= 1 << DB_CQ_REARM_SHIFT;
 879	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
 880	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 881}
 882
/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi.
 * @beiscsi_conn: driver connection the PDU arrived on.
 * @phba: adapter instance (used for logging only).
 * @cid: connection id (unused here).
 * @ppdu: PDU basic header.
 * @pdu_len: header length (unused here).
 * @pbuffer: payload buffer, if any.
 * @buf_len: payload length.
 *
 * Applies per-opcode fixups (drops NOP-IN payloads, restores the
 * libiscsi ITT on login/text responses) and then completes the PDU
 * through __iscsi_complete_pdu() under the session lock.
 *
 * Returns 0 on success, 1 for unrecognized opcodes.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* NOP-IN carries no payload worth forwarding. */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* Reject PDUs are expected to carry a 48-byte header copy. */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		/* Restore the ITT libiscsi assigned to the login task. */
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
						& PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
 931
 932static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 933{
 934	struct sgl_handle *psgl_handle;
 935
 936	if (phba->io_sgl_hndl_avbl) {
 937		SE_DEBUG(DBG_LVL_8,
 938			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
 939			 phba->io_sgl_alloc_index);
 940		psgl_handle = phba->io_sgl_hndl_base[phba->
 941						io_sgl_alloc_index];
 942		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
 943		phba->io_sgl_hndl_avbl--;
 944		if (phba->io_sgl_alloc_index == (phba->params.
 945						 ios_per_ctrl - 1))
 946			phba->io_sgl_alloc_index = 0;
 947		else
 948			phba->io_sgl_alloc_index++;
 949	} else
 950		psgl_handle = NULL;
 951	return psgl_handle;
 952}
 953
 954static void
 955free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 956{
 957	SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
 958		 phba->io_sgl_free_index);
 959	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
 960		/*
 961		 * this can happen if clean_task is called on a task that
 962		 * failed in xmit_task or alloc_pdu.
 963		 */
 964		 SE_DEBUG(DBG_LVL_8,
 965			 "Double Free in IO SGL io_sgl_free_index=%d,"
 966			 "value there=%p\n", phba->io_sgl_free_index,
 967			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
 968		return;
 969	}
 970	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
 971	phba->io_sgl_hndl_avbl++;
 972	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
 973		phba->io_sgl_free_index = 0;
 974	else
 975		phba->io_sgl_free_index++;
 976}
 977
 978/**
 979 * alloc_wrb_handle - To allocate a wrb handle
 980 * @phba: The hba pointer
 981 * @cid: The cid to use for allocation
 982 *
 983 * This happens under session_lock until submission to chip
 984 */
 985struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
 986{
 987	struct hwi_wrb_context *pwrb_context;
 988	struct hwi_controller *phwi_ctrlr;
 989	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
 990
 991	phwi_ctrlr = phba->phwi_ctrlr;
 992	pwrb_context = &phwi_ctrlr->wrb_context[cid];
 993	if (pwrb_context->wrb_handles_available >= 2) {
 994		pwrb_handle = pwrb_context->pwrb_handle_base[
 995					    pwrb_context->alloc_index];
 996		pwrb_context->wrb_handles_available--;
 997		if (pwrb_context->alloc_index ==
 998						(phba->params.wrbs_per_cxn - 1))
 999			pwrb_context->alloc_index = 0;
1000		else
1001			pwrb_context->alloc_index++;
1002		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1003						pwrb_context->alloc_index];
1004		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
1005	} else
1006		pwrb_handle = NULL;
1007	return pwrb_handle;
1008}
1009
1010/**
1011 * free_wrb_handle - To free the wrb handle back to pool
1012 * @phba: The hba pointer
1013 * @pwrb_context: The context to free from
1014 * @pwrb_handle: The wrb_handle to free
1015 *
1016 * This happens under session_lock until submission to chip
1017 */
1018static void
1019free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1020		struct wrb_handle *pwrb_handle)
1021{
1022	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1023	pwrb_context->wrb_handles_available++;
1024	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1025		pwrb_context->free_index = 0;
1026	else
1027		pwrb_context->free_index++;
1028
1029	SE_DEBUG(DBG_LVL_8,
1030		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
1031		 "wrb_handles_available=%d\n",
1032		 pwrb_handle, pwrb_context->free_index,
1033		 pwrb_context->wrb_handles_available);
1034}
1035
1036static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1037{
1038	struct sgl_handle *psgl_handle;
1039
1040	if (phba->eh_sgl_hndl_avbl) {
1041		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1042		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1043		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
1044			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
1045		phba->eh_sgl_hndl_avbl--;
1046		if (phba->eh_sgl_alloc_index ==
1047		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1048		     1))
1049			phba->eh_sgl_alloc_index = 0;
1050		else
1051			phba->eh_sgl_alloc_index++;
1052	} else
1053		psgl_handle = NULL;
1054	return psgl_handle;
1055}
1056
1057void
1058free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1059{
1060
1061	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
1062			     phba->eh_sgl_free_index);
1063	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1064		/*
1065		 * this can happen if clean_task is called on a task that
1066		 * failed in xmit_task or alloc_pdu.
1067		 */
1068		SE_DEBUG(DBG_LVL_8,
1069			 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
1070			 phba->eh_sgl_free_index);
1071		return;
1072	}
1073	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1074	phba->eh_sgl_hndl_avbl++;
1075	if (phba->eh_sgl_free_index ==
1076	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1077		phba->eh_sgl_free_index = 0;
1078	else
1079		phba->eh_sgl_free_index++;
1080}
1081
1082static void
1083be_complete_io(struct beiscsi_conn *beiscsi_conn,
1084	       struct iscsi_task *task, struct sol_cqe *psol)
1085{
1086	struct beiscsi_io_task *io_task = task->dd_data;
1087	struct be_status_bhs *sts_bhs =
1088				(struct be_status_bhs *)io_task->cmd_bhs;
1089	struct iscsi_conn *conn = beiscsi_conn->conn;
1090	unsigned int sense_len;
1091	unsigned char *sense;
1092	u32 resid = 0, exp_cmdsn, max_cmdsn;
1093	u8 rsp, status, flags;
1094
1095	exp_cmdsn = (psol->
1096			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1097			& SOL_EXP_CMD_SN_MASK);
1098	max_cmdsn = ((psol->
1099			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1100			& SOL_EXP_CMD_SN_MASK) +
1101			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1102				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1103	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
1104						& SOL_RESP_MASK) >> 16);
1105	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
1106						& SOL_STS_MASK) >> 8);
1107	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1108					& SOL_FLAGS_MASK) >> 24) | 0x80;
1109
1110	task->sc->result = (DID_OK << 16) | status;
1111	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1112		task->sc->result = DID_ERROR << 16;
1113		goto unmap;
1114	}
1115
1116	/* bidi not initially supported */
1117	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1118		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
1119				32] & SOL_RES_CNT_MASK);
1120
1121		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1122			task->sc->result = DID_ERROR << 16;
1123
1124		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1125			scsi_set_resid(task->sc, resid);
1126			if (!status && (scsi_bufflen(task->sc) - resid <
1127			    task->sc->underflow))
1128				task->sc->result = DID_ERROR << 16;
1129		}
1130	}
1131
1132	if (status == SAM_STAT_CHECK_CONDITION) {
1133		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
1134		sense = sts_bhs->sense_info + sizeof(unsigned short);
1135		sense_len =  cpu_to_be16(*slen);
1136		memcpy(task->sc->sense_buffer, sense,
1137		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1138	}
1139
1140	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
1141		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1142							& SOL_RES_CNT_MASK)
1143			 conn->rxdata_octets += (psol->
1144			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1145			     & SOL_RES_CNT_MASK);
1146	}
1147unmap:
1148	scsi_dma_unmap(io_task->scsi_cmnd);
1149	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1150}
1151
/*
 * be_complete_logout - build an ISCSI_OP_LOGOUT_RSP PDU from a solicited
 * CQE and hand it to libiscsi via __iscsi_complete_pdu().
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;	/* fixed Time2Wait; Time2Retain cleared below */
	hdr->t2retain = 0;
	/* OR with 0x80 presumably forces the F (final) bit — confirm */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	/* ExpCmdSN comes straight from the CQE ... */
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	/* ... MaxCmdSN is ExpCmdSN plus the advertised window minus one */
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* a logout response carries no data segment */
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1183
/*
 * be_complete_tmf - build an ISCSI_OP_SCSI_TMFUNC_RSP PDU from a solicited
 * CQE and complete it to libiscsi.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	/* OR with 0x80 presumably forces the F (final) bit — confirm */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1, both taken from the CQE */
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1207
/*
 * hwi_complete_drvr_msgs - completion path for driver-internal (mgmt) WRBs.
 * Releases the management SGL handle and the WRB handle that backed the
 * request; nothing is surfaced to libiscsi here.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	/* CQE carries the absolute cid; rebase by iscsi_cid_start to index
	 * the driver's per-connection WRB context.
	 */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	/* SGL handles have their own lock; WRB handles are guarded by the
	 * session lock.
	 */
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1238
/*
 * be_complete_nopin_resp - build an ISCSI_OP_NOOP_IN PDU from a solicited
 * CQE (response to our NOP-Out) and complete it to libiscsi.
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	/* OR with 0x80 presumably forces the F (final) bit — confirm */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	/* MaxCmdSN = ExpCmdSN + command window - 1, both from the CQE */
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1260
1261static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1262			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1263{
1264	struct hwi_wrb_context *pwrb_context;
1265	struct wrb_handle *pwrb_handle;
1266	struct iscsi_wrb *pwrb = NULL;
1267	struct hwi_controller *phwi_ctrlr;
1268	struct iscsi_task *task;
1269	unsigned int type;
1270	struct iscsi_conn *conn = beiscsi_conn->conn;
1271	struct iscsi_session *session = conn->session;
1272
1273	phwi_ctrlr = phba->phwi_ctrlr;
1274	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1275				(struct amap_sol_cqe, cid) / 32]
1276				& SOL_CID_MASK) >> 6) -
1277				phba->fw_config.iscsi_cid_start];
1278	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1279				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1280				32] & SOL_WRB_INDEX_MASK) >> 16)];
1281	task = pwrb_handle->pio_handle;
1282	pwrb = pwrb_handle->pwrb;
1283	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1284				 WRB_TYPE_MASK) >> 28;
1285
1286	spin_lock_bh(&session->lock);
1287	switch (type) {
1288	case HWH_TYPE_IO:
1289	case HWH_TYPE_IO_RD:
1290		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1291		     ISCSI_OP_NOOP_OUT)
1292			be_complete_nopin_resp(beiscsi_conn, task, psol);
1293		else
1294			be_complete_io(beiscsi_conn, task, psol);
1295		break;
1296
1297	case HWH_TYPE_LOGOUT:
1298		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1299			be_complete_logout(beiscsi_conn, task, psol);
1300		else
1301			be_complete_tmf(beiscsi_conn, task, psol);
1302
1303		break;
1304
1305	case HWH_TYPE_LOGIN:
1306		SE_DEBUG(DBG_LVL_1,
1307			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1308			 "- Solicited path\n");
1309		break;
1310
1311	case HWH_TYPE_NOP:
1312		be_complete_nopin_resp(beiscsi_conn, task, psol);
1313		break;
1314
1315	default:
1316		shost_printk(KERN_WARNING, phba->shost,
1317				"In hwi_complete_cmd, unknown type = %d"
1318				"wrb_index 0x%x CID 0x%x\n", type,
1319				((psol->dw[offsetof(struct amap_iscsi_wrb,
1320				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1321				((psol->dw[offsetof(struct amap_sol_cqe,
1322				cid) / 32] & SOL_CID_MASK) >> 6));
1323		break;
1324	}
1325
1326	spin_unlock_bh(&session->lock);
1327}
1328
1329static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1330					  *pasync_ctx, unsigned int is_header,
1331					  unsigned int host_write_ptr)
1332{
1333	if (is_header)
1334		return &pasync_ctx->async_entry[host_write_ptr].
1335		    header_busy_list;
1336	else
1337		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1338}
1339
/*
 * hwi_get_async_handle - locate the async_pdu_handle matching a default-PDU
 * CQE by reconstructing the buffer's physical address and scanning the
 * corresponding busy list.  Returns NULL (after logging) on an unexpected
 * CQE code; otherwise fills in the handle's cri/is_header/buffer_len and
 * stores the CQE ring index in *pcq_index.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	/* NOTE(review): unsigned char, initialized to -1 (i.e. 255) as a
	 * "not found" sentinel — confirm rings never exceed 255 entries.
	 */
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* Rebuild the buffer base address: the CQE's low address dword
	 * presumably has the data placement length folded in, so it is
	 * subtracted back out — confirm against the adapter spec.
	 */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		/* offset from the header pool base selects the buffer slot */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		/* offset from the data pool base selects the buffer slot */
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d\n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* NOTE(review): this sanity check compares against the *data* ring
	 * size even for header buffers, and uses <= rather than < — looks
	 * off by one and off by ring; confirm before tightening.
	 */
	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	/* rebase the absolute cid to the driver's 0-based CRI */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1418
/*
 * hwi_update_async_writables - advance the selected ring's ep_read_ptr up
 * to cq_index, marking the first busy handle on each visited slot as
 * consumed, and add the number of slots walked to the ring's writables
 * count.  A walk of zero slots means a duplicate notification.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* select the header or data ring's bookkeeping */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	/* walk (with wraparound) until we reach the CQE's index */
	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1469
1470static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1471				       unsigned int cri)
1472{
1473	struct hwi_controller *phwi_ctrlr;
1474	struct hwi_async_pdu_context *pasync_ctx;
1475	struct async_pdu_handle *pasync_handle, *tmp_handle;
1476	struct list_head *plist;
1477	unsigned int i = 0;
1478
1479	phwi_ctrlr = phba->phwi_ctrlr;
1480	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1481
1482	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1483
1484	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1485		list_del(&pasync_handle->link);
1486
1487		if (i == 0) {
1488			list_add_tail(&pasync_handle->link,
1489				      &pasync_ctx->async_header.free_list);
1490			pasync_ctx->async_header.free_entries++;
1491			i++;
1492		} else {
1493			list_add_tail(&pasync_handle->link,
1494				      &pasync_ctx->async_data.free_list);
1495			pasync_ctx->async_data.free_entries++;
1496			i++;
1497		}
1498	}
1499
1500	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1501	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1502	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1503	return 0;
1504}
1505
1506static struct phys_addr *
1507hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1508		     unsigned int is_header, unsigned int host_write_ptr)
1509{
1510	struct phys_addr *pasync_sge = NULL;
1511
1512	if (is_header)
1513		pasync_sge = pasync_ctx->async_header.ring_base;
1514	else
1515		pasync_sge = pasync_ctx->async_data.ring_base;
1516
1517	return pasync_sge + host_write_ptr;
1518}
1519
/*
 * hwi_post_async_buffers - replenish the header or data default-PDU ring.
 * Moves up to min(writables, free_entries) handles (rounded down to a
 * multiple of 8) from the free list onto the per-slot busy lists, writes
 * their addresses into the ring SGEs, updates the ring bookkeeping, and
 * rings the RXULP0 doorbell with the number of entries posted.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* snapshot the bookkeeping of whichever ring we are refilling */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* post only in batches of 8 — presumably a hardware granularity
	 * requirement; confirm against the adapter spec.
	 */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): hi is assigned address_lo and lo is
			 * assigned address_hi — looks deliberate given the
			 * SGE layout, but confirm the field naming.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* build and ring the default-PDU doorbell */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1601
1602static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1603					 struct beiscsi_conn *beiscsi_conn,
1604					 struct i_t_dpdu_cqe *pdpdu_cqe)
1605{
1606	struct hwi_controller *phwi_ctrlr;
1607	struct hwi_async_pdu_context *pasync_ctx;
1608	struct async_pdu_handle *pasync_handle = NULL;
1609	unsigned int cq_index = -1;
1610
1611	phwi_ctrlr = phba->phwi_ctrlr;
1612	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1613
1614	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1615					     pdpdu_cqe, &cq_index);
1616	BUG_ON(pasync_handle->is_header != 0);
1617	if (pasync_handle->consumed == 0)
1618		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1619					   cq_index);
1620
1621	hwi_free_async_msg(phba, pasync_handle->cri);
1622	hwi_post_async_buffers(phba, pasync_handle->is_header);
1623}
1624
1625static unsigned int
1626hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1627		  struct beiscsi_hba *phba,
1628		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1629{
1630	struct list_head *plist;
1631	struct async_pdu_handle *pasync_handle;
1632	void *phdr = NULL;
1633	unsigned int hdr_len = 0, buf_len = 0;
1634	unsigned int status, index = 0, offset = 0;
1635	void *pfirst_buffer = NULL;
1636	unsigned int num_buf = 0;
1637
1638	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1639
1640	list_for_each_entry(pasync_handle, plist, link) {
1641		if (index == 0) {
1642			phdr = pasync_handle->pbuffer;
1643			hdr_len = pasync_handle->buffer_len;
1644		} else {
1645			buf_len = pasync_handle->buffer_len;
1646			if (!num_buf) {
1647				pfirst_buffer = pasync_handle->pbuffer;
1648				num_buf++;
1649			}
1650			memcpy(pfirst_buffer + offset,
1651			       pasync_handle->pbuffer, buf_len);
1652			offset = buf_len;
1653		}
1654		index++;
1655	}
1656
1657	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1658					   (beiscsi_conn->beiscsi_conn_cid -
1659					    phba->fw_config.iscsi_cid_start),
1660					    phdr, hdr_len, pfirst_buffer,
1661					    buf_len);
1662
1663	if (status == 0)
1664		hwi_free_async_msg(phba, cri);
1665	return 0;
1666}
1667
1668static unsigned int
1669hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1670		     struct beiscsi_hba *phba,
1671		     struct async_pdu_handle *pasync_handle)
1672{
1673	struct hwi_async_pdu_context *pasync_ctx;
1674	struct hwi_controller *phwi_ctrlr;
1675	unsigned int bytes_needed = 0, status = 0;
1676	unsigned short cri = pasync_handle->cri;
1677	struct pdu_base *ppdu;
1678
1679	phwi_ctrlr = phba->phwi_ctrlr;
1680	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1681
1682	list_del(&pasync_handle->link);
1683	if (pasync_handle->is_header) {
1684		pasync_ctx->async_header.busy_entries--;
1685		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1686			hwi_free_async_msg(phba, cri);
1687			BUG();
1688		}
1689
1690		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1691		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1692		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1693				(unsigned short)pasync_handle->buffer_len;
1694		list_add_tail(&pasync_handle->link,
1695			      &pasync_ctx->async_entry[cri].wait_queue.list);
1696
1697		ppdu = pasync_handle->pbuffer;
1698		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1699			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1700			0xFFFF0000) | ((be16_to_cpu((ppdu->
1701			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1702			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1703
1704		if (status == 0) {
1705			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1706			    bytes_needed;
1707
1708			if (bytes_needed == 0)
1709				status = hwi_fwd_async_msg(beiscsi_conn, phba,
1710							   pasync_ctx, cri);
1711		}
1712	} else {
1713		pasync_ctx->async_data.busy_entries--;
1714		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1715			list_add_tail(&pasync_handle->link,
1716				      &pasync_ctx->async_entry[cri].wait_queue.
1717				      list);
1718			pasync_ctx->async_entry[cri].wait_queue.
1719				bytes_received +=
1720				(unsigned short)pasync_handle->buffer_len;
1721
1722			if (pasync_ctx->async_entry[cri].wait_queue.
1723			    bytes_received >=
1724			    pasync_ctx->async_entry[cri].wait_queue.
1725			    bytes_needed)
1726				status = hwi_fwd_asy

Large files files are truncated, but you can click here to view the full file