PageRenderTime 135ms CodeModel.GetById 17ms app.highlight 102ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/scsi/mpt2sas/mpt2sas_base.c

http://github.com/mirrors/linux
C | 4670 lines | 3315 code | 514 blank | 841 comment | 519 complexity | e01d610ed22f16a918a3846895ea6f54 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/*
   2 * This is the Fusion MPT base driver providing common API layer interface
   3 * for access to MPT (Message Passing Technology) firmware.
   4 *
   5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
   6 * Copyright (C) 2007-2013  LSI Corporation
   7 *  (mailto:DL-MPTFusionLinux@lsi.com)
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; either version 2
  12 * of the License, or (at your option) any later version.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 *
  19 * NO WARRANTY
  20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24 * solely responsible for determining the appropriateness of using and
  25 * distributing the Program and assumes all risks associated with its
  26 * exercise of rights under this Agreement, including but not limited to
  27 * the risks and costs of program errors, damage to or loss of data,
  28 * programs or equipment, and unavailability or interruption of operations.
  29
  30 * DISCLAIMER OF LIABILITY
  31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38
  39 * You should have received a copy of the GNU General Public License
  40 * along with this program; if not, write to the Free Software
  41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
  42 * USA.
  43 */
  44
  45#include <linux/kernel.h>
  46#include <linux/module.h>
  47#include <linux/errno.h>
  48#include <linux/init.h>
  49#include <linux/slab.h>
  50#include <linux/types.h>
  51#include <linux/pci.h>
  52#include <linux/kdev_t.h>
  53#include <linux/blkdev.h>
  54#include <linux/delay.h>
  55#include <linux/interrupt.h>
  56#include <linux/dma-mapping.h>
  57#include <linux/sort.h>
  58#include <linux/io.h>
  59#include <linux/time.h>
  60#include <linux/kthread.h>
  61#include <linux/aer.h>
  62
  63#include "mpt2sas_base.h"
  64
/* Completion callbacks registered by the scsih/ctl/config layers,
 * indexed by cb_idx (see _base_get_cb_idx()). */
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* Upper bounds applied when sizing controller queues. */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;	/* -1: keep controller default */
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;	/* -1: keep driver default */
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

/* Registered via module_param_call() below so updates propagate to
 * every active adapter at runtime, not just at load time. */
static int mpt2sas_fwfault_debug;
MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
	"and halt firmware - (default=0)");

static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
  90
  91/**
  92 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  93 *
  94 */
  95static int
  96_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
  97{
  98	int ret = param_set_int(val, kp);
  99	struct MPT2SAS_ADAPTER *ioc;
 100
 101	if (ret)
 102		return ret;
 103
 104	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
 105	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
 106		ioc->fwfault_debug = mpt2sas_fwfault_debug;
 107	return 0;
 108}
 109
/* Hook the custom setter above so writes to the mpt2sas_fwfault_debug
 * sysfs parameter fan out to all registered IOCs. */
module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
    param_get_int, &mpt2sas_fwfault_debug, 0644);
 112
 113/**
 114 *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
 115 * @arg: input argument, used to derive ioc
 116 *
 117 * Return 0 if controller is removed from pci subsystem.
 118 * Return -1 for other case.
 119 */
 120static int mpt2sas_remove_dead_ioc_func(void *arg)
 121{
 122		struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
 123		struct pci_dev *pdev;
 124
 125		if ((ioc == NULL))
 126			return -1;
 127
 128		pdev = ioc->pdev;
 129		if ((pdev == NULL))
 130			return -1;
 131		pci_stop_and_remove_bus_device(pdev);
 132		return 0;
 133}
 134
 135
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Periodic poller: reads the IOC doorbell state and either removes a
 * dead controller, hard-resets a faulted one, or simply re-arms itself
 * for the next FAULT_POLLING_INTERVAL.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT2SAS_ADAPTER *ioc =
	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;

	/* NOTE: the rearm_timer label below is entered with this lock
	 * held; every path to it must hold ioc_reset_in_progress_lock. */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
	/* all state bits set — NOTE(review): consistent with reads of a
	 * dead/unreachable device returning 0xFFFFFFFF; confirm */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
			ioc->name, __func__);

		/* It may be possible that EEH recovery can resolve some of
		 * pci bus failure issues rather removing the dead ioc function
		 * by considering controller is in a non-operational state. So
		 * here priority is given to the EEH recovery. If it does not
		 * resolve this issue, mpt2sas driver will consider this
		 * controller to non-operational state and remove the dead ioc
		 * function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host in a separate kthread; see
		 * mpt2sas_remove_dead_ioc_func(). */
		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
		    "mpt2sas_dead_ioc_%d", ioc->id);
		if (IS_ERR(p)) {
			printk(MPT2SAS_ERR_FMT
			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		} else {
		    printk(MPT2SAS_ERR_FMT
			"%s: Running mpt2sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		}

		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		/* re-read state: if still faulted after the reset, log the
		 * fault code for diagnostics */
		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt2sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
 227
/**
 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Creates a dedicated single-threaded workqueue and arms the delayed
 * work that polls IOC state every FAULT_POLLING_INTERVAL milliseconds.
 *
 * Return nothing.
 */
void
mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
{
	unsigned long	 flags;

	/* already armed */
	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */
	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
			return;
	}
	/* queue under the lock: mpt2sas_base_stop_watchdog() NULLs
	 * fault_reset_work_q under this same lock */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
 261
/**
 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
{
	unsigned long	 flags;
	struct workqueue_struct *wq;

	/* clear the pointer under the lock so the poll work and
	 * mpt2sas_base_start_watchdog() see the queue as gone */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		/* if cancel failed the work may be running: drain it
		 * before tearing the queue down */
		if (!cancel_delayed_work(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}
 285
/**
 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code (low 16 bits of the doorbell register,
 *	i.e. doorbell & MPI2_DOORBELL_DATA_MASK as passed by callers)
 *
 * Return nothing.
 */
void
mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
{
	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
	    ioc->name, fault_code);
}
 299
/**
 * mpt2sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */
void
mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* no-op unless the fwfault_debug module parameter is enabled */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt2sas_base_fault_info(ioc , doorbell);
	else {
		/* magic value the firmware recognizes as a halt request */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
		    "timeout\n", ioc->name);
	}

	/* deliberately take the whole system down so state is preserved */
	panic("panic in %s\n", __func__);
}
 330
 331#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Maps an IOCStatus code to a short description and logs it along with
 * a hex dump of the originating request frame.  Statuses left without a
 * description fall out silently (desc stays NULL).
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
     MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* invalid-page is an expected probe result, not worth logging */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* these are reported through the SCSI completion path; leave
	 * desc NULL so nothing is logged here */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* pick a frame size for the dump based on the request function */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);

	/* frame_sz is in bytes; _debug_dump_mf takes 32-bit word count */
	_debug_dump_mf(request_hdr, frame_sz/4);
}
 548
/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Logs a human-readable name for the event, but only when the
 * MPT_DEBUG_EVENTS logging level is enabled.  IR (integrated RAID)
 * events are suppressed when ioc->hide_ir_msg is set.
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		/* discovery gets a richer message (start/stop + status),
		 * printed here and returned early */
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			printk("discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		printk("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	}

	/* unknown or suppressed events are not logged */
	if (!desc)
		return;

	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
}
 639#endif
 640
/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Decodes the 32-bit IOCLogInfo word into bus type, originator, code
 * and subcode, and logs it unless it is a known-noisy value.
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
{
	/* overlay for picking the log-info word apart.
	 * NOTE(review): bitfield packing order is compiler/endian
	 * dependent — layout assumed to match firmware encoding; verify */
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
	     originator_str, sas_loginfo.dw.code,
	     sas_loginfo.dw.subcode);
}
 696
 697/**
 698 * _base_display_reply_info -
 699 * @ioc: per adapter object
 700 * @smid: system request message index
 701 * @msix_index: MSIX table index supplied by the OS
 702 * @reply: reply message frame(lower 32bit addr)
 703 *
 704 * Return nothing.
 705 */
 706static void
 707_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 708    u32 reply)
 709{
 710	MPI2DefaultReply_t *mpi_reply;
 711	u16 ioc_status;
 712
 713	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 714	if (unlikely(!mpi_reply)) {
 715		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
 716			ioc->name, __FILE__, __LINE__, __func__);
 717		return;
 718	}
 719	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 720#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 721	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
 722	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
 723		_base_sas_ioc_info(ioc , mpi_reply,
 724		   mpt2sas_base_get_msg_frame(ioc, smid));
 725	}
 726#endif
 727	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 728		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
 729}
 730
 731/**
 732 * mpt2sas_base_done - base internal command completion routine
 733 * @ioc: per adapter object
 734 * @smid: system request message index
 735 * @msix_index: MSIX table index supplied by the OS
 736 * @reply: reply message frame(lower 32bit addr)
 737 *
 738 * Return 1 meaning mf should be freed from _base_interrupt
 739 *        0 means the mf is freed from this function.
 740 */
 741u8
 742mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 743    u32 reply)
 744{
 745	MPI2DefaultReply_t *mpi_reply;
 746
 747	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 748	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
 749		return 1;
 750
 751	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
 752		return 1;
 753
 754	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
 755	if (mpi_reply) {
 756		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
 757		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
 758	}
 759	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
 760
 761	complete(&ioc->base_cmds.done);
 762	return 1;
 763}
 764
/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Sends an EventAck back to firmware when the event requires one, then
 * hands the event off to the scsih and ctl callback handlers.
 *
 * Returns void.
 */
static void
_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;

	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_base_display_event_data(ioc, mpi_reply);
#endif
	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		/* no free request frame; the event is still delivered to
		 * the callbacks below, only the ack is skipped */
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		goto out;
	}

	/* build and post the EventAck request */
	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt2sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt2sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt2sas_ctl_event_callback(ioc, msix_index, reply);

	return;
}
 816
 817/**
 818 * _base_get_cb_idx - obtain the callback index
 819 * @ioc: per adapter object
 820 * @smid: system request message index
 821 *
 822 * Return callback index.
 823 */
 824static u8
 825_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 826{
 827	int i;
 828	u8 cb_idx;
 829
 830	if (smid < ioc->hi_priority_smid) {
 831		i = smid - 1;
 832		cb_idx = ioc->scsi_lookup[i].cb_idx;
 833	} else if (smid < ioc->internal_smid) {
 834		i = smid - ioc->hi_priority_smid;
 835		cb_idx = ioc->hpr_lookup[i].cb_idx;
 836	} else if (smid <= ioc->hba_queue_depth) {
 837		i = smid - ioc->internal_smid;
 838		cb_idx = ioc->internal_lookup[i].cb_idx;
 839	} else
 840		cb_idx = 0xFF;
 841	return cb_idx;
 842}
 843
 844/**
 845 * _base_mask_interrupts - disable interrupts
 846 * @ioc: per adapter object
 847 *
 848 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 849 *
 850 * Return nothing.
 851 */
 852static void
 853_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
 854{
 855	u32 him_register;
 856
 857	ioc->mask_interrupts = 1;
 858	him_register = readl(&ioc->chip->HostInterruptMask);
 859	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
 860	writel(him_register, &ioc->chip->HostInterruptMask);
 861	readl(&ioc->chip->HostInterruptMask);
 862}
 863
/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = readl(&ioc->chip->HostInterruptMask);
	/* clear only the reply-interrupt mask bit; doorbell and reset
	 * IRQ sources remain masked */
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	/* allow _base_interrupt() to process replies again */
	ioc->mask_interrupts = 0;
}
 882
/* 64-bit reply post descriptor, also addressable as two 32-bit words
 * (used by the ISR to test the halves against UINT_MAX). */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
 890
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 * @r: pt_regs pointer (not used)
 *
 * Drains the reply post queue for this reply queue: dispatches each
 * descriptor to the registered callback (or the async-event path),
 * recycles reply frames onto the reply free queue, and finally updates
 * the reply post host index register.
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	/* interrupts are being masked off (see _base_mask_interrupts) */
	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* single-threaded per reply queue: bail if a pass is running */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* all-ones halves mean the descriptor was never written */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu
				(rpf->AddressReply.ReplyFrameAddress);
			/* sanity-check the DMA address range */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
			goto next;
		else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
			goto next;
		if (smid) {
			cb_idx = _base_get_cb_idx(ioc, smid);
		if ((likely(cb_idx < MPT_MAX_CALLBACKS))
			    && (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, reply);
			if (reply)
				_base_display_reply_info(ioc, smid,
				    msix_index, reply);
			/* non-zero rc: the callback wants the mf freed */
			if (rc)
				mpt2sas_base_free_smid(ioc, smid);
			}
		}
		/* smid of zero identifies an async firmware event */
		if (!smid)
			_base_async_event(ioc, msix_index, reply);

		/* reply free queue handling */
		if (reply) {
			ioc->reply_free_host_index =
			    (ioc->reply_free_host_index ==
			    (ioc->reply_free_queue_depth - 1)) ?
			    0 : ioc->reply_free_host_index + 1;
			ioc->reply_free[ioc->reply_free_host_index] =
			    cpu_to_le32(reply);
			/* order the queue write before the index update */
			wmb();
			writel(ioc->reply_free_host_index,
			    &ioc->chip->ReplyFreeHostIndex);
		}

 next:

		/* mark the slot unused and advance (with wraparound) */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}
	/* flush descriptor writes before telling hardware we consumed */
	wmb();
	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}
	writel(reply_q->reply_post_host_index | (msix_index <<
	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}
1015
1016/**
 * _base_is_controller_msix_enabled - check whether controller supports multi-reply queues
1018 * @ioc: per adapter object
1019 *
1020 */
1021static inline int
1022_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1023{
1024	return (ioc->facts.IOCCapabilities &
1025	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1026}
1027
1028/**
1029 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1030 * @ioc: per adapter object
 * Context: ISR context
 *
 * Called when a Task Management request has completed. We want
 * to flush the other reply queues so all the outstanding IO has been
 * completed back to the OS before we process the TM completion.
1036 *
1037 * Return nothing.
1038 */
1039void
1040mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1041{
1042	struct adapter_reply_queue *reply_q;
1043
1044	/* If MSIX capability is turned off
1045	 * then multi-queues are not enabled
1046	 */
1047	if (!_base_is_controller_msix_enabled(ioc))
1048		return;
1049
1050	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1051		if (ioc->shost_recovery)
1052			return;
1053		/* TMs are on msix_index == 0 */
1054		if (reply_q->msix_index == 0)
1055			continue;
1056		_base_interrupt(reply_q->vector, (void *)reply_q);
1057	}
1058}
1059
1060/**
1061 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1062 * @cb_idx: callback index
1063 *
1064 * Return nothing.
1065 */
void
mpt2sas_base_release_callback_handler(u8 cb_idx)
{
	/* Drop the registered handler; the slot becomes free for reuse
	 * by a later mpt2sas_base_register_callback_handler() call.
	 */
	mpt_callbacks[cb_idx] = NULL;
}
1071
1072/**
1073 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1074 * @cb_func: callback function
1075 *
1076 * Returns cb_func.
1077 */
1078u8
1079mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1080{
1081	u8 cb_idx;
1082
1083	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1084		if (mpt_callbacks[cb_idx] == NULL)
1085			break;
1086
1087	mpt_callbacks[cb_idx] = cb_func;
1088	return cb_idx;
1089}
1090
1091/**
1092 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1093 *
1094 * Return nothing.
1095 */
1096void
1097mpt2sas_base_initialize_callback_handler(void)
1098{
1099	u8 cb_idx;
1100
1101	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1102		mpt2sas_base_release_callback_handler(cb_idx);
1103}
1104
1105/**
1106 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1107 * @ioc: per adapter object
1108 * @paddr: virtual address for SGE
1109 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1111 * something to use if the target device goes brain dead and tries
1112 * to send data even when none is asked for.
1113 *
1114 * Return nothing.
1115 */
1116void
1117mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1118{
1119	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1120	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1121	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1122	    MPI2_SGE_FLAGS_SHIFT);
1123	ioc->base_add_sg_single(paddr, flags_length, -1);
1124}
1125
1126/**
1127 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1128 * @paddr: virtual address for SGE
1129 * @flags_length: SGE flags and data transfer length
1130 * @dma_addr: Physical address
1131 *
1132 * Return nothing.
1133 */
1134static void
1135_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1136{
1137	Mpi2SGESimple32_t *sgel = paddr;
1138
1139	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1140	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1141	sgel->FlagsLength = cpu_to_le32(flags_length);
1142	sgel->Address = cpu_to_le32(dma_addr);
1143}
1144
1145
1146/**
1147 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1148 * @paddr: virtual address for SGE
1149 * @flags_length: SGE flags and data transfer length
1150 * @dma_addr: Physical address
1151 *
1152 * Return nothing.
1153 */
1154static void
1155_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1156{
1157	Mpi2SGESimple64_t *sgel = paddr;
1158
1159	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1160	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1161	sgel->FlagsLength = cpu_to_le32(flags_length);
1162	sgel->Address = cpu_to_le64(dma_addr);
1163}
1164
1165#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1166
1167/**
1168 * _base_config_dma_addressing - set dma addressing
1169 * @ioc: per adapter object
1170 * @pdev: PCI device struct
1171 *
1172 * Returns 0 for success, non-zero for failure.
1173 */
1174static int
1175_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1176{
1177	struct sysinfo s;
1178	char *desc = NULL;
1179
1180	if (sizeof(dma_addr_t) > 4) {
1181		const uint64_t required_mask =
1182		    dma_get_required_mask(&pdev->dev);
1183		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1184		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1185		    DMA_BIT_MASK(64))) {
1186			ioc->base_add_sg_single = &_base_add_sg_single_64;
1187			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1188			desc = "64";
1189			goto out;
1190		}
1191	}
1192
1193	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1194	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1195		ioc->base_add_sg_single = &_base_add_sg_single_32;
1196		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1197		desc = "32";
1198	} else
1199		return -ENODEV;
1200
1201 out:
1202	si_meminfo(&s);
1203	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1204	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1205
1206	return 0;
1207}
1208
1209/**
 * _base_check_enable_msix - checks MSI-X capability.
1211 * @ioc: per adapter object
1212 *
1213 * Check to see if card is capable of MSIX, and set number
1214 * of available msix vectors
1215 */
1216static int
1217_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1218{
1219	int base;
1220	u16 message_control;
1221
1222
1223	/* Check whether controller SAS2008 B0 controller,
1224	   if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
1225	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1226	    ioc->pdev->revision == 0x01) {
1227		return -EINVAL;
1228	}
1229
1230	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1231	if (!base) {
1232		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1233		    "supported\n", ioc->name));
1234		return -EINVAL;
1235	}
1236
1237	/* get msix vector count */
1238	/* NUMA_IO not supported for older controllers */
1239	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1240	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1241	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1242	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1243	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1244	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1245	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1246		ioc->msix_vector_count = 1;
1247	else {
1248		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1249		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1250	}
1251	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1252	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1253
1254	return 0;
1255}
1256
1257/**
1258 * _base_free_irq - free irq
1259 * @ioc: per adapter object
1260 *
1261 * Freeing respective reply_queue from the list.
1262 */
1263static void
1264_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1265{
1266	struct adapter_reply_queue *reply_q, *next;
1267
1268	if (list_empty(&ioc->reply_queue_list))
1269		return;
1270
1271	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1272		list_del(&reply_q->list);
1273		synchronize_irq(reply_q->vector);
1274		free_irq(reply_q->vector, reply_q);
1275		kfree(reply_q);
1276	}
1277}
1278
1279/**
1280 * _base_request_irq - request irq
1281 * @ioc: per adapter object
1282 * @index: msix index into vector table
1283 * @vector: irq vector
1284 *
1285 * Inserting respective reply_queue into the list.
1286 */
1287static int
1288_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1289{
1290	struct adapter_reply_queue *reply_q;
1291	int r;
1292
1293	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1294	if (!reply_q) {
1295		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1296		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1297		return -ENOMEM;
1298	}
1299	reply_q->ioc = ioc;
1300	reply_q->msix_index = index;
1301	reply_q->vector = vector;
1302	atomic_set(&reply_q->busy, 0);
1303	if (ioc->msix_enable)
1304		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1305		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1306	else
1307		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1308		    MPT2SAS_DRIVER_NAME, ioc->id);
1309	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1310	    reply_q);
1311	if (r) {
1312		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1313		    reply_q->name, vector);
1314		kfree(reply_q);
1315		return -EBUSY;
1316	}
1317
1318	INIT_LIST_HEAD(&reply_q->list);
1319	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1320	return 0;
1321}
1322
1323/**
1324 * _base_assign_reply_queues - assigning msix index for each cpu
1325 * @ioc: per adapter object
1326 *
1327 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1328 *
1329 * It would nice if we could call irq_set_affinity, however it is not
1330 * an exported symbol
1331 */
static void
_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;
	int cpu_id;
	int cpu_grouping, loop, grouping, grouping_mod;

	/* without MSI-X there is a single queue; the zeroed table suffices */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
	/* when there are more cpus than available msix vectors,
	 * then group cpus together on the same irq
	 */
	if (ioc->cpu_count > ioc->msix_vector_count) {
		/* round the group size up to the next power of two
		 * (2/4/8/16) unless the cpus divide evenly into vectors
		 */
		grouping = ioc->cpu_count / ioc->msix_vector_count;
		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
		if (grouping < 2 || (grouping == 2 && !grouping_mod))
			cpu_grouping = 2;
		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
			cpu_grouping = 4;
		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
			cpu_grouping = 8;
		else
			cpu_grouping = 16;
	} else
		cpu_grouping = 0;	/* one cpu (or fewer) per vector */

	loop = 0;
	reply_q = list_entry(ioc->reply_queue_list.next,
	     struct adapter_reply_queue, list);
	for_each_online_cpu(cpu_id) {
		if (!cpu_grouping) {
			/* 1:1 mapping — advance the queue for every cpu */
			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
			reply_q = list_entry(reply_q->list.next,
			    struct adapter_reply_queue, list);
		} else {
			/* assign cpu_grouping consecutive cpus per queue */
			if (loop < cpu_grouping) {
				ioc->cpu_msix_table[cpu_id] =
					reply_q->msix_index;
				loop++;
			} else {
				reply_q = list_entry(reply_q->list.next,
				    struct adapter_reply_queue, list);
				ioc->cpu_msix_table[cpu_id] =
					reply_q->msix_index;
				loop = 1;
			}
		}
	}
}
1383
1384/**
1385 * _base_disable_msix - disables msix
1386 * @ioc: per adapter object
1387 *
1388 */
1389static void
1390_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1391{
1392	if (ioc->msix_enable) {
1393		pci_disable_msix(ioc->pdev);
1394		ioc->msix_enable = 0;
1395	}
1396}
1397
1398/**
 * _base_enable_msix - enables msix, with fallback to io_apic
1400 * @ioc: per adapter object
1401 *
1402 */
static int
_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
{
	struct msix_entry *entries, *a;
	int r;
	int i;
	u8 try_msix = 0;

	/* module parameter msix_disable: -1/0 means MSI-X is allowed */
	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	/* verify the device advertises an MSI-X capability */
	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	/* one reply queue per online cpu, bounded by available vectors */
	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
	    ioc->msix_vector_count);

	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
	    GFP_KERNEL);
	if (!entries) {
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
		    __LINE__, __func__));
		goto try_ioapic;
	}

	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
		a->entry = i;

	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
	if (r) {
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
		    "failed (r=%d) !!!\n", ioc->name, r));
		kfree(entries);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
		r = _base_request_irq(ioc, i, a->vector);
		if (r) {
			/* undo every irq obtained so far and fall back */
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			kfree(entries);
			goto try_ioapic;
		}
	}

	/* the msix_entry table is only needed during setup */
	kfree(entries);
	return 0;

/* fallback to io_apic interrupt routing */
 try_ioapic:

	r = _base_request_irq(ioc, 0, ioc->pdev->irq);

	return r;
}
1464
1465/**
1466 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1467 * @ioc: per adapter object
1468 *
1469 * Returns 0 for success, non-zero for failure.
1470 */
int
mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
	    ioc->name, __func__));

	/* enable only the memory BARs and claim them for this driver */
	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
		    "failed\n", ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    MPT2SAS_DRIVER_NAME)) {
		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
		    "failed\n", ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	/* choose 64- or 32-bit DMA masks and matching SGE builders */
	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
		    ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* map the first I/O-port BAR and the first memory BAR found */
	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else {
			if (memap_sz)
				continue;
			/* verify memory resource is valid before using */
			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
				ioc->chip_phys = pci_resource_start(pdev, i);
				chip_phys = (u64)ioc->chip_phys;
				memap_sz = pci_resource_len(pdev, i);
				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
				if (ioc->chip == NULL) {
					printk(MPT2SAS_ERR_FMT "unable to map "
					    "adapter memory!\n", ioc->name);
					r = -EINVAL;
					goto out_fail;
				}
			}
		}
	}

	/* quiesce the IOC before wiring up interrupt handlers */
	_base_mask_interrupts(ioc);
	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"), reply_q->vector);

	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);

	return 0;

 out_fail:
	/* unwind in reverse order of acquisition */
	if (ioc->chip_phys)
		iounmap(ioc->chip);
	ioc->chip_phys = 0;
	pci_release_selected_regions(ioc->pdev, ioc->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return r;
}
1569
1570/**
1571 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1572 * @ioc: per adapter object
1573 * @smid: system request message index(smid zero is invalid)
1574 *
1575 * Returns virt pointer to message frame.
1576 */
1577void *
1578mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1579{
1580	return (void *)(ioc->request + (smid * ioc->request_sz));
1581}
1582
1583/**
1584 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1585 * @ioc: per adapter object
1586 * @smid: system request message index
1587 *
1588 * Returns virt pointer to sense buffer.
1589 */
1590void *
1591mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1592{
1593	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1594}
1595
1596/**
1597 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1598 * @ioc: per adapter object
1599 * @smid: system request message index
1600 *
1601 * Returns phys pointer to the low 32bit address of the sense buffer.
1602 */
1603__le32
1604mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1605{
1606	return cpu_to_le32(ioc->sense_dma +
1607			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1608}
1609
1610/**
1611 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1612 * @ioc: per adapter object
1613 * @phys_addr: lower 32 physical addr of the reply
1614 *
1615 * Converts 32bit lower physical addr into a virt address.
1616 */
1617void *
1618mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1619{
1620	if (!phys_addr)
1621		return NULL;
1622	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1623}
1624
1625/**
1626 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1627 * @ioc: per adapter object
1628 * @cb_idx: callback index
1629 *
1630 * Returns smid (zero is invalid)
1631 */
1632u16
1633mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1634{
1635	unsigned long flags;
1636	struct request_tracker *request;
1637	u16 smid;
1638
1639	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1640	if (list_empty(&ioc->internal_free_list)) {
1641		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1642		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1643		    ioc->name, __func__);
1644		return 0;
1645	}
1646
1647	request = list_entry(ioc->internal_free_list.next,
1648	    struct request_tracker, tracker_list);
1649	request->cb_idx = cb_idx;
1650	smid = request->smid;
1651	list_del(&request->tracker_list);
1652	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1653	return smid;
1654}
1655
1656/**
1657 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1658 * @ioc: per adapter object
1659 * @cb_idx: callback index
1660 * @scmd: pointer to scsi command object
1661 *
1662 * Returns smid (zero is invalid)
1663 */
1664u16
1665mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1666    struct scsi_cmnd *scmd)
1667{
1668	unsigned long flags;
1669	struct scsiio_tracker *request;
1670	u16 smid;
1671
1672	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1673	if (list_empty(&ioc->free_list)) {
1674		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1675		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1676		    ioc->name, __func__);
1677		return 0;
1678	}
1679
1680	request = list_entry(ioc->free_list.next,
1681	    struct scsiio_tracker, tracker_list);
1682	request->scmd = scmd;
1683	request->cb_idx = cb_idx;
1684	smid = request->smid;
1685	list_del(&request->tracker_list);
1686	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1687	return smid;
1688}
1689
1690/**
1691 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1692 * @ioc: per adapter object
1693 * @cb_idx: callback index
1694 *
1695 * Returns smid (zero is invalid)
1696 */
1697u16
1698mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1699{
1700	unsigned long flags;
1701	struct request_tracker *request;
1702	u16 smid;
1703
1704	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1705	if (list_empty(&ioc->hpr_free_list)) {
1706		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1707		return 0;
1708	}
1709
1710	request = list_entry(ioc->hpr_free_list.next,
1711	    struct request_tracker, tracker_list);
1712	request->cb_idx = cb_idx;
1713	smid = request->smid;
1714	list_del(&request->tracker_list);
1715	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1716	return smid;
1717}
1718
1719
1720/**
1721 * mpt2sas_base_free_smid - put smid back on free_list
1722 * @ioc: per adapter object
1723 * @smid: system request message index
1724 *
1725 * Return nothing.
1726 */
void
mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct chain_tracker *chain_req, *next;

	/* smid ranges select the pool: [1, hi_priority_smid) = scsiio,
	 * [hi_priority_smid, internal_smid) = hi-priority,
	 * [internal_smid, hba_queue_depth] = internal
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->hi_priority_smid) {
		/* scsiio queue */
		i = smid - 1;
		/* return any chain frames held by this request */
		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
			list_for_each_entry_safe(chain_req, next,
			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
				list_del_init(&chain_req->tracker_list);
				list_add_tail(&chain_req->tracker_list,
				    &ioc->free_chain_list);
			}
		}
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].scmd = NULL;
		ioc->scsi_lookup[i].direct_io = 0;
		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
		    &ioc->free_list);
		/* note: this branch unlocks here and returns early */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

		/*
		 * See _wait_for_commands_to_complete() call with regards
		 * to this code.
		 */
		if (ioc->shost_recovery && ioc->pending_io_count) {
			/* wake the reset waiter once the last IO drains */
			if (ioc->pending_io_count == 1)
				wake_up(&ioc->reset_wq);
			ioc->pending_io_count--;
		}
		return;
	} else if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
1778
1779/**
1780 * _base_writeq - 64 bit write to MMIO
1781 * @ioc: per adapter object
1782 * @b: data payload
1783 * @addr: address in MMIO space
1784 * @writeq_lock: spin lock
1785 *
1786 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 * care of 32 bit environments where it is not guaranteed to send the entire
 * word in one transfer.
1789 */
#ifndef writeq
/* No native 64-bit MMIO write: emit two 32-bit writes (low word first)
 * under a spinlock so concurrent callers cannot interleave halves.
 */
static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
    spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = cpu_to_le64(b);

	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#else
/* Native writeq available: single atomic 64-bit store, lock unused */
static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
    spinlock_t *writeq_lock)
{
	writeq(cpu_to_le64(b), addr);
}
#endif
1809
1810static inline u8
1811_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1812{
1813	return ioc->cpu_msix_table[raw_smp_processor_id()];
1814}
1815
1816/**
1817 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1818 * @ioc: per adapter object
1819 * @smid: system request message index
1820 * @handle: device handle
1821 *
1822 * Return nothing.
1823 */
1824void
1825mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1826{
1827	Mpi2RequestDescriptorUnion_t descriptor;
1828	u64 *request = (u64 *)&descriptor;
1829
1830
1831	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1832	descr

Large files files are truncated, but you can click here to view the full file