
/drivers/scsi/ipr.c

http://github.com/mirrors/linux
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
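
/*
 * Example (hypothetical values): the parameters above can be set at load
 * time, e.g. "modprobe ipr max_speed=2 fastfail=1 transop_timeout=360".
 * The ones registered with S_IRUGO | S_IWUSR (fastfail, debug, fast_reboot)
 * are also visible and writable at runtime under
 * /sys/module/ipr/parameters/.
 */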

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};
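
/*
 * Minimal lookup sketch for the table above (illustrative only, not part of
 * the driver): linearly scan for an entry whose IOASC matches the masked
 * IOASC reported in the IOASA. The mask name IPR_IOASC_IOASC_MASK is
 * assumed to come from ipr.h; entry 0 serves as the "unknown error"
 * fallback when nothing matches.
 */
static inline const struct ipr_error_table_t *ipr_error_lookup_sketch(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return &ipr_error_table[i];

	return &ipr_error_table[0];	/* "8155: An unknown error was received" */
}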

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
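
/*
 * The trace buffer above is a lock-free, power-of-two ring:
 * atomic_add_return() hands out a unique, monotonically increasing count
 * and IPR_TRACE_INDEX_MASK wraps it into the array, so concurrent callers
 * land on distinct slots until the ring recycles. A minimal sketch of the
 * same pattern (names here are illustrative):
 *
 *	idx = atomic_add_return(1, &next_slot) & (ring_size - 1);
 *	ring[idx] = entry;	(ring_size must be a power of two)
 */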

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
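
/*
 * Note on the free-list pattern above: an equivalent, more compact idiom
 * (sketch only; the driver keeps the open-coded form) would be
 *
 *	ipr_cmd = list_first_entry_or_null(&hrrq->hrrq_free_q,
 *					   struct ipr_cmnd, queue);
 *	if (ipr_cmd)
 *		list_del(&ipr_cmd->queue);
 */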

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
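
/*
 * Worked example of the sis64 size encoding above, assuming the 16 byte
 * struct ipr_ioadl64_desc layout from ipr.h: the low address bits are free
 * because the IOARCB is aligned, so bit 0 flags the default 256 byte IOARCB
 * and bit 2 grows it to 512 bytes. Eight in-line descriptors (8 * 16 = 128)
 * still fit; a ninth (9 * 16 = 144 > 128) sets bit 2 as well.
 */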

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
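
/*
 * hrrq 0 (IPR_INIT_HRRQ) is reserved for driver-internal commands (see
 * ipr_get_free_ipr_cmnd above), so the modulo arithmetic round-robins I/O
 * across queues 1..hrrq_num-1 only. For example, with hrrq_num = 4,
 * successive calls cycle through queues 1, 2 and 3.
 */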

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
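
/*
 * Example (hypothetical values): for res_path = { 0x02, 0x0A, 0xFF, ... }
 * and a sufficiently large buffer, this yields "02-0A"; the 0xff byte
 * terminates the path.
 */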
 1308
 1309/**
 1310 * ipr_format_res_path - Format the resource path for printing.
 1311 * @ioa_cfg:	ioa config struct
 1312 * @res_path:	resource path
 1313 * @buf:	buffer
 1314 * @len:	length of buffer provided
 1315 *
 1316 * Return value:
 1317 *	pointer to buffer
 1318 **/
 1319static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
 1320				 u8 *res_path, char *buffer, int len)
 1321{
 1322	char *p = buffer;
 1323
 1324	*p = '\0';
 1325	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
 1326	__ipr_format_res_path(res_path, p, len - (buffer - p));
 1327	return buffer;
 1328}
 1329
 1330/**
 1331 * ipr_update_res_entry - Update the resource entry.
 1332 * @res:	resource entry struct
 1333 * @cfgtew:	config table entry wrapper struct
 1334 *
 1335 * Return value:
 1336 *      none
 1337 **/
 1338static void ipr_update_res_entry(struct ipr_resource_entry *res,
 1339				 struct ipr_config_table_entry_wrapper *cfgtew)
 1340{
 1341	char buffer[IPR_MAX_RES_PATH_LENGTH];
 1342	unsigned int proto;
 1343	int new_path = 0;
 1344
 1345	if (res->ioa_cfg->sis64) {
 1346		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
 1347		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
 1348		res->type = cfgtew->u.cfgte64->res_type;
 1349
 1350		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
 1351			sizeof(struct ipr_std_inq_data));
 1352
 1353		res->qmodel = IPR_QUEUEING_MODEL64(res);
 1354		proto = cfgtew->u.cfgte64->proto;
 1355		res->res_handle = cfgtew->u.cfgte64->res_handle;
 1356		res->dev_id = cfgtew->u.cfgte64->dev_id;
 1357
 1358		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
 1359			sizeof(res->dev_lun.scsi_lun));
 1360
 1361		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
 1362					sizeof(res->res_path))) {
 1363			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
 1364				sizeof(res->res_path));
 1365			new_path = 1;
 1366		}
 1367
 1368		if (res->sdev && new_path)
 1369			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
 1370				    ipr_format_res_path(res->ioa_cfg,
 1371					res->res_path, buffer, sizeof(buffer)));
 1372	} else {
 1373		res->flags = cfgtew->u.cfgte->flags;
 1374		if (res->flags & IPR_IS_IOA_RESOURCE)
 1375			res->type = IPR_RES_TYPE_IOAFP;
 1376		else
 1377			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
 1378
 1379		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
 1380			sizeof(struct ipr_std_inq_data));
 1381
 1382		res->qmodel = IPR_QUEUEING_MODEL(res);
 1383		proto = cfgtew->u.cfgte->proto;
 1384		res->res_handle = cfgtew->u.cfgte->res_handle;
 1385	}
 1386
 1387	ipr_update_ata_class(res, proto);
 1388}
 1389
 1390/**
 1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 1392 * 			  for the resource.
 1393 * @res:	resource entry struct
 1394 * @cfgtew:	config table entry wrapper struct
 1395 *
 1396 * Return value:
 1397 *      none
 1398 **/
 1399static void ipr_clear_res_target(struct ipr_resource_entry *res)
 1400{
 1401	struct ipr_resource_entry *gscsi_res = NULL;
 1402	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
 1403
 1404	if (!ioa_cfg->sis64)
 1405		return;
 1406
 1407	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
 1408		clear_bit(res->target, ioa_cfg->array_ids);
 1409	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
 1410		clear_bit(res->target, ioa_cfg->vset_ids);
 1411	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
 1412		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
 1413			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
 1414				return;
 1415		clear_bit(res->target, ioa_cfg->target_ids);
 1416
 1417	} else if (res->bus == 0)
 1418		clear_bit(res->target, ioa_cfg->target_ids);
 1419}
 1420
 1421/**
 1422 * ipr_handle_config_change - Handle a config change from the adapter
 1423 * @ioa_cfg:	ioa config struct
 1424 * @hostrcb:	hostrcb
 1425 *
 1426 * Return value:
 1427 * 	none
 1428 **/
 1429static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
 1430				     struct ipr_hostrcb *hostrcb)
 1431{
 1432	struct ipr_resource_entry *res = NULL;
 1433	struct ipr_config_table_entry_wrapper cfgtew;
 1434	__be32 cc_res_handle;
 1435
 1436	u32 is_ndn = 1;
 1437
 1438	if (ioa_cfg->sis64) {
 1439		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
 1440		cc_res_handle = cfgtew.u.cfgte64->res_handle;
 1441	} else {
 1442		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
 1443		cc_res_handle = cfgtew.u.cfgte->res_handle;
 1444	}
 1445
 1446	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 1447		if (res->res_handle == cc_res_handle) {
 1448			is_ndn = 0;
 1449			break;
 1450		}
 1451	}
 1452
 1453	if (is_ndn) {
 1454		if (list_empty(&ioa_cfg->free_res_q)) {
 1455			ipr_send_hcam(ioa_cfg,
 1456				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
 1457				      hostrcb);
 1458			return;
 1459		}
 1460
 1461		res = list_entry(ioa_cfg->free_res_q.next,
 1462				 struct ipr_resource_entry, queue);
 1463
 1464		list_del(&res->queue);
 1465		ipr_init_res_entry(res, &cfgtew);
 1466		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
 1467	}
 1468
 1469	ipr_update_res_entry(res, &cfgtew);
 1470
 1471	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
 1472		if (res->sdev) {
 1473			res->del_from_ml = 1;
 1474			res->res_handle = IPR_INVALID_RES_HANDLE;
 1475			schedule_work(&ioa_cfg->work_q);
 1476		} else {
 1477			ipr_clear_res_target(res);
 1478			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
 1479		}
 1480	} else if (!res->sdev || res->del_from_ml) {
 1481		res->add_to_ml = 1;
 1482		schedule_work(&ioa_cfg->work_q);
 1483	}
 1484
 1485	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
 1486}
 1487
 1488/**
 1489 * ipr_process_ccn - Op done function for a CCN.
 1490 * @ipr_cmd:	ipr command struct
 1491 *
 1492 * This function is the op done function for a configuration
 1493 * change notification host controlled async from the adapter.
 1494 *
 1495 * Return value:
 1496 * 	none
 1497 **/
 1498static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 1499{
 1500	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 1501	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 1502	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 1503
 1504	list_del_init(&hostrcb->queue);
 1505	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 1506
 1507	if (ioasc) {
 1508		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
 1509		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
 1510			dev_err(&ioa_cfg->pdev->dev,
 1511				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
 1512
 1513		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
 1514	} else {
 1515		ipr_handle_config_change(ioa_cfg, hostrcb);
 1516	}
 1517}
 1518
 1519/**
 1520 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 1521 * @i:		index into buffer
 1522 * @buf:		string to modify
 1523 *
 1524 * This function will strip all trailing whitespace, pad the end
 1525 * of the string with a single space, and NULL terminate the string.
 1526 *
 1527 * Return value:
 1528 * 	new length of string
 1529 **/
 1530static int strip_and_pad_whitespace(int i, char *buf)
 1531{
 1532	while (i && buf[i] == ' ')
 1533		i--;
 1534	buf[i+1] = ' ';
 1535	buf[i+2] = '\0';
 1536	return i + 2;
 1537}
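
/* Illustrative example (not from the driver): for buf = "IBM     " and
 * i = 7, the loop backs up to the 'M', then a single space and a NUL are
 * written after it, leaving "IBM " and returning 4 as the new length.
 */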
 1538
 1539/**
 1540 * ipr_log_vpd_compact - Log the passed VPD compactly.
 1541 * @prefix:		string to print at start of printk
 1542 * @hostrcb:	hostrcb pointer
 1543 * @vpd:		vendor/product id/sn struct
 1544 *
 1545 * Return value:
 1546 * 	none
 1547 **/
 1548static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
 1549				struct ipr_vpd *vpd)
 1550{
 1551	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
 1552	int i = 0;
 1553
 1554	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
 1555	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
 1556
 1557	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
 1558	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
 1559
 1560	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
 1561	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
 1562
 1563	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
 1564}
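
/* The emitted line has the form
 *   "<prefix> VPID/SN: VENDOR PRODUCT SERIAL"
 * with the stripped fields separated by single spaces.
 */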
 1565
 1566/**
 1567 * ipr_log_vpd - Log the passed VPD to the error log.
 1568 * @vpd:		vendor/product id/sn struct
 1569 *
 1570 * Return value:
 1571 * 	none
 1572 **/
 1573static void ipr_log_vpd(struct ipr_vpd *vpd)
 1574{
 1575	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
 1576		    + IPR_SERIAL_NUM_LEN];
 1577
 1578	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
 1579	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
 1580	       IPR_PROD_ID_LEN);
 1581	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
 1582	ipr_err("Vendor/Product ID: %s\n", buffer);
 1583
 1584	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
 1585	buffer[IPR_SERIAL_NUM_LEN] = '\0';
 1586	ipr_err("    Serial Number: %s\n", buffer);
 1587}
 1588
 1589/**
 1590 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 1591 * @prefix:		string to print at start of printk
 1592 * @hostrcb:	hostrcb pointer
 1593 * @vpd:		vendor/product id/sn/wwn struct
 1594 *
 1595 * Return value:
 1596 * 	none
 1597 **/
 1598static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
 1599				    struct ipr_ext_vpd *vpd)
 1600{
 1601	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
 1602	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
 1603		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
 1604}
 1605
 1606/**
 1607 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 1608 * @vpd:		vendor/product id/sn/wwn struct
 1609 *
 1610 * Return value:
 1611 * 	none
 1612 **/
 1613static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
 1614{
 1615	ipr_log_vpd(&vpd->vpd);
 1616	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
 1617		be32_to_cpu(vpd->wwid[1]));
 1618}
 1619
 1620/**
 1621 * ipr_log_enhanced_cache_error - Log a cache error.
 1622 * @ioa_cfg:	ioa config struct
 1623 * @hostrcb:	hostrcb struct
 1624 *
 1625 * Return value:
 1626 * 	none
 1627 **/
 1628static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 1629					 struct ipr_hostrcb *hostrcb)
 1630{
 1631	struct ipr_hostrcb_type_12_error *error;
 1632
 1633	if (ioa_cfg->sis64)
 1634		error = &hostrcb->hcam.u.error64.u.type_12_error;
 1635	else
 1636		error = &hostrcb->hcam.u.error.u.type_12_error;
 1637
 1638	ipr_err("-----Current Configuration-----\n");
 1639	ipr_err("Cache Directory Card Information:\n");
 1640	ipr_log_ext_vpd(&error->ioa_vpd);
 1641	ipr_err("Adapter Card Information:\n");
 1642	ipr_log_ext_vpd(&error->cfc_vpd);
 1643
 1644	ipr_err("-----Expected Configuration-----\n");
 1645	ipr_err("Cache Directory Card Information:\n");
 1646	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
 1647	ipr_err("Adapter Card Information:\n");
 1648	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
 1649
 1650	ipr_err("Additional IOA Data: %08X %08X %08X\n",
 1651		     be32_to_cpu(error->ioa_data[0]),
 1652		     be32_to_cpu(error->ioa_data[1]),
 1653		     be32_to_cpu(error->ioa_data[2]));
 1654}
 1655
 1656/**
 1657 * ipr_log_cache_error - Log a cache error.
 1658 * @ioa_cfg:	ioa config struct
 1659 * @hostrcb:	hostrcb struct
 1660 *
 1661 * Return value:
 1662 * 	none
 1663 **/
 1664static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 1665				struct ipr_hostrcb *hostrcb)
 1666{
 1667	struct ipr_hostrcb_type_02_error *error =
 1668		&hostrcb->hcam.u.error.u.type_02_error;
 1669
 1670	ipr_err("-----Current Configuration-----\n");
 1671	ipr_err("Cache Directory Card Information:\n");
 1672	ipr_log_vpd(&error->ioa_vpd);
 1673	ipr_err("Adapter Card Information:\n");
 1674	ipr_log_vpd(&error->cfc_vpd);
 1675
 1676	ipr_err("-----Expected Configuration-----\n");
 1677	ipr_err("Cache Directory Card Information:\n");
 1678	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
 1679	ipr_err("Adapter Card Information:\n");
 1680	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
 1681
 1682	ipr_err("Additional IOA Data: %08X %08X %08X\n",
 1683		     be32_to_cpu(error->ioa_data[0]),
 1684		     be32_to_cpu(error->ioa_data[1]),
 1685		     be32_to_cpu(error->ioa_data[2]));
 1686}
 1687
 1688/**
 1689 * ipr_log_enhanced_config_error - Log a configuration error.
 1690 * @ioa_cfg:	ioa config struct
 1691 * @hostrcb:	hostrcb struct
 1692 *
 1693 * Return value:
 1694 * 	none
 1695 **/
 1696static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
 1697					  struct ipr_hostrcb *hostrcb)
 1698{
 1699	int errors_logged, i;
 1700	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
 1701	struct ipr_hostrcb_type_13_error *error;
 1702
 1703	error = &hostrcb->hcam.u.error.u.type_13_error;
 1704	errors_logged = be32_to_cpu(error->errors_logged);
 1705
 1706	ipr_err("Device Errors Detected/Logged: %d/%d\n",
 1707		be32_to_cpu(error->errors_detected), errors_logged);
 1708
 1709	dev_entry = error->dev;
 1710
 1711	for (i = 0; i < errors_logged; i++, dev_entry++) {
 1712		ipr_err_separator;
 1713
 1714		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
 1715		ipr_log_ext_vpd(&dev_entry->vpd);
 1716
 1717		ipr_err("-----New Device Information-----\n");
 1718		ipr_log_ext_vpd(&dev_entry->new_vpd);
 1719
 1720		ipr_err("Cache Directory Card Information:\n");
 1721		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
 1722
 1723		ipr_err("Adapter Card Information:\n");
 1724		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
 1725	}
 1726}
 1727
 1728/**
 1729 * ipr_log_sis64_config_error - Log a sis64 configuration error.
 1730 * @ioa_cfg:	ioa config struct
 1731 * @hostrcb:	hostrcb struct
 1732 *
 1733 * Return value:
 1734 * 	none
 1735 **/
 1736static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
 1737				       struct ipr_hostrcb *hostrcb)
 1738{
 1739	int errors_logged, i;
 1740	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
 1741	struct ipr_hostrcb_type_23_error *error;
 1742	char buffer[IPR_MAX_RES_PATH_LENGTH];
 1743
 1744	error = &hostrcb->hcam.u.error64.u.type_23_error;
 1745	errors_logged = be32_to_cpu(error->errors_logged);
 1746
 1747	ipr_err("Device Errors Detected/Logged: %d/%d\n",
 1748		be32_to_cpu(error->errors_detected), errors_logged);
 1749
 1750	dev_entry = error->dev;
 1751
 1752	for (i = 0; i < errors_logged; i++, dev_entry++) {
 1753		ipr_err_separator;
 1754
 1755		ipr_err("Device %d : %s", i + 1,
 1756			__ipr_format_res_path(dev_entry->res_path,
 1757					      buffer, sizeof(buffer)));
 1758		ipr_log_ext_vpd(&dev_entry->vpd);
 1759
 1760		ipr_err("-----New Device Information-----\n");
 1761		ipr_log_ext_vpd(&dev_entry->new_vpd);
 1762
 1763		ipr_err("Cache Directory Card Information:\n");
 1764		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
 1765
 1766		ipr_err("Adapter Card Information:\n");
 1767		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
 1768	}
 1769}
 1770
 1771/**
 1772 * ipr_log_config_error - Log a configuration error.
 1773 * @ioa_cfg:	ioa config struct
 1774 * @hostrcb:	hostrcb struct
 1775 *
 1776 * Return value:
 1777 * 	none
 1778 **/
 1779static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
 1780				 struct ipr_hostrcb *hostrcb)
 1781{
 1782	int errors_logged, i;
 1783	struct ipr_hostrcb_device_data_entry *dev_entry;
 1784	struct ipr_hostrcb_type_03_error *error;
 1785
 1786	error = &hostrcb->hcam.u.error.u.type_03_error;
 1787	errors_logged = be32_to_cpu(error->errors_logged);
 1788
 1789	ipr_err("Device Errors Detected/Logged: %d/%d\n",
 1790		be32_to_cpu(error->errors_detected), errors_logged);
 1791
 1792	dev_entry = error->dev;
 1793
 1794	for (i = 0; i < errors_logged; i++, dev_entry++) {
 1795		ipr_err_separator;
 1796
 1797		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
 1798		ipr_log_vpd(&dev_entry->vpd);
 1799
 1800		ipr_err("-----New Device Information-----\n");
 1801		ipr_log_vpd(&dev_entry->new_vpd);
 1802
 1803		ipr_err("Cache Directory Card Information:\n");
 1804		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
 1805
 1806		ipr_err("Adapter Card Information:\n");
 1807		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
 1808
 1809		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
 1810			be32_to_cpu(dev_entry->ioa_data[0]),
 1811			be32_to_cpu(dev_entry->ioa_data[1]),
 1812			be32_to_cpu(dev_entry->ioa_data[2]),
 1813			be32_to_cpu(dev_entry->ioa_data[3]),
 1814			be32_to_cpu(dev_entry->ioa_data[4]));
 1815	}
 1816}
 1817
 1818/**
 1819 * ipr_log_enhanced_array_error - Log an array configuration error.
 1820 * @ioa_cfg:	ioa config struct
 1821 * @hostrcb:	hostrcb struct
 1822 *
 1823 * Return value:
 1824 * 	none
 1825 **/
 1826static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
 1827					 struct ipr_hostrcb *hostrcb)
 1828{
 1829	int i, num_entries;
 1830	struct ipr_hostrcb_type_14_error *error;
 1831	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
 1832	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
 1833
 1834	error = &hostrcb->hcam.u.error.u.type_14_error;
 1835
 1836	ipr_err_separator;
 1837
 1838	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
 1839		error->protection_level,
 1840		ioa_cfg->host->host_no,
 1841		error->last_func_vset_res_addr.bus,
 1842		error->last_func_vset_res_addr.target,
 1843		error->last_func_vset_res_addr.lun);
 1844
 1845	ipr_err_separator;
 1846
 1847	array_entry = error->array_member;
 1848	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 1849			    ARRAY_SIZE(error->array_member));
 1850
 1851	for (i = 0; i < num_entries; i++, array_entry++) {
 1852		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
 1853			continue;
 1854
 1855		if (be32_to_cpu(error->exposed_mode_adn) == i)
 1856			ipr_err("Exposed Array Member %d:\n", i);
 1857		else
 1858			ipr_err("Array Member %d:\n", i);
 1859
 1860		ipr_log_ext_vpd(&array_entry->vpd);
 1861		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
 1862		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
 1863				 "Expected Location");
 1864
 1865		ipr_err_separator;
 1866	}
 1867}
 1868
 1869/**
 1870 * ipr_log_array_error - Log an array configuration error.
 1871 * @ioa_cfg:	ioa config struct
 1872 * @hostrcb:	hostrcb struct
 1873 *
 1874 * Return value:
 1875 * 	none
 1876 **/
 1877static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
 1878				struct ipr_hostrcb *hostrcb)
 1879{
 1880	int i;
 1881	struct ipr_hostrcb_type_04_error *error;
 1882	struct ipr_hostrcb_array_data_entry *array_entry;
 1883	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
 1884
 1885	error = &hostrcb->hcam.u.error.u.type_04_error;
 1886
 1887	ipr_err_separator;
 1888
 1889	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
 1890		error->protection_level,
 1891		ioa_cfg->host->host_no,
 1892		error->last_func_vset_res_addr.bus,
 1893		error->last_func_vset_res_addr.target,
 1894		error->last_func_vset_res_addr.lun);
 1895
 1896	ipr_err_separator;
 1897
 1898	array_entry = error->array_member;
 1899
 1900	for (i = 0; i < 18; i++) {
 1901		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
 1902			continue;
 1903
 1904		if (be32_to_cpu(error->exposed_mode_adn) == i)
 1905			ipr_err("Exposed Array Member %d:\n", i);
 1906		else
 1907			ipr_err("Array Member %d:\n", i);
 1908
 1909		ipr_log_vpd(&array_entry->vpd);
 1910
 1911		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
 1912		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
 1913				 "Expected Location");
 1914
 1915		ipr_err_separator;
 1916
 1917		if (i == 9)
 1918			array_entry = error->array_member2;
 1919		else
 1920			array_entry++;
 1921	}
 1922}
 1923
 1924/**
 1925 * ipr_log_hex_data - Log additional hex IOA error data.
 1926 * @ioa_cfg:	ioa config struct
 1927 * @data:		IOA error data
 1928 * @len:		data length
 1929 *
 1930 * Return value:
 1931 * 	none
 1932 **/
 1933static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
 1934{
 1935	int i;
 1936
 1937	if (len == 0)
 1938		return;
 1939
 1940	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
 1941		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
 1942
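	/* len is in bytes; each pass prints four 32-bit words on one line,
	 * prefixed with the byte offset of the first word.
	 */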
 1943	for (i = 0; i < len / 4; i += 4) {
 1944		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
 1945			be32_to_cpu(data[i]),
 1946			be32_to_cpu(data[i+1]),
 1947			be32_to_cpu(data[i+2]),
 1948			be32_to_cpu(data[i+3]));
 1949	}
 1950}
 1951
 1952/**
 1953 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 1954 * @ioa_cfg:	ioa config struct
 1955 * @hostrcb:	hostrcb struct
 1956 *
 1957 * Return value:
 1958 * 	none
 1959 **/
 1960static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 1961					    struct ipr_hostrcb *hostrcb)
 1962{
 1963	struct ipr_hostrcb_type_17_error *error;
 1964
 1965	if (ioa_cfg->sis64)
 1966		error = &hostrcb->hcam.u.error64.u.type_17_error;
 1967	else
 1968		error = &hostrcb->hcam.u.error.u.type_17_error;
 1969
 1970	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 1971	strim(error->failure_reason);
 1972
 1973	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
 1974		     be32_to_cpu(hostrcb->hcam.u.error.prc));
 1975	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
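	/* Dump whatever part of the HCAM buffer lies past the fixed
	 * headers, i.e. the total length minus the offset of data[].
	 */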
 1976	ipr_log_hex_data(ioa_cfg, error->data,
 1977			 be32_to_cpu(hostrcb->hcam.length) -
 1978			 (offsetof(struct ipr_hostrcb_error, u) +
 1979			  offsetof(struct ipr_hostrcb_type_17_error, data)));
 1980}
 1981
 1982/**
 1983 * ipr_log_dual_ioa_error - Log a dual adapter error.
 1984 * @ioa_cfg:	ioa config struct
 1985 * @hostrcb:	hostrcb struct
 1986 *
 1987 * Return value:
 1988 * 	none
 1989 **/
 1990static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 1991				   struct ipr_hostrcb *hostrcb)
 1992{
 1993	struct ipr_hostrcb_type_07_error *error;
 1994
 1995	error = &hostrcb->hcam.u.error.u.type_07_error;
 1996	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 1997	strim(error->failure_reason);
 1998
 1999	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
 2000		     be32_to_cpu(hostrcb->hcam.u.error.prc));
 2001	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
 2002	ipr_log_hex_data(ioa_cfg, error->data,
 2003			 be32_to_cpu(hostrcb->hcam.length) -
 2004			 (offsetof(struct ipr_hostrcb_error, u) +
 2005			  offsetof(struct ipr_hostrcb_type_07_error, data)));
 2006}
 2007
 2008static const struct {
 2009	u8 active;
 2010	char *desc;
 2011} path_active_desc[] = {
 2012	{ IPR_PATH_NO_INFO, "Path" },
 2013	{ IPR_PATH_ACTIVE, "Active path" },
 2014	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
 2015};
 2016
 2017static const struct {
 2018	u8 state;
 2019	char *desc;
 2020} path_state_desc[] = {
 2021	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
 2022	{ IPR_PATH_HEALTHY, "is healthy" },
 2023	{ IPR_PATH_DEGRADED, "is degraded" },
 2024	{ IPR_PATH_FAILED, "is failed" }
 2025};
 2026
 2027/**
 2028 * ipr_log_fabric_path - Log a fabric path error
 2029 * @hostrcb:	hostrcb struct
 2030 * @fabric:		fabric descriptor
 2031 *
 2032 * Return value:
 2033 * 	none
 2034 **/
 2035static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
 2036				struct ipr_hostrcb_fabric_desc *fabric)
 2037{
 2038	int i, j;
 2039	u8 path_state = fabric->path_state;
 2040	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
 2041	u8 state = path_state & IPR_PATH_STATE_MASK;
 2042
 2043	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
 2044		if (path_active_desc[i].active != active)
 2045			continue;
 2046
 2047		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
 2048			if (path_state_desc[j].state != state)
 2049				continue;
 2050
 2051			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
 2052				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
 2053					     path_active_desc[i].desc, path_state_desc[j].desc,
 2054					     fabric->ioa_port);
 2055			} else if (fabric->cascaded_expander == 0xff) {
 2056				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
 2057					     path_active_desc[i].desc, path_state_desc[j].desc,
 2058					     fabric->ioa_port, fabric->phy);
 2059			} else if (fabric->phy == 0xff) {
 2060				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
 2061					     path_active_desc[i].desc, path_state_desc[j].desc,
 2062					     fabric->ioa_port, fabric->cascaded_expander);
 2063			} else {
 2064				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
 2065					     path_active_desc[i].desc, path_state_desc[j].desc,
 2066					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
 2067			}
 2068			return;
 2069		}
 2070	}
 2071
 2072	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
 2073		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
 2074}
 2075
 2076/**
 2077 * ipr_log64_fabric_path - Log a fabric path error
 2078 * @hostrcb:	hostrcb struct
 2079 * @fabric:		fabric descriptor
 2080 *
 2081 * Return value:
 2082 * 	none
 2083 **/
 2084static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
 2085				  struct ipr_hostrcb64_fabric_desc *fabric)
 2086{
 2087	int i, j;
 2088	u8 path_state = fabric->path_state;
 2089	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
 2090	u8 state = path_state & IPR_PATH_STATE_MASK;
 2091	char buffer[IPR_MAX_RES_PATH_LENGTH];
 2092
 2093	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
 2094		if (path_active_desc[i].active != active)
 2095			continue;
 2096
 2097		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
 2098			if (path_state_desc[j].state != state)
 2099				continue;
 2100
 2101			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
 2102				     path_active_desc[i].desc, path_state_desc[j].desc,
 2103				     ipr_format_res_path(hostrcb->ioa_cfg,
 2104						fabric->res_path,
 2105						buffer, sizeof(buffer)));
 2106			return;
 2107		}
 2108	}
 2109
 2110	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
 2111		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
 2112				    buffer, sizeof(buffer)));
 2113}
 2114
 2115static const struct {
 2116	u8 type;
 2117	char *desc;
 2118} path_type_desc[] = {
 2119	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
 2120	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
 2121	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
 2122	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
 2123};
 2124
 2125static const struct {
 2126	u8 status;
 2127	char *desc;
 2128} path_status_desc[] = {
 2129	{ IPR_PATH_CFG_NO_PROB, "Functional" },
 2130	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
 2131	{ IPR_PATH_CFG_FAILED, "Failed" },
 2132	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
 2133	{ IPR_PATH_NOT_DETECTED, "Missing" },
 2134	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
 2135};
 2136
 2137static const char *link_rate[] = {
 2138	"unknown",
 2139	"disabled",
 2140	"phy reset problem",
 2141	"spinup hold",
 2142	"port selector",
 2143	"unknown",
 2144	"unknown",
 2145	"unknown",
 2146	"1.5Gbps",
 2147	"3.0Gbps",
 2148	"unknown",
 2149	"unknown",
 2150	"unknown",
 2151	"unknown",
 2152	"unknown",
 2153	"unknown"
 2154};
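
/* link_rate[] is indexed by the 4-bit SAS link rate field; entries 0x8
 * and 0x9 are the negotiated 1.5 and 3.0 Gbps rates.
 */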
 2155
 2156/**
 2157 * ipr_log_path_elem - Log a fabric path element.
 2158 * @hostrcb:	hostrcb struct
 2159 * @cfg:		fabric path element struct
 2160 *
 2161 * Return value:
 2162 * 	none
 2163 **/
 2164static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
 2165			      struct ipr_hostrcb_config_element *cfg)
 2166{
 2167	int i, j;
 2168	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
 2169	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
 2170
 2171	if (type == IPR_PATH_CFG_NOT_EXIST)
 2172		return;
 2173
 2174	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
 2175		if (path_type_desc[i].type != type)
 2176			continue;
 2177
 2178		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
 2179			if (path_status_desc[j].status != status)
 2180				continue;
 2181
 2182			if (type == IPR_PATH_CFG_IOA_PORT) {
 2183				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
 2184					     path_status_desc[j].desc, path_type_desc[i].desc,
 2185					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2186					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2187			} else {
 2188				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
 2189					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
 2190						     path_status_desc[j].desc, path_type_desc[i].desc,
 2191						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2192						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2193				} else if (cfg->cascaded_expander == 0xff) {
 2194					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
 2195						     "WWN=%08X%08X\n", path_status_desc[j].desc,
 2196						     path_type_desc[i].desc, cfg->phy,
 2197						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2198						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2199				} else if (cfg->phy == 0xff) {
 2200					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
 2201						     "WWN=%08X%08X\n", path_status_desc[j].desc,
 2202						     path_type_desc[i].desc, cfg->cascaded_expander,
 2203						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2204						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2205				} else {
 2206					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
 2207						     "WWN=%08X%08X\n", path_status_desc[j].desc,
 2208						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
 2209						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2210						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2211				}
 2212			}
 2213			return;
 2214		}
 2215	}
 2216
 2217	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
 2218		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
 2219		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2220		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2221}
 2222
 2223/**
 2224 * ipr_log64_path_elem - Log a fabric path element.
 2225 * @hostrcb:	hostrcb struct
 2226 * @cfg:		fabric path element struct
 2227 *
 2228 * Return value:
 2229 * 	none
 2230 **/
 2231static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
 2232				struct ipr_hostrcb64_config_element *cfg)
 2233{
 2234	int i, j;
 2235	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
 2236	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
 2237	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
 2238	char buffer[IPR_MAX_RES_PATH_LENGTH];
 2239
 2240	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
 2241		return;
 2242
 2243	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
 2244		if (path_type_desc[i].type != type)
 2245			continue;
 2246
 2247		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
 2248			if (path_status_desc[j].status != status)
 2249				continue;
 2250
 2251			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
 2252				     path_status_desc[j].desc, path_type_desc[i].desc,
 2253				     ipr_format_res_path(hostrcb->ioa_cfg,
 2254					cfg->res_path, buffer, sizeof(buffer)),
 2255					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2256					be32_to_cpu(cfg->wwid[0]),
 2257					be32_to_cpu(cfg->wwid[1]));
 2258			return;
 2259		}
 2260	}
 2261	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
 2262		     "WWN=%08X%08X\n", cfg->type_status,
 2263		     ipr_format_res_path(hostrcb->ioa_cfg,
 2264			cfg->res_path, buffer, sizeof(buffer)),
 2265			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
 2266			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 2267}
 2268
 2269/**
 2270 * ipr_log_fabric_error - Log a fabric error.
 2271 * @ioa_cfg:	ioa config struct
 2272 * @hostrcb:	hostrcb struct
 2273 *
 2274 * Return value:
 2275 * 	none
 2276 **/
 2277static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
 2278				 struct ipr_hostrcb *hostrcb)
 2279{
 2280	struct ipr_hostrcb_type_20_error *error;
 2281	struct ipr_hostrcb_fabric_desc *fabric;
 2282	struct ipr_hostrcb_config_element *cfg;
 2283	int i, add_len;
 2284
 2285	error = &hostrcb->hcam.u.error.u.type_20_error;
 2286	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 2287	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
 2288
 2289	add_len = be32_to_cpu(hostrcb->hcam.length) -
 2290		(offsetof(struct ipr_hostrcb_error, u) +
 2291		 offsetof(struct ipr_hostrcb_type_20_error, desc));
 2292
 2293	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
 2294		ipr_log_fabric_path(hostrcb, fabric);
 2295		for_each_fabric_cfg(fabric, cfg)
 2296			ipr_log_path_elem(hostrcb, cfg);
 2297
 2298		add_len -= be16_to_cpu(fabric->length);
 2299		fabric = (struct ipr_hostrcb_fabric_desc *)
 2300			((unsigned long)fabric + be16_to_cpu(fabric->length));
 2301	}
 2302
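	/* Whatever remains past the fabric descriptors is dumped raw */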
 2303	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
 2304}
 2305
 2306/**
 2307 * ipr_log_sis64_array_error - Log a sis64 array error.
 2308 * @ioa_cfg:	ioa config struct
 2309 * @hostrcb:	hostrcb struct
 2310 *
 2311 * Return value:
 2312 * 	none
 2313 **/
 2314static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
 2315				      struct ipr_hostrcb *hostrcb)
 2316{
 2317	int i, num_entries;
 2318	struct ipr_hostrcb_type_24_error *error;
 2319	struct ipr_hostrcb64_array_data_entry *array_entry;
 2320	char buffer[IPR_MAX_RES_PATH_LENGTH];
 2321	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
 2322
 2323	error = &hostrcb->hcam.u.error64.u.type_24_error;
 2324
 2325	ipr_err_separator;
 2326
 2327	ipr_err("RAID %s Array Configuration: %s\n",
 2328		error->protection_level,
 2329		ipr_format_res_path(ioa_cfg, error->last_res_path,
 2330			buffer, sizeof(buffer)));
 2331
 2332	ipr_err_separator;
 2333
 2334	array_entry = error->array_member;
 2335	num_entries = min_t(u32, error->num_entries,
 2336			    ARRAY_SIZE(error->array_member));
 2337
 2338	for (i = 0; i < num_entries; i++, array_entry++) {
 2339
 2340		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
 2341			continue;
 2342
 2343		if (error->exposed_mode_adn == i)
 2344			ipr_err("Exposed Array Member %d:\n", i);
 2345		else
 2346			ipr_err("Array Member %d:\n", i);
 2347
 2349		ipr_log_ext_vpd(&array_entry->vpd);
 2350		ipr_err("Current Location: %s\n",
 2351			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
 2352				buffer, sizeof(buffer)));
 2353		ipr_err("Expected Location: %s\n",
 2354			 ipr_format_res_path(ioa_cfg,
 2355				array_entry->expected_res_path,
 2356				buffer, sizeof(buffer)));
 2357
 2358		ipr_err_separator;
 2359	}
 2360}
 2361
 2362/**
 2363 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 2364 * @ioa_cfg:	ioa config struct
 2365 * @hostrcb:	hostrcb struct
 2366 *
 2367 * Return value:
 2368 * 	none
 2369 **/
 2370static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
 2371				       struct ipr_hostrcb *hostrcb)
 2372{
 2373	struct ipr_hostrcb_type_30_error *error;
 2374	struct ipr_hostrcb64_fabric_desc *fabric;
 2375	struct ipr_hostrcb64_config_element *cfg;
 2376	int i, add_len;
 2377
 2378	error = &hostrcb->hcam.u.error64.u.type_30_error;
 2379
 2380	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 2381	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
 2382
 2383	add_len = be32_to_cpu(hostrcb->hcam.length) -
 2384		(offsetof(struct ipr_hostrcb64_error, u) +
 2385		 offsetof(struct ipr_hostrcb_type_30_error, desc));
 2386
 2387	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
 2388		ipr_log64_fabric_path(hostrcb, fabric);
 2389		for_each_fabric_cfg(fabric, cfg)
 2390			ipr_log64_path_elem(hostrcb, cfg);
 2391
 2392		add_len -= be16_to_cpu(fabric->length);
 2393		fabric = (struct ipr_hostrcb64_fabric_desc *)
 2394			((unsigned long)fabric + be16_to_cpu(fabric->length));
 2395	}
 2396
 2397	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
 2398}
 2399
 2400/**
 2401 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
 2402 * @ioa_cfg:    ioa config struct
 2403 * @hostrcb:    hostrcb struct
 2404 *
 2405 * Return value:
 2406 *      none
 2407 **/
 2408static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
 2409				       struct ipr_hostrcb *hostrcb)
 2410{
 2411	struct ipr_hostrcb_type_41_error *error;
 2412
 2413	error = &hostrcb->hcam.u.error64.u.type_41_error;
 2414
 2415	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 2416	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
 2417	ipr_log_hex_data(ioa_cfg, error->data,
 2418			 be32_to_cpu(hostrcb->hcam.length) -
 2419			 (offsetof(struct ipr_hostrcb_error, u) +
 2420			  offsetof(struct ipr_hostrcb_type_41_error, data)));
 2421}

 2422/**
 2423 * ipr_log_generic_error - Log an adapter error.
 2424 * @ioa_cfg:	ioa config struct
 2425 * @hostrcb:	hostrcb struct
 2426 *
 2427 * Return value:
 2428 * 	none
 2429 **/
 2430static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
 2431				  struct ipr_hostrcb *hostrcb)
 2432{
 2433	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
 2434			 be32_to_cpu(hostrcb->hcam.length));
 2435}
 2436
 2437/**
 2438 * ipr_log_sis64_device_error - Log a sis64 device error.
 2439 * @ioa_cfg:	ioa config struct
 2440 * @hostrcb:	hostrcb struct
 2441 *
 2442 * Return value:
 2443 * 	none
 2444 **/
 2445static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
 2446					 struct ipr_hostrcb *hostrcb)
 2447{
 2448	struct ipr_hostrcb_type_21_error *error;
 2449	char buffer[IPR_MAX_RES_PATH_LENGTH];
 2450
 2451	error = &hostrcb->hcam.u.error64.u.type_21_error;
 2452
 2453	ipr_err("-----Failing Device Information-----\n");
 2454	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
 2455		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
 2456		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
 2457	ipr_err("Device Resource Path: %s\n",
 2458		__ipr_format_res_path(error->res_path,
 2459				      buffer, sizeof(buffer)));
 2460	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
 2461	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
 2462	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
 2463	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
 2464	ipr_err("SCSI Sense Data:\n");
 2465	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
 2466	ipr_err("SCSI Command Descriptor Block:\n");
 2467	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
 2468
 2469	ipr_err("Additional IOA Data:\n");
 2470	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
 2471}
 2472
 2473/**
 2474 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 2475 * @ioasc:	IOASC
 2476 *
 2477 * This function will return the index into the ipr_error_table
 2478 * for the specified IOASC. If the IOASC is not in the table,
 2479 * 0 will be returned, which points to the entry used for unknown errors.
 2480 *
 2481 * Return value:
 2482 * 	index into the ipr_error_table
 2483 **/
 2484static u32 ipr_get_error(u32 ioasc)
 2485{
 2486	int i;
 2487
 2488	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
 2489		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
 2490			return i;
 2491
 2492	return 0;
 2493}
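
/* Typical use (a sketch mirroring ipr_handle_log_data() below):
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */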
 2494
 2495/**
 2496 * ipr_handle_log_data - Log an adapter error.
 2497 * @ioa_cfg:	ioa config struct
 2498 * @hostrcb:	hostrcb struct
 2499 *
 2500 * This function logs an adapter error to the system.
 2501 *
 2502 * Return value:
 2503 * 	none
 2504 **/
 2505static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 2506				struct ipr_hostrcb *hostrcb)
 2507{
 2508	u32 ioasc;
 2509	int error_index;
 2510	struct ipr_hostrcb_type_21_error *error;
 2511
 2512	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
 2513		return;
 2514
 2515	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
 2516		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
 2517
 2518	if (ioa_cfg->sis64)
 2519		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
 2520	else
 2521		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 2522
 2523	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
 2524	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
 2525		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
 2526		scsi_report_bus_reset(ioa_cfg->host,
 2527				      hostrcb->hcam.u.error.fd_res_addr.bus);
 2528	}
 2529
 2530	error_index = ipr_get_error(ioasc);
 2531
 2532	if (!ipr_error_table[error_index].log_hcam)
 2533		return;
 2534
 2535	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
 2536	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
 2537		error = &hostrcb->hcam.u.error64.u.type_21_error;
 2538
 2539		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
 2540		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
 2541			return;
 2542	}
 2543
 2544	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 2545
 2546	/* Set indication we have logged an error */
 2547	ioa_cfg->errors_logged++;
 2548
 2549	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
 2550		return;
 2551	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
 2552		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
 2553
 2554	switch (hostrcb->hcam.overlay_id) {
 2555	case IPR_HOST_RCB_OVERLAY_ID_2:
 2556		ipr_log_cache_error(ioa_cfg, hostrcb);
 2557		break;
 2558	case IPR_HOST_RCB_OVERLAY_ID_3:
 2559		ipr_log_config_error(ioa_cfg, hostrcb);
 2560		break;
 2561	case IPR_HOST_RCB_OVERLAY_ID_4:
 2562	case IPR_HOST_RCB_OVERLAY_ID_6:
 2563		ipr_log_array_error(ioa_cfg, hostrcb);
 2564		break;
 2565	case IPR_HOST_RCB_OVERLAY_ID_7:
 2566		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
 2567		break;
 2568	case IPR_HOST_RCB_OVERLAY_ID_12:
 2569		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
 2570		break;
 2571	case IPR_HOST_RCB_OVERLAY_ID_13:
 2572		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
 2573		break;
 2574	case IPR_HOST_RCB_OVERLAY_ID_14:
 2575	case IPR_HOST_RCB_OVERLAY_ID_16:
 2576		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
 2577		break;
 2578	case IPR_HOST_RCB_OVERLAY_ID_17:
 2579		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
 2580		break;
 2581	case IPR_HOST_RCB_OVERLAY_ID_20:
 2582		ipr_log_fabric_error(ioa_cfg, hostrcb);
 2583		break;
 2584	case IPR_HOST_RCB_OVERLAY_ID_21:
 2585		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
 2586		break;
 2587	case IPR_HOST_RCB_OVERLAY_ID_23:
 2588		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
 2589		break;
 2590	case IPR_HOST_RCB_OVERLAY_ID_24:
 2591	case IPR_HOST_RCB_OVERLAY_ID_26:
 2592		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
 2593		break;
 2594	case IPR_HOST_RCB_OVERLAY_ID_30:
 2595		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
 2596		break;
 2597	case IPR_HOST_RCB_OVERLAY_ID_41:
 2598		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
 2599		break;
 2600	case IPR_HOST_RCB_OVERLAY_ID_1:
 2601	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 2602	default:
 2603		ipr_log_generic_error(ioa_cfg, hostrcb);
 2604		break;
 2605	}
 2606}
 2607
 2608static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
 2609{
 2610	struct ipr_hostrcb *hostrcb;
 2611
 2612	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
 2613					struct ipr_hostrcb, queue);
 2614
 2615	if (unlikely(!hostrcb)) {
 2616		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
 2617		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
 2618						struct ipr_hostrcb, queue);
 2619	}
 2620
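	/* Note: this relies on the report queue never being empty when the
	 * free queue is; hostrcb is not checked for NULL before use here.
	 */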
 2621	list_del_init(&hostrcb->queue);
 2622	return hostrcb;
 2623}
 2624
 2625/**
 2626 * ipr_process_error - Op done function for an adapter error log.
 2627 * @ipr_cmd:	ipr command struct
 2628 *
 2629 * This function is the op done function for an error log host
 2630 * controlled async message (HCAM) from the adapter. It will log
 2631 * the error and send the HCAM back to the adapter.
 2632 *
 2633 * Return value:
 2634 * 	none
 2635 **/
 2636static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
 2637{
 2638	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 2639	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 2640	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 2641	u32 fd_ioasc;
 2642
 2643	if (ioa_cfg->sis64)
 2644		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
 2645	else
 2646		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 2647
 2648	list_del_init(&hostrcb->queue);
 2649	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 2650
 2651	if (!ioasc) {
 2652		ipr_handle_log_data(ioa_cfg, hostrcb);
 2653		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
 2654			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 2655	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
 2656		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
 2657		dev_err(&ioa_cfg->pdev->dev,
 2658			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
 2659	}
 2660
 2661	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
 2662	schedule_work(&ioa_cfg->work_q);
 2663	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
 2664
 2665	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 2666}
 2667
 2668/**
 2669 * ipr_timeout - An internally generated op has timed out.
 2670 * @t:		Timer context used to fetch the ipr command struct
 2671 *
 2672 * This function blocks host requests and initiates an
 2673 * adapter reset.
 2674 *
 2675 * Return value:
 2676 * 	none
 2677 **/
 2678static void ipr_timeout(struct timer_list *t)
 2679{
 2680	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
 2681	unsigned long lock_flags = 0;
 2682	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 2683
 2684	ENTER;
 2685	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 2686
 2687	ioa_cfg->errors_logged++;
 2688	dev_err(&ioa_cfg->pdev->dev,
 2689		"Adapter being reset due to command timeout.\n");
 2690
 2691	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 2692		ioa_cfg->sdt_state = GET_DUMP;
 2693
 2694	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
 2695		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 2696
 2697	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 2698	LEAVE;
 2699}
 2700
 2701/**
 2702 * ipr_oper_timeout - Adapter timed out transitioning to operational
 2703 * @t:		Timer context used to fetch the ipr command struct
 2704 *
 2705 * This function blocks host requests and initiates an
 2706 * adapter reset.
 2707 *
 2708 * Return value:
 2709 * 	none
 2710 **/
 2711static void ipr_oper_timeout(struct timer_list *t)
 2712{
 2713	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
 2714	unsigned long lock_flags = 0;
 2715	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 2716
 2717	ENTER;
 2718	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 2719
 2720	ioa_cfg->errors_logged++;
 2721	dev_err(&ioa_cfg->pdev->dev,
 2722		"Adapter timed out transitioning to operational.\n");
 2723
 2724	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 2725		ioa_cfg->sdt_state = GET_DUMP;
 2726
 2727	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
 2728		if (ipr_fastfail)
 2729			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
 2730		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 2731	}
 2732
 2733	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 2734	LEAVE;
 2735}
 2736
 2737/**
 2738 * ipr_find_ses_entry - Find matching SES in SES table
 2739 * @res:	resource entry struct of SES
 2740 *
 2741 * Return value:
 2742 * 	pointer to SES table entry / NULL on failure
 2743 **/
 2744static const struct ipr_ses_table_entry *
 2745ipr_find_ses_entry(struct ipr_resource_entry *res)
 2746{
 2747	int i, j, matches;
 2748	struct ipr_std_inq_vpids *vpids;
 2749	const struct ipr_ses_table_entry *ste = ipr_ses_table;
 2750
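	/* An 'X' in compare_product_id_byte marks a byte that must match;
	 * any other value means that byte of the product id is ignored.
	 */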
 2751	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
 2752		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
 2753			if (ste->compare_product_id_byte[j] == 'X') {
 2754				vpids = &res->std_inq_data.vpids;
 2755				if (vpids->product_id[j] == ste->product_id[j])
 2756					matches++;
 2757				else
 2758					break;
 2759			} else
 2760				matches++;
 2761		}
 2762
 2763		if (matches == IPR_PROD_ID_LEN)
 2764			return ste;
 2765	}
 2766
 2767	return NULL;
 2768}
 2769
 2770/**
 2771 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 2772 * @ioa_cfg:	ioa config struct
 2773 * @bus:		SCSI bus
 2774 * @bus_width:	bus width
 2775 *
 2776 * Return value:
 2777 *	SCSI bus speed in units of 100KHz; e.g. 1600 means 160 MHz.
 2778 *	On a 2-byte (16-bit) wide SCSI bus the maximum transfer rate
 2779 *	in MB/sec is twice the bus speed in MHz (e.g. a wide-enabled
 2780 *	bus at 160 MHz moves at most 320 MB/sec).
 2781 **/
 2782static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
 2783{
 2784	struct ipr_resource_entry *res;
 2785	const struct ipr_ses_table_entry *ste;
 2786	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
 2787
 2788	/* Loop through each config table entry in the config table buffer */
 2789	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 2790		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
 2791			continue;
 2792
 2793		if (bus != res->bus)
 2794			continue;
 2795
 2796		if (!(ste = ipr_find_ses_entry(res)))
 2797			continue;
 2798
 2799		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
 2800	}
 2801
 2802	return max_xfer_rate;
 2803}
 2804
 2805/**
 2806 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 2807 * @ioa_cfg:		ioa config struct
 2808 * @max_delay:		max delay in micro-seconds to wait
 2809 *
 2810 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 2811 *
 2812 * Return value:
 2813 * 	0 on success / other on failure
 2814 **/
 2815static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
 2816{
 2817	volatile u32 pcii_reg;
 2818	int delay = 1;
 2819
 2820	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
 2821	while (delay < max_delay) {
 2822		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 2823
 2824		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
 2825			return 0;
 2826
 2827		/* udelay cannot be used if delay is more than a few milliseconds */
 2828		if ((delay / 1000) > MAX_UDELAY_MS)
 2829			mdelay(delay / 1000);
 2830		else
 2831			udelay(delay);
 2832
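		/* Exponential backoff: double the wait on each pass */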
 2833		delay += delay;
 2834	}
 2835	return -EIO;
 2836}
 2837
 2838/**
 2839 * ipr_get_sis64_dump_data_section - Dump IOA memory
 2840 * @ioa_cfg:			ioa config struct
 2841 * @start_addr:			adapter address to dump
 2842 * @dest:			destination kernel buffer
 2843 * @length_in_words:		length to dump in 4 byte words
 2844 *
 2845 * Return value:
 2846 * 	0 on success
 2847 **/
 2848static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 2849					   u32 start_addr,
 2850					   __be32 *dest, u32 length_in_words)
 2851{
 2852	int i;
 2853
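	/* Indirect access: write the IOA address, then read the word back */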
 2854	for (i = 0; i < length_in_words; i++) {
 2855		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
 2856		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
 2857		dest++;
 2858	}
 2859
 2860	return 0;
 2861}
 2862
 2863/**
 2864 * ipr_get_ldump_data_section - Dump IOA memory
 2865 * @ioa_cfg:			ioa config struct
 2866 * @start_addr:			adapter address to dump
 2867 * @dest:				destination kernel buffer
 2868 * @length_in_words:	length to dump in 4 byte words
 2869 *
 2870 * Return value:
 2871 * 	0 on success / -EIO on failure
 2872 **/
 2873static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
 2874				      u32 start_addr,
 2875				      __be32 *dest, u32 length_in_words)
 2876{
 2877	volatile u32 temp_pcii_reg;
 2878	int i, delay = 0;
 2879
 2880	if (ioa_cfg->sis64)
 2881		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
 2882						       dest, length_in_words);
 2883
 2884	/* Write IOA interrupt reg starting LDUMP state  */
 2885	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
 2886	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 2887
 2888	/* Wait for IO debug acknowledge */
 2889	if (ipr_wait_iodbg_ack(ioa_cfg,
 2890			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
 2891		dev_err(&ioa_cfg->pdev->dev,
 2892			"IOA dump long data transfer timeout\n");
 2893		return -EIO;
 2894	}
 2895
 2896	/* Signal LDUMP interlocked - clear IO debug ack */
 2897	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 2898	       ioa_cfg->regs.clr_interrupt_reg);
 2899
 2900	/* Write Mailbox with starting address */
 2901	writel(start_addr, ioa_cfg->ioa_mailbox);
 2902
 2903	/* Signal address valid - clear IOA Reset alert */
 2904	writel(IPR_UPROCI_RESET_ALERT,
 2905	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 2906
 2907	for (i = 0; i < length_in_words; i++) {
 2908		/* Wait for IO debug acknowledge */
 2909		if (ipr_wait_iodbg_ack(ioa_cfg,
 2910				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
 2911			dev_err(&ioa_cfg->pdev->dev,
 2912				"IOA dump short data transfer timeout\n");
 2913			return -EIO;
 2914		}
 2915
 2916		/* Read data from mailbox and increment destination pointer */
 2917		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
 2918		dest++;
 2919
 2920		/* For all but the last word of data, signal data received */
 2921		if (i < (length_in_words - 1)) {
 2922			/* Signal dump data received - Clear IO debug Ack */
 2923			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 2924			       ioa_cfg->regs.clr_interrupt_reg);
 2925		}
 2926	}
 2927
 2928	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
 2929	writel(IPR_UPROCI_RESET_ALERT,
 2930	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 2931
 2932	writel(IPR_UPROCI_IO_DEBUG_ALERT,
 2933	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 2934
 2935	/* Signal dump data received - Clear IO debug Ack */
 2936	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 2937	       ioa_cfg->regs.clr_interrupt_reg);
 2938
 2939	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
 2940	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
 2941		temp_pcii_reg =
 2942		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
 2943
 2944		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
 2945			return 0;
 2946
 2947		udelay(10);
 2948		delay += 10;
 2949	}
 2950
 2951	return 0;
 2952}
 2953
 2954#ifdef CONFIG_SCSI_IPR_DUMP
 2955/**
 2956 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 2957 * @ioa_cfg:		ioa config struct
 2958 * @pci_address:	adapter address
 2959 * @length:			length of data to copy
 2960 *
 2961 * Copy data from PCI adapter to kernel buffer.
 2962 * Note: length MUST be a 4 byte multiple
 2963 * Return value:
 2964 * 	0 on success / other on failure
 2965 **/
 2966static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
 2967			unsigned long pci_address, u32 length)
 2968{
 2969	int bytes_copied = 0;
 2970	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
 2971	__be32 *page;
 2972	unsigned long lock_flags = 0;
 2973	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
 2974
 2975	if (ioa_cfg->sis64)
 2976		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
 2977	else
 2978		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
 2979
 2980	while (bytes_copied < length &&
 2981	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
 2982		if (ioa_dump->page_offset >= PAGE_SIZE ||
 2983		    ioa_dump->page_offset == 0) {
 2984			page = (__be32 *)__get_free_page(GFP_ATOMIC);
 2985
 2986			if (!page) {
 2987				ipr_trace;
 2988				return bytes_copied;
 2989			}
 2990
 2991			ioa_dump->page_offset = 0;
 2992			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
 2993			ioa_dump->next_page_index++;
 2994		} else
 2995			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
 2996
 2997		rem_len = length - bytes_copied;
 2998		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
 2999		cur_len = min(rem_len, rem_page_len);
 3000
 3001		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3002		if (ioa_cfg->sdt_state == ABORT_DUMP) {
 3003			rc = -EIO;
 3004		} else {
 3005			rc = ipr_get_ldump_data_section(ioa_cfg,
 3006							pci_address + bytes_copied,
 3007							&page[ioa_dump->page_offset / 4],
 3008							(cur_len / sizeof(u32)));
 3009		}
 3010		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3011
 3012		if (!rc) {
 3013			ioa_dump->page_offset += cur_len;
 3014			bytes_copied += cur_len;
 3015		} else {
 3016			ipr_trace;
 3017			break;
 3018		}
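		/* Let other tasks run between chunks of the dump */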
 3019		schedule();
 3020	}
 3021
 3022	return bytes_copied;
 3023}
 3024
 3025/**
 3026 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 3027 * @hdr:	dump entry header struct
 3028 *
 3029 * Return value:
 3030 * 	nothing
 3031 **/
 3032static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
 3033{
 3034	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
 3035	hdr->num_elems = 1;
 3036	hdr->offset = sizeof(*hdr);
 3037	hdr->status = IPR_DUMP_STATUS_SUCCESS;
 3038}
 3039
 3040/**
 3041 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 3042 * @ioa_cfg:	ioa config struct
 3043 * @driver_dump:	driver dump struct
 3044 *
 3045 * Return value:
 3046 * 	nothing
 3047 **/
 3048static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
 3049				   struct ipr_driver_dump *driver_dump)
 3050{
 3051	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
 3052
 3053	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
 3054	driver_dump->ioa_type_entry.hdr.len =
 3055		sizeof(struct ipr_dump_ioa_type_entry) -
 3056		sizeof(struct ipr_dump_entry_header);
 3057	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
 3058	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
 3059	driver_dump->ioa_type_entry.type = ioa_cfg->type;
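	/* Pack major release, card type and the two minor bytes into one word */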
 3060	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
 3061		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
 3062		ucode_vpd->minor_release[1];
 3063	driver_dump->hdr.num_entries++;
 3064}
 3065
 3066/**
 3067 * ipr_dump_version_data - Fill in the driver version in the dump.
 3068 * @ioa_cfg:	ioa config struct
 3069 * @driver_dump:	driver dump struct
 3070 *
 3071 * Return value:
 3072 * 	nothing
 3073 **/
 3074static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
 3075				  struct ipr_driver_dump *driver_dump)
 3076{
 3077	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
 3078	driver_dump->version_entry.hdr.len =
 3079		sizeof(struct ipr_dump_version_entry) -
 3080		sizeof(struct ipr_dump_entry_header);
 3081	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
 3082	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
 3083	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
 3084	driver_dump->hdr.num_entries++;
 3085}
 3086
 3087/**
 3088 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 3089 * @ioa_cfg:	ioa config struct
 3090 * @driver_dump:	driver dump struct
 3091 *
 3092 * Return value:
 3093 * 	nothing
 3094 **/
 3095static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
 3096				   struct ipr_driver_dump *driver_dump)
 3097{
 3098	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
 3099	driver_dump->trace_entry.hdr.len =
 3100		sizeof(struct ipr_dump_trace_entry) -
 3101		sizeof(struct ipr_dump_entry_header);
 3102	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
 3103	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
 3104	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
 3105	driver_dump->hdr.num_entries++;
 3106}
 3107
 3108/**
 3109 * ipr_dump_location_data - Fill in the IOA location in the dump.
 3110 * @ioa_cfg:	ioa config struct
 3111 * @driver_dump:	driver dump struct
 3112 *
 3113 * Return value:
 3114 * 	nothing
 3115 **/
 3116static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
 3117				   struct ipr_driver_dump *driver_dump)
 3118{
 3119	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
 3120	driver_dump->location_entry.hdr.len =
 3121		sizeof(struct ipr_dump_location_entry) -
 3122		sizeof(struct ipr_dump_entry_header);
 3123	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
 3124	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
 3125	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
 3126	driver_dump->hdr.num_entries++;
 3127}
 3128
 3129/**
 3130 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 3131 * @ioa_cfg:	ioa config struct
 3132 * @dump:		dump struct
 3133 *
 3134 * Return value:
 3135 * 	nothing
 3136 **/
 3137static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 3138{
 3139	unsigned long start_addr, sdt_word;
 3140	unsigned long lock_flags = 0;
 3141	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
 3142	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
 3143	u32 num_entries, max_num_entries, start_off, end_off;
 3144	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
 3145	struct ipr_sdt *sdt;
 3146	int valid = 1;
 3147	int i;
 3148
 3149	ENTER;
 3150
 3151	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3152
 3153	if (ioa_cfg->sdt_state != READ_DUMP) {
 3154		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3155		return;
 3156	}
 3157
 3158	if (ioa_cfg->sis64) {
 3159		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3160		ssleep(IPR_DUMP_DELAY_SECONDS);
 3161		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3162	}
 3163
 3164	start_addr = readl(ioa_cfg->ioa_mailbox);
 3165
 3166	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
 3167		dev_err(&ioa_cfg->pdev->dev,
 3168			"Invalid dump table format: %lx\n", start_addr);
 3169		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3170		return;
 3171	}
 3172
 3173	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
 3174
 3175	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
 3176
 3177	/* Initialize the overall dump header */
 3178	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
 3179	driver_dump->hdr.num_entries = 1;
 3180	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
 3181	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
 3182	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
 3183	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
 3184
 3185	ipr_dump_version_data(ioa_cfg, driver_dump);
 3186	ipr_dump_location_data(ioa_cfg, driver_dump);
 3187	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
 3188	ipr_dump_trace_data(ioa_cfg, driver_dump);
 3189
 3190	/* Update dump_header */
 3191	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
 3192
 3193	/* IOA Dump entry */
 3194	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
 3195	ioa_dump->hdr.len = 0;
 3196	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
 3197	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
 3198
 3199	/* First entries in the sdt are actually a list of dump addresses and
 3200	 * lengths to gather the real dump data. sdt represents the pointer
 3201	 * to the IOA generated dump table. Dump data will be extracted
 3202	 * based on entries in this table. */
 3203	sdt = &ioa_dump->sdt;
 3204
 3205	if (ioa_cfg->sis64) {
 3206		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
 3207		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
 3208	} else {
 3209		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
 3210		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
 3211	}
 3212
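	/* Pull the SDT header plus the full entry array in one transfer */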
 3213	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
 3214			(max_num_entries * sizeof(struct ipr_sdt_entry));
 3215	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
 3216					bytes_to_copy / sizeof(__be32));
 3217
 3218	/* Smart Dump table is ready to use and the first entry is valid */
 3219	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
 3220	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
 3221		dev_err(&ioa_cfg->pdev->dev,
 3222			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
 3223			rc, be32_to_cpu(sdt->hdr.state));
 3224		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
 3225		ioa_cfg->sdt_state = DUMP_OBTAINED;
 3226		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3227		return;
 3228	}
 3229
 3230	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
 3231
 3232	if (num_entries > max_num_entries)
 3233		num_entries = max_num_entries;
 3234
 3235	/* Update dump length to the actual data to be copied */
 3236	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
 3237	if (ioa_cfg->sis64)
 3238		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
 3239	else
 3240		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
 3241
 3242	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3243
 3244	for (i = 0; i < num_entries; i++) {
 3245		if (ioa_dump->hdr.len > max_dump_size) {
 3246			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
 3247			break;
 3248		}
 3249
 3250		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
 3251			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
 3252			if (ioa_cfg->sis64)
 3253				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
 3254			else {
 3255				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
 3256				end_off = be32_to_cpu(sdt->entry[i].end_token);
 3257
 3258				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
 3259					bytes_to_copy = end_off - start_off;
 3260				else
 3261					valid = 0;
 3262			}
 3263			if (valid) {
 3264				if (bytes_to_copy > max_dump_size) {
 3265					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
 3266					continue;
 3267				}
 3268
 3269				/* Copy data from adapter to driver buffers */
 3270				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
 3271							    bytes_to_copy);
 3272
 3273				ioa_dump->hdr.len += bytes_copied;
 3274
 3275				if (bytes_copied != bytes_to_copy) {
 3276					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
 3277					break;
 3278				}
 3279			}
 3280		}
 3281	}
 3282
 3283	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
 3284
 3285	/* Update dump_header */
 3286	driver_dump->hdr.len += ioa_dump->hdr.len;
 3287	wmb();
 3288	ioa_cfg->sdt_state = DUMP_OBTAINED;
 3289	LEAVE;
 3290}
 3291
 3292#else
 3293#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
 3294#endif
 3295
 3296/**
 3297 * ipr_release_dump - Free adapter dump memory
 3298 * @kref:	kref struct
 3299 *
 3300 * Return value:
 3301 *	nothing
 3302 **/
 3303static void ipr_release_dump(struct kref *kref)
 3304{
 3305	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
 3306	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
 3307	unsigned long lock_flags = 0;
 3308	int i;
 3309
 3310	ENTER;
 3311	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3312	ioa_cfg->dump = NULL;
 3313	ioa_cfg->sdt_state = INACTIVE;
 3314	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3315
 3316	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
 3317		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
 3318
 3319	vfree(dump->ioa_dump.ioa_data);
 3320	kfree(dump);
 3321	LEAVE;
 3322}
 3323
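/**
 * ipr_add_remove_thread - Add and remove devices on the SCSI mid-layer
 * @work:	work struct
 *
 * Removes any resources flagged for deletion from the mid-layer, then
 * registers newly discovered resources, dropping the host lock around
 * each mid-layer call.
 *
 * Return value:
 * 	nothing
 **/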
 3324static void ipr_add_remove_thread(struct work_struct *work)
 3325{
 3326	unsigned long lock_flags;
 3327	struct ipr_resource_entry *res;
 3328	struct scsi_device *sdev;
 3329	struct ipr_ioa_cfg *ioa_cfg =
 3330		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
 3331	u8 bus, target, lun;
 3332	int did_work;
 3333
 3334	ENTER;
 3335	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3336
 3337restart:
 3338	do {
 3339		did_work = 0;
 3340		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 3341			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3342			return;
 3343		}
 3344
 3345		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 3346			if (res->del_from_ml && res->sdev) {
 3347				did_work = 1;
 3348				sdev = res->sdev;
 3349				if (!scsi_device_get(sdev)) {
 3350					if (!res->add_to_ml)
 3351						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
 3352					else
 3353						res->del_from_ml = 0;
 3354					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3355					scsi_remove_device(sdev);
 3356					scsi_device_put(sdev);
 3357					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3358				}
 3359				break;
 3360			}
 3361		}
 3362	} while (did_work);
 3363
 3364	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 3365		if (res->add_to_ml) {
 3366			bus = res->bus;
 3367			target = res->target;
 3368			lun = res->lun;
 3369			res->add_to_ml = 0;
 3370			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3371			scsi_add_device(ioa_cfg->host, bus, target, lun);
 3372			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3373			goto restart;
 3374		}
 3375	}
 3376
 3377	ioa_cfg->scan_done = 1;
 3378	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3379	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
 3380	LEAVE;
 3381}
 3382
 3383/**
 3384 * ipr_worker_thread - Worker thread
 * @work:		work struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 3389 * changes are detected by the adapter.
 3390 *
 3391 * Return value:
 3392 * 	nothing
 3393 **/
 3394static void ipr_worker_thread(struct work_struct *work)
 3395{
 3396	unsigned long lock_flags;
 3397	struct ipr_dump *dump;
 3398	struct ipr_ioa_cfg *ioa_cfg =
 3399		container_of(work, struct ipr_ioa_cfg, work_q);
 3400
 3401	ENTER;
 3402	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3403
 3404	if (ioa_cfg->sdt_state == READ_DUMP) {
 3405		dump = ioa_cfg->dump;
 3406		if (!dump) {
 3407			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3408			return;
 3409		}
 3410		kref_get(&dump->kref);
 3411		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3412		ipr_get_ioa_dump(ioa_cfg, dump);
 3413		kref_put(&dump->kref, ipr_release_dump);
 3414
 3415		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3416		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
 3417			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 3418		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3419		return;
 3420	}
 3421
 3422	if (ioa_cfg->scsi_unblock) {
 3423		ioa_cfg->scsi_unblock = 0;
 3424		ioa_cfg->scsi_blocked = 0;
 3425		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3426		scsi_unblock_requests(ioa_cfg->host);
 3427		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3428		if (ioa_cfg->scsi_blocked)
 3429			scsi_block_requests(ioa_cfg->host);
 3430	}
 3431
 3432	if (!ioa_cfg->scan_enabled) {
 3433		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3434		return;
 3435	}
 3436
 3437	schedule_work(&ioa_cfg->scsi_add_work_q);
 3438
 3439	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3440	LEAVE;
 3441}
 3442
 3443#ifdef CONFIG_SCSI_IPR_TRACE
 3444/**
 3445 * ipr_read_trace - Dump the adapter trace
 3446 * @filp:		open sysfs file
 3447 * @kobj:		kobject struct
 3448 * @bin_attr:		bin_attribute struct
 3449 * @buf:		buffer
 3450 * @off:		offset
 3451 * @count:		buffer size
 3452 *
 3453 * Return value:
 *	number of bytes read
 3455 **/
 3456static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
 3457			      struct bin_attribute *bin_attr,
 3458			      char *buf, loff_t off, size_t count)
 3459{
 3460	struct device *dev = container_of(kobj, struct device, kobj);
 3461	struct Scsi_Host *shost = class_to_shost(dev);
 3462	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3463	unsigned long lock_flags = 0;
 3464	ssize_t ret;
 3465
 3466	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3467	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
 3468				IPR_TRACE_SIZE);
 3469	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3470
 3471	return ret;
 3472}
 3473
 3474static struct bin_attribute ipr_trace_attr = {
 3475	.attr =	{
 3476		.name = "trace",
 3477		.mode = S_IRUGO,
 3478	},
 3479	.size = 0,
 3480	.read = ipr_read_trace,
 3481};
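/*
 * Usage sketch: when CONFIG_SCSI_IPR_TRACE is enabled, the trace buffer
 * can be captured through the host's sysfs bin attribute ("host0" below
 * is a placeholder for the actual host number):
 *
 *   dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 */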
 3482#endif
 3483
 3484/**
 3485 * ipr_show_fw_version - Show the firmware version
 3486 * @dev:	class device struct
 3487 * @buf:	buffer
 3488 *
 3489 * Return value:
 3490 *	number of bytes printed to buffer
 3491 **/
 3492static ssize_t ipr_show_fw_version(struct device *dev,
 3493				   struct device_attribute *attr, char *buf)
 3494{
 3495	struct Scsi_Host *shost = class_to_shost(dev);
 3496	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3497	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
 3498	unsigned long lock_flags = 0;
 3499	int len;
 3500
 3501	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3502	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
 3503		       ucode_vpd->major_release, ucode_vpd->card_type,
 3504		       ucode_vpd->minor_release[0],
 3505		       ucode_vpd->minor_release[1]);
 3506	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3507	return len;
 3508}
 3509
 3510static struct device_attribute ipr_fw_version_attr = {
 3511	.attr = {
 3512		.name =		"fw_version",
 3513		.mode =		S_IRUGO,
 3514	},
 3515	.show = ipr_show_fw_version,
 3516};
 3517
 3518/**
 3519 * ipr_show_log_level - Show the adapter's error logging level
 3520 * @dev:	class device struct
 3521 * @buf:	buffer
 3522 *
 3523 * Return value:
 3524 * 	number of bytes printed to buffer
 3525 **/
 3526static ssize_t ipr_show_log_level(struct device *dev,
 3527				   struct device_attribute *attr, char *buf)
 3528{
 3529	struct Scsi_Host *shost = class_to_shost(dev);
 3530	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3531	unsigned long lock_flags = 0;
 3532	int len;
 3533
 3534	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3535	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
 3536	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3537	return len;
 3538}
 3539
 3540/**
 3541 * ipr_store_log_level - Change the adapter's error logging level
 3542 * @dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes consumed from buffer
 3547 **/
 3548static ssize_t ipr_store_log_level(struct device *dev,
 3549				   struct device_attribute *attr,
 3550				   const char *buf, size_t count)
 3551{
 3552	struct Scsi_Host *shost = class_to_shost(dev);
 3553	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3554	unsigned long lock_flags = 0;
 3555
 3556	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3557	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
 3558	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3559	return strlen(buf);
 3560}
 3561
 3562static struct device_attribute ipr_log_level_attr = {
 3563	.attr = {
 3564		.name =		"log_level",
 3565		.mode =		S_IRUGO | S_IWUSR,
 3566	},
 3567	.show = ipr_show_log_level,
 3568	.store = ipr_store_log_level
 3569};
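/*
 * Usage sketch (the host number is a placeholder):
 *
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */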
 3570
 3571/**
 3572 * ipr_store_diagnostics - IOA Diagnostics interface
 3573 * @dev:	device struct
 3574 * @buf:	buffer
 3575 * @count:	buffer size
 3576 *
 3577 * This function will reset the adapter and wait a reasonable
 3578 * amount of time for any errors that the adapter might log.
 3579 *
 3580 * Return value:
 3581 * 	count on success / other on failure
 3582 **/
 3583static ssize_t ipr_store_diagnostics(struct device *dev,
 3584				     struct device_attribute *attr,
 3585				     const char *buf, size_t count)
 3586{
 3587	struct Scsi_Host *shost = class_to_shost(dev);
 3588	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3589	unsigned long lock_flags = 0;
 3590	int rc = count;
 3591
 3592	if (!capable(CAP_SYS_ADMIN))
 3593		return -EACCES;
 3594
 3595	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3596	while (ioa_cfg->in_reset_reload) {
 3597		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3598		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 3599		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3600	}
 3601
 3602	ioa_cfg->errors_logged = 0;
 3603	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 3604
 3605	if (ioa_cfg->in_reset_reload) {
 3606		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3607		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 3608
 3609		/* Wait for a second for any errors to be logged */
 3610		msleep(1000);
 3611	} else {
 3612		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3613		return -EIO;
 3614	}
 3615
 3616	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3617	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
 3618		rc = -EIO;
 3619	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3620
 3621	return rc;
 3622}
 3623
 3624static struct device_attribute ipr_diagnostics_attr = {
 3625	.attr = {
 3626		.name =		"run_diagnostics",
 3627		.mode =		S_IWUSR,
 3628	},
 3629	.store = ipr_store_diagnostics
 3630};
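/*
 * Usage sketch: writing any value runs diagnostics; a zero exit status
 * means no errors were logged during the reset (the host number is a
 * placeholder):
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */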
 3631
 3632/**
 3633 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	class device struct
 3635 * @buf:	buffer
 3636 *
 3637 * Return value:
 3638 * 	number of bytes printed to buffer
 3639 **/
 3640static ssize_t ipr_show_adapter_state(struct device *dev,
 3641				      struct device_attribute *attr, char *buf)
 3642{
 3643	struct Scsi_Host *shost = class_to_shost(dev);
 3644	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3645	unsigned long lock_flags = 0;
 3646	int len;
 3647
 3648	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3649	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 3650		len = snprintf(buf, PAGE_SIZE, "offline\n");
 3651	else
 3652		len = snprintf(buf, PAGE_SIZE, "online\n");
 3653	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3654	return len;
 3655}
 3656
 3657/**
 3658 * ipr_store_adapter_state - Change adapter state
 3659 * @dev:	device struct
 3660 * @buf:	buffer
 3661 * @count:	buffer size
 3662 *
 3663 * This function will change the adapter's state.
 3664 *
 3665 * Return value:
 3666 * 	count on success / other on failure
 3667 **/
 3668static ssize_t ipr_store_adapter_state(struct device *dev,
 3669				       struct device_attribute *attr,
 3670				       const char *buf, size_t count)
 3671{
 3672	struct Scsi_Host *shost = class_to_shost(dev);
 3673	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3674	unsigned long lock_flags;
 3675	int result = count, i;
 3676
 3677	if (!capable(CAP_SYS_ADMIN))
 3678		return -EACCES;
 3679
 3680	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3681	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
 3682	    !strncmp(buf, "online", 6)) {
 3683		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
 3684			spin_lock(&ioa_cfg->hrrq[i]._lock);
 3685			ioa_cfg->hrrq[i].ioa_is_dead = 0;
 3686			spin_unlock(&ioa_cfg->hrrq[i]._lock);
 3687		}
 3688		wmb();
 3689		ioa_cfg->reset_retries = 0;
 3690		ioa_cfg->in_ioa_bringdown = 0;
 3691		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 3692	}
 3693	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3694	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 3695
 3696	return result;
 3697}
 3698
 3699static struct device_attribute ipr_ioa_state_attr = {
 3700	.attr = {
 3701		.name =		"online_state",
 3702		.mode =		S_IRUGO | S_IWUSR,
 3703	},
 3704	.show = ipr_show_adapter_state,
 3705	.store = ipr_store_adapter_state
 3706};
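/*
 * Usage sketch: bring a dead adapter back online; only "online" is
 * acted upon (the host number is a placeholder):
 *
 *   echo online > /sys/class/scsi_host/host0/online_state
 */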
 3707
 3708/**
 3709 * ipr_store_reset_adapter - Reset the adapter
 3710 * @dev:	device struct
 3711 * @buf:	buffer
 3712 * @count:	buffer size
 3713 *
 3714 * This function will reset the adapter.
 3715 *
 3716 * Return value:
 3717 * 	count on success / other on failure
 3718 **/
 3719static ssize_t ipr_store_reset_adapter(struct device *dev,
 3720				       struct device_attribute *attr,
 3721				       const char *buf, size_t count)
 3722{
 3723	struct Scsi_Host *shost = class_to_shost(dev);
 3724	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3725	unsigned long lock_flags;
 3726	int result = count;
 3727
 3728	if (!capable(CAP_SYS_ADMIN))
 3729		return -EACCES;
 3730
 3731	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 3732	if (!ioa_cfg->in_reset_reload)
 3733		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 3734	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 3735	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 3736
 3737	return result;
 3738}
 3739
 3740static struct device_attribute ipr_ioa_reset_attr = {
 3741	.attr = {
 3742		.name =		"reset_host",
 3743		.mode =		S_IWUSR,
 3744	},
 3745	.store = ipr_store_reset_adapter
 3746};
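/*
 * Usage sketch: writing any value triggers an adapter reset and waits
 * for it to complete (the host number is a placeholder):
 *
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */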
 3747
 3748static int ipr_iopoll(struct irq_poll *iop, int budget);
/**
 * ipr_show_iopoll_weight - Show the current irq_poll weight
 3751 * @dev:	class device struct
 3752 * @buf:	buffer
 3753 *
 3754 * Return value:
 3755 *	number of bytes printed to buffer
 3756 **/
 3757static ssize_t ipr_show_iopoll_weight(struct device *dev,
 3758				   struct device_attribute *attr, char *buf)
 3759{
 3760	struct Scsi_Host *shost = class_to_shost(dev);
 3761	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3762	unsigned long lock_flags = 0;
 3763	int len;
 3764
 3765	spin_lock_irqsave(shost->host_lock, lock_flags);
 3766	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
 3767	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 3768
 3769	return len;
 3770}
 3771
 3772/**
 * ipr_store_iopoll_weight - Change the adapter's irq_poll weight
 * @dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed from buffer
 3779 **/
 3780static ssize_t ipr_store_iopoll_weight(struct device *dev,
 3781					struct device_attribute *attr,
 3782					const char *buf, size_t count)
 3783{
 3784	struct Scsi_Host *shost = class_to_shost(dev);
 3785	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 3786	unsigned long user_iopoll_weight;
 3787	unsigned long lock_flags = 0;
 3788	int i;
 3789
 3790	if (!ioa_cfg->sis64) {
 3791		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
 3792		return -EINVAL;
 3793	}
 3794	if (kstrtoul(buf, 10, &user_iopoll_weight))
 3795		return -EINVAL;
 3796
 3797	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
 3799		return -EINVAL;
 3800	}
 3801
 3802	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged, new value matches the current weight\n");
 3804		return strlen(buf);
 3805	}
 3806
 3807	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 3808		for (i = 1; i < ioa_cfg->hrrq_num; i++)
 3809			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
 3810	}
 3811
 3812	spin_lock_irqsave(shost->host_lock, lock_flags);
 3813	ioa_cfg->iopoll_weight = user_iopoll_weight;
 3814	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 3815		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 3816			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
 3817					ioa_cfg->iopoll_weight, ipr_iopoll);
 3818		}
 3819	}
 3820	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 3821
 3822	return strlen(buf);
 3823}
 3824
 3825static struct device_attribute ipr_iopoll_weight_attr = {
 3826	.attr = {
 3827		.name =		"iopoll_weight",
 3828		.mode =		S_IRUGO | S_IWUSR,
 3829	},
 3830	.show = ipr_show_iopoll_weight,
 3831	.store = ipr_store_iopoll_weight
 3832};
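/*
 * Usage sketch: on SIS64 adapters with multiple MSI-X vectors, enable
 * irq_poll with a weight of 64 (the host number is a placeholder):
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */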
 3833
 3834/**
 3835 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 3836 * @buf_len:		buffer length
 3837 *
 3838 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 3839 * list to use for microcode download
 3840 *
 3841 * Return value:
 3842 * 	pointer to sglist / NULL on failure
 3843 **/
 3844static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 3845{
 3846	int sg_size, order;
 3847	struct ipr_sglist *sglist;
 3848
 3849	/* Get the minimum size per scatter/gather element */
 3850	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
 3851
 3852	/* Get the actual size per element */
 3853	order = get_order(sg_size);
 3854
 3855	/* Allocate a scatter/gather list for the DMA */
 3856	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
 3857	if (sglist == NULL) {
 3858		ipr_trace;
 3859		return NULL;
 3860	}
 3861	sglist->order = order;
 3862	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
 3863					      &sglist->num_sg);
 3864	if (!sglist->scatterlist) {
 3865		kfree(sglist);
 3866		return NULL;
 3867	}
 3868
 3869	return sglist;
 3870}
 3871
 3872/**
 3873 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 3875 *
 3876 * Free a DMA'able ucode download buffer previously allocated with
 3877 * ipr_alloc_ucode_buffer
 3878 *
 3879 * Return value:
 3880 * 	nothing
 3881 **/
 3882static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
 3883{
 3884	sgl_free_order(sglist->scatterlist, sglist->order);
 3885	kfree(sglist);
 3886}
 3887
 3888/**
 3889 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 3890 * @sglist:		scatter/gather list pointer
 3891 * @buffer:		buffer pointer
 3892 * @len:		buffer length
 3893 *
 3894 * Copy a microcode image from a user buffer into a buffer allocated by
 3895 * ipr_alloc_ucode_buffer
 3896 *
 3897 * Return value:
 3898 * 	0 on success / other on failure
 3899 **/
 3900static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 3901				 u8 *buffer, u32 len)
 3902{
 3903	int bsize_elem, i, result = 0;
 3904	struct scatterlist *sg;
 3905	void *kaddr;
 3906
 3907	/* Determine the actual number of bytes per element */
 3908	bsize_elem = PAGE_SIZE * (1 << sglist->order);
 3909
 3910	sg = sglist->scatterlist;
 3911
 3912	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
 3913			buffer += bsize_elem) {
 3914		struct page *page = sg_page(sg);
 3915
 3916		kaddr = kmap(page);
 3917		memcpy(kaddr, buffer, bsize_elem);
 3918		kunmap(page);
 3919
 3920		sg->length = bsize_elem;
 3926	}
 3927
 3928	if (len % bsize_elem) {
 3929		struct page *page = sg_page(sg);
 3930
 3931		kaddr = kmap(page);
 3932		memcpy(kaddr, buffer, len % bsize_elem);
 3933		kunmap(page);
 3934
 3935		sg->length = len % bsize_elem;
 3936	}
 3937
 3938	sglist->buffer_len = len;
 3939	return result;
 3940}
 3941
 3942/**
 3943 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 3944 * @ipr_cmd:		ipr command struct
 3945 * @sglist:		scatter/gather list
 3946 *
 3947 * Builds a microcode download IOA data list (IOADL).
 3948 *
 3949 **/
 3950static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
 3951				    struct ipr_sglist *sglist)
 3952{
 3953	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 3954	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
 3955	struct scatterlist *scatterlist = sglist->scatterlist;
 3956	struct scatterlist *sg;
 3957	int i;
 3958
 3959	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
 3960	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
 3961	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
 3962
 3963	ioarcb->ioadl_len =
 3964		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
 3965	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 3966		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
 3967		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
 3968		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
 3969	}
 3970
 3971	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
 3972}
 3973
 3974/**
 3975 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 3976 * @ipr_cmd:	ipr command struct
 3977 * @sglist:		scatter/gather list
 3978 *
 3979 * Builds a microcode download IOA data list (IOADL).
 3980 *
 3981 **/
 3982static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
 3983				  struct ipr_sglist *sglist)
 3984{
 3985	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 3986	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 3987	struct scatterlist *scatterlist = sglist->scatterlist;
 3988	struct scatterlist *sg;
 3989	int i;
 3990
 3991	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
 3992	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
 3993	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
 3994
 3995	ioarcb->ioadl_len =
 3996		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 3997
 3998	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 3999		ioadl[i].flags_and_data_len =
 4000			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
 4001		ioadl[i].address =
 4002			cpu_to_be32(sg_dma_address(sg));
 4003	}
 4004
 4005	ioadl[i-1].flags_and_data_len |=
 4006		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
 4007}
 4008
 4009/**
 4010 * ipr_update_ioa_ucode - Update IOA's microcode
 4011 * @ioa_cfg:	ioa config struct
 4012 * @sglist:		scatter/gather list
 4013 *
 4014 * Initiate an adapter reset to update the IOA's microcode
 4015 *
 4016 * Return value:
 4017 * 	0 on success / -EIO on failure
 4018 **/
 4019static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
 4020				struct ipr_sglist *sglist)
 4021{
 4022	unsigned long lock_flags;
 4023
 4024	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4025	while (ioa_cfg->in_reset_reload) {
 4026		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4027		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 4028		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4029	}
 4030
 4031	if (ioa_cfg->ucode_sglist) {
 4032		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4033		dev_err(&ioa_cfg->pdev->dev,
 4034			"Microcode download already in progress\n");
 4035		return -EIO;
 4036	}
 4037
 4038	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
 4039					sglist->scatterlist, sglist->num_sg,
 4040					DMA_TO_DEVICE);
 4041
 4042	if (!sglist->num_dma_sg) {
 4043		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4044		dev_err(&ioa_cfg->pdev->dev,
 4045			"Failed to map microcode download buffer!\n");
 4046		return -EIO;
 4047	}
 4048
 4049	ioa_cfg->ucode_sglist = sglist;
 4050	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 4051	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4052	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 4053
 4054	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4055	ioa_cfg->ucode_sglist = NULL;
 4056	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4057	return 0;
 4058}
 4059
 4060/**
 4061 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	class device struct
 4063 * @buf:	buffer
 4064 * @count:	buffer size
 4065 *
 4066 * This function will update the firmware on the adapter.
 4067 *
 4068 * Return value:
 4069 * 	count on success / other on failure
 4070 **/
 4071static ssize_t ipr_store_update_fw(struct device *dev,
 4072				   struct device_attribute *attr,
 4073				   const char *buf, size_t count)
 4074{
 4075	struct Scsi_Host *shost = class_to_shost(dev);
 4076	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4077	struct ipr_ucode_image_header *image_hdr;
 4078	const struct firmware *fw_entry;
 4079	struct ipr_sglist *sglist;
 4080	char fname[100];
 4081	char *src;
 4082	char *endline;
 4083	int result, dnld_size;
 4084
 4085	if (!capable(CAP_SYS_ADMIN))
 4086		return -EACCES;
 4087
 4088	snprintf(fname, sizeof(fname), "%s", buf);
 4089
 4090	endline = strchr(fname, '\n');
 4091	if (endline)
 4092		*endline = '\0';
 4093
 4094	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
 4095		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
 4096		return -EIO;
 4097	}
 4098
 4099	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
 4100
 4101	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
 4102	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
 4103	sglist = ipr_alloc_ucode_buffer(dnld_size);
 4104
 4105	if (!sglist) {
 4106		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
 4107		release_firmware(fw_entry);
 4108		return -ENOMEM;
 4109	}
 4110
 4111	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
 4112
 4113	if (result) {
 4114		dev_err(&ioa_cfg->pdev->dev,
 4115			"Microcode buffer copy to DMA buffer failed\n");
 4116		goto out;
 4117	}
 4118
 4119	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
 4120
 4121	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
 4122
 4123	if (!result)
 4124		result = count;
 4125out:
 4126	ipr_free_ucode_buffer(sglist);
 4127	release_firmware(fw_entry);
 4128	return result;
 4129}
 4130
 4131static struct device_attribute ipr_update_fw_attr = {
 4132	.attr = {
 4133		.name =		"update_fw",
 4134		.mode =		S_IWUSR,
 4135	},
 4136	.store = ipr_store_update_fw
 4137};
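/*
 * Usage sketch: the image name below is hypothetical and must exist
 * under the firmware search path (e.g. /lib/firmware); the host number
 * is a placeholder:
 *
 *   echo 534954A0.img > /sys/class/scsi_host/host0/update_fw
 */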
 4138
 4139/**
 4140 * ipr_show_fw_type - Show the adapter's firmware type.
 4141 * @dev:	class device struct
 4142 * @buf:	buffer
 4143 *
 4144 * Return value:
 4145 *	number of bytes printed to buffer
 4146 **/
 4147static ssize_t ipr_show_fw_type(struct device *dev,
 4148				struct device_attribute *attr, char *buf)
 4149{
 4150	struct Scsi_Host *shost = class_to_shost(dev);
 4151	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4152	unsigned long lock_flags = 0;
 4153	int len;
 4154
 4155	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4156	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
 4157	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4158	return len;
 4159}
 4160
 4161static struct device_attribute ipr_ioa_fw_type_attr = {
 4162	.attr = {
 4163		.name =		"fw_type",
 4164		.mode =		S_IRUGO,
 4165	},
 4166	.show = ipr_show_fw_type
 4167};
 4168
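/**
 * ipr_read_async_err_log - Read the oldest queued host RCB error buffer
 * @filep:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes read
 **/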
 4169static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
 4170				struct bin_attribute *bin_attr, char *buf,
 4171				loff_t off, size_t count)
 4172{
 4173	struct device *cdev = container_of(kobj, struct device, kobj);
 4174	struct Scsi_Host *shost = class_to_shost(cdev);
 4175	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4176	struct ipr_hostrcb *hostrcb;
 4177	unsigned long lock_flags = 0;
 4178	int ret;
 4179
 4180	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4181	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
 4182					struct ipr_hostrcb, queue);
 4183	if (!hostrcb) {
 4184		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4185		return 0;
 4186	}
 4187	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
 4188				sizeof(hostrcb->hcam));
 4189	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4190	return ret;
 4191}
 4192
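/**
 * ipr_next_async_err_log - Discard the oldest queued error buffer
 * @filep:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Writing any value returns the oldest host RCB to the free queue so
 * that the next read reports the following entry.
 *
 * Return value:
 *	count
 **/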
 4193static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
 4194				struct bin_attribute *bin_attr, char *buf,
 4195				loff_t off, size_t count)
 4196{
 4197	struct device *cdev = container_of(kobj, struct device, kobj);
 4198	struct Scsi_Host *shost = class_to_shost(cdev);
 4199	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4200	struct ipr_hostrcb *hostrcb;
 4201	unsigned long lock_flags = 0;
 4202
 4203	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4204	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
 4205					struct ipr_hostrcb, queue);
 4206	if (!hostrcb) {
 4207		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4208		return count;
 4209	}
 4210
 4211	/* Reclaim hostrcb before exit */
 4212	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
 4213	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4214	return count;
 4215}
 4216
 4217static struct bin_attribute ipr_ioa_async_err_log = {
 4218	.attr = {
 4219		.name =		"async_err_log",
 4220		.mode =		S_IRUGO | S_IWUSR,
 4221	},
 4222	.size = 0,
 4223	.read = ipr_read_async_err_log,
 4224	.write = ipr_next_async_err_log
 4225};
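/*
 * Usage sketch: read the oldest logged error, then advance to the next
 * one (the host number is a placeholder):
 *
 *   dd if=/sys/class/scsi_host/host0/async_err_log of=/tmp/err.bin
 *   echo 1 > /sys/class/scsi_host/host0/async_err_log
 */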
 4226
 4227static struct device_attribute *ipr_ioa_attrs[] = {
 4228	&ipr_fw_version_attr,
 4229	&ipr_log_level_attr,
 4230	&ipr_diagnostics_attr,
 4231	&ipr_ioa_state_attr,
 4232	&ipr_ioa_reset_attr,
 4233	&ipr_update_fw_attr,
 4234	&ipr_ioa_fw_type_attr,
 4235	&ipr_iopoll_weight_attr,
 4236	NULL,
 4237};
 4238
 4239#ifdef CONFIG_SCSI_IPR_DUMP
 4240/**
 4241 * ipr_read_dump - Dump the adapter
 4242 * @filp:		open sysfs file
 4243 * @kobj:		kobject struct
 4244 * @bin_attr:		bin_attribute struct
 4245 * @buf:		buffer
 4246 * @off:		offset
 4247 * @count:		buffer size
 4248 *
 4249 * Return value:
 *	number of bytes read
 4251 **/
 4252static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 4253			     struct bin_attribute *bin_attr,
 4254			     char *buf, loff_t off, size_t count)
 4255{
 4256	struct device *cdev = container_of(kobj, struct device, kobj);
 4257	struct Scsi_Host *shost = class_to_shost(cdev);
 4258	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4259	struct ipr_dump *dump;
 4260	unsigned long lock_flags = 0;
 4261	char *src;
 4262	int len, sdt_end;
 4263	size_t rc = count;
 4264
 4265	if (!capable(CAP_SYS_ADMIN))
 4266		return -EACCES;
 4267
 4268	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4269	dump = ioa_cfg->dump;
 4270
 4271	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
 4272		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4273		return 0;
 4274	}
 4275	kref_get(&dump->kref);
 4276	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4277
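	/*
	 * The dump image is laid out in three regions: the driver dump
	 * header and entries, the smart dump table (SDT), and the
	 * page-sized IOA data buffers.  Each step below copies the part
	 * of the request that falls within its region.
	 */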
 4278	if (off > dump->driver_dump.hdr.len) {
 4279		kref_put(&dump->kref, ipr_release_dump);
 4280		return 0;
 4281	}
 4282
 4283	if (off + count > dump->driver_dump.hdr.len) {
 4284		count = dump->driver_dump.hdr.len - off;
 4285		rc = count;
 4286	}
 4287
 4288	if (count && off < sizeof(dump->driver_dump)) {
 4289		if (off + count > sizeof(dump->driver_dump))
 4290			len = sizeof(dump->driver_dump) - off;
 4291		else
 4292			len = count;
 4293		src = (u8 *)&dump->driver_dump + off;
 4294		memcpy(buf, src, len);
 4295		buf += len;
 4296		off += len;
 4297		count -= len;
 4298	}
 4299
 4300	off -= sizeof(dump->driver_dump);
 4301
 4302	if (ioa_cfg->sis64)
 4303		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
 4304			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
 4305			   sizeof(struct ipr_sdt_entry));
 4306	else
 4307		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
 4308			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
 4309
 4310	if (count && off < sdt_end) {
 4311		if (off + count > sdt_end)
 4312			len = sdt_end - off;
 4313		else
 4314			len = count;
 4315		src = (u8 *)&dump->ioa_dump + off;
 4316		memcpy(buf, src, len);
 4317		buf += len;
 4318		off += len;
 4319		count -= len;
 4320	}
 4321
 4322	off -= sdt_end;
 4323
 4324	while (count) {
 4325		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
 4326			len = PAGE_ALIGN(off) - off;
 4327		else
 4328			len = count;
 4329		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
 4330		src += off & ~PAGE_MASK;
 4331		memcpy(buf, src, len);
 4332		buf += len;
 4333		off += len;
 4334		count -= len;
 4335	}
 4336
 4337	kref_put(&dump->kref, ipr_release_dump);
 4338	return rc;
 4339}
 4340
 4341/**
 4342 * ipr_alloc_dump - Prepare for adapter dump
 4343 * @ioa_cfg:	ioa config struct
 4344 *
 4345 * Return value:
 4346 *	0 on success / other on failure
 4347 **/
 4348static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 4349{
 4350	struct ipr_dump *dump;
 4351	__be32 **ioa_data;
 4352	unsigned long lock_flags = 0;
 4353
 4354	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
 4355
 4356	if (!dump) {
 4357		ipr_err("Dump memory allocation failed\n");
 4358		return -ENOMEM;
 4359	}
 4360
 4361	if (ioa_cfg->sis64)
 4362		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
 4363					      sizeof(__be32 *)));
 4364	else
 4365		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
 4366					      sizeof(__be32 *)));
 4367
 4368	if (!ioa_data) {
 4369		ipr_err("Dump memory allocation failed\n");
 4370		kfree(dump);
 4371		return -ENOMEM;
 4372	}
 4373
 4374	dump->ioa_dump.ioa_data = ioa_data;
 4375
 4376	kref_init(&dump->kref);
 4377	dump->ioa_cfg = ioa_cfg;
 4378
 4379	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4380
 4381	if (INACTIVE != ioa_cfg->sdt_state) {
 4382		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4383		vfree(dump->ioa_dump.ioa_data);
 4384		kfree(dump);
 4385		return 0;
 4386	}
 4387
 4388	ioa_cfg->dump = dump;
 4389	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
 4390	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
 4391		ioa_cfg->dump_taken = 1;
 4392		schedule_work(&ioa_cfg->work_q);
 4393	}
 4394	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4395
 4396	return 0;
 4397}
 4398
 4399/**
 4400 * ipr_free_dump - Free adapter dump memory
 4401 * @ioa_cfg:	ioa config struct
 4402 *
 4403 * Return value:
 4404 *	0 on success / other on failure
 4405 **/
 4406static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
 4407{
 4408	struct ipr_dump *dump;
 4409	unsigned long lock_flags = 0;
 4410
 4411	ENTER;
 4412
 4413	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4414	dump = ioa_cfg->dump;
 4415	if (!dump) {
 4416		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4417		return 0;
 4418	}
 4419
 4420	ioa_cfg->dump = NULL;
 4421	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4422
 4423	kref_put(&dump->kref, ipr_release_dump);
 4424
 4425	LEAVE;
 4426	return 0;
 4427}
 4428
 4429/**
 4430 * ipr_write_dump - Setup dump state of adapter
 4431 * @filp:		open sysfs file
 4432 * @kobj:		kobject struct
 4433 * @bin_attr:		bin_attribute struct
 4434 * @buf:		buffer
 4435 * @off:		offset
 4436 * @count:		buffer size
 4437 *
 4438 * Return value:
 *	count on success / other on failure
 4440 **/
 4441static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
 4442			      struct bin_attribute *bin_attr,
 4443			      char *buf, loff_t off, size_t count)
 4444{
 4445	struct device *cdev = container_of(kobj, struct device, kobj);
 4446	struct Scsi_Host *shost = class_to_shost(cdev);
 4447	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 4448	int rc;
 4449
 4450	if (!capable(CAP_SYS_ADMIN))
 4451		return -EACCES;
 4452
 4453	if (buf[0] == '1')
 4454		rc = ipr_alloc_dump(ioa_cfg);
 4455	else if (buf[0] == '0')
 4456		rc = ipr_free_dump(ioa_cfg);
 4457	else
 4458		return -EINVAL;
 4459
 4460	if (rc)
 4461		return rc;
 4462	else
 4463		return count;
 4464}
 4465
 4466static struct bin_attribute ipr_dump_attr = {
 4467	.attr =	{
 4468		.name = "dump",
 4469		.mode = S_IRUSR | S_IWUSR,
 4470	},
 4471	.size = 0,
 4472	.read = ipr_read_dump,
 4473	.write = ipr_write_dump
 4474};
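/*
 * Usage sketch: arm dump collection, read the dump back after the
 * adapter has failed, then free the memory (the host number is a
 * placeholder):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */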
 4475#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
 4477#endif
 4478
 4479/**
 4480 * ipr_change_queue_depth - Change the device's queue depth
 4481 * @sdev:	scsi device struct
 4482 * @qdepth:	depth to set
 4484 *
 4485 * Return value:
 4486 * 	actual depth set
 4487 **/
 4488static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
 4489{
 4490	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4491	struct ipr_resource_entry *res;
 4492	unsigned long lock_flags = 0;
 4493
 4494	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4495	res = (struct ipr_resource_entry *)sdev->hostdata;
 4496
 4497	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
 4498		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
 4499	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4500
 4501	scsi_change_queue_depth(sdev, qdepth);
 4502	return sdev->queue_depth;
 4503}
 4504
 4505/**
 4506 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 4507 * @dev:	device struct
 4508 * @attr:	device attribute structure
 4509 * @buf:	buffer
 4510 *
 4511 * Return value:
 4512 * 	number of bytes printed to buffer
 4513 **/
 4514static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
 4515{
 4516	struct scsi_device *sdev = to_scsi_device(dev);
 4517	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4518	struct ipr_resource_entry *res;
 4519	unsigned long lock_flags = 0;
 4520	ssize_t len = -ENXIO;
 4521
 4522	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4523	res = (struct ipr_resource_entry *)sdev->hostdata;
 4524	if (res)
 4525		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
 4526	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4527	return len;
 4528}
 4529
 4530static struct device_attribute ipr_adapter_handle_attr = {
 4531	.attr = {
 4532		.name = 	"adapter_handle",
 4533		.mode =		S_IRUSR,
 4534	},
 4535	.show = ipr_show_adapter_handle
 4536};
 4537
 4538/**
 4539 * ipr_show_resource_path - Show the resource path or the resource address for
 4540 *			    this device.
 4541 * @dev:	device struct
 4542 * @attr:	device attribute structure
 4543 * @buf:	buffer
 4544 *
 4545 * Return value:
 4546 * 	number of bytes printed to buffer
 4547 **/
 4548static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
 4549{
 4550	struct scsi_device *sdev = to_scsi_device(dev);
 4551	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4552	struct ipr_resource_entry *res;
 4553	unsigned long lock_flags = 0;
 4554	ssize_t len = -ENXIO;
 4555	char buffer[IPR_MAX_RES_PATH_LENGTH];
 4556
 4557	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4558	res = (struct ipr_resource_entry *)sdev->hostdata;
 4559	if (res && ioa_cfg->sis64)
 4560		len = snprintf(buf, PAGE_SIZE, "%s\n",
 4561			       __ipr_format_res_path(res->res_path, buffer,
 4562						     sizeof(buffer)));
 4563	else if (res)
 4564		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
 4565			       res->bus, res->target, res->lun);
 4566
 4567	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4568	return len;
 4569}
 4570
 4571static struct device_attribute ipr_resource_path_attr = {
 4572	.attr = {
 4573		.name = 	"resource_path",
 4574		.mode =		S_IRUGO,
 4575	},
 4576	.show = ipr_show_resource_path
 4577};
 4578
 4579/**
 4580 * ipr_show_device_id - Show the device_id for this device.
 4581 * @dev:	device struct
 4582 * @attr:	device attribute structure
 4583 * @buf:	buffer
 4584 *
 4585 * Return value:
 4586 *	number of bytes printed to buffer
 4587 **/
 4588static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
 4589{
 4590	struct scsi_device *sdev = to_scsi_device(dev);
 4591	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4592	struct ipr_resource_entry *res;
 4593	unsigned long lock_flags = 0;
 4594	ssize_t len = -ENXIO;
 4595
 4596	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4597	res = (struct ipr_resource_entry *)sdev->hostdata;
 4598	if (res && ioa_cfg->sis64)
 4599		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
 4600	else if (res)
 4601		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
 4602
 4603	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4604	return len;
 4605}
 4606
 4607static struct device_attribute ipr_device_id_attr = {
 4608	.attr = {
 4609		.name =		"device_id",
 4610		.mode =		S_IRUGO,
 4611	},
 4612	.show = ipr_show_device_id
 4613};
 4614
 4615/**
 4616 * ipr_show_resource_type - Show the resource type for this device.
 4617 * @dev:	device struct
 4618 * @attr:	device attribute structure
 4619 * @buf:	buffer
 4620 *
 4621 * Return value:
 4622 *	number of bytes printed to buffer
 4623 **/
 4624static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
 4625{
 4626	struct scsi_device *sdev = to_scsi_device(dev);
 4627	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4628	struct ipr_resource_entry *res;
 4629	unsigned long lock_flags = 0;
 4630	ssize_t len = -ENXIO;
 4631
 4632	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4633	res = (struct ipr_resource_entry *)sdev->hostdata;
 4634
 4635	if (res)
 4636		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
 4637
 4638	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4639	return len;
 4640}
 4641
 4642static struct device_attribute ipr_resource_type_attr = {
 4643	.attr = {
 4644		.name =		"resource_type",
 4645		.mode =		S_IRUGO,
 4646	},
 4647	.show = ipr_show_resource_type
 4648};
 4649
 4650/**
 * ipr_show_raw_mode - Show the device's raw mode
 4652 * @dev:	class device struct
 4653 * @buf:	buffer
 4654 *
 4655 * Return value:
 4656 * 	number of bytes printed to buffer
 4657 **/
 4658static ssize_t ipr_show_raw_mode(struct device *dev,
 4659				 struct device_attribute *attr, char *buf)
 4660{
 4661	struct scsi_device *sdev = to_scsi_device(dev);
 4662	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4663	struct ipr_resource_entry *res;
 4664	unsigned long lock_flags = 0;
 4665	ssize_t len;
 4666
 4667	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4668	res = (struct ipr_resource_entry *)sdev->hostdata;
 4669	if (res)
 4670		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
 4671	else
 4672		len = -ENXIO;
 4673	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4674	return len;
 4675}
 4676
 4677/**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes consumed from buffer
 4684 **/
 4685static ssize_t ipr_store_raw_mode(struct device *dev,
 4686				  struct device_attribute *attr,
 4687				  const char *buf, size_t count)
 4688{
 4689	struct scsi_device *sdev = to_scsi_device(dev);
 4690	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
 4691	struct ipr_resource_entry *res;
 4692	unsigned long lock_flags = 0;
 4693	ssize_t len;
 4694
 4695	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4696	res = (struct ipr_resource_entry *)sdev->hostdata;
 4697	if (res) {
 4698		if (ipr_is_af_dasd_device(res)) {
 4699			res->raw_mode = simple_strtoul(buf, NULL, 10);
 4700			len = strlen(buf);
 4701			if (res->sdev)
 4702				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
 4703					res->raw_mode ? "enabled" : "disabled");
 4704		} else
 4705			len = -EINVAL;
 4706	} else
 4707		len = -ENXIO;
 4708	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4709	return len;
 4710}
 4711
 4712static struct device_attribute ipr_raw_mode_attr = {
 4713	.attr = {
 4714		.name =		"raw_mode",
 4715		.mode =		S_IRUGO | S_IWUSR,
 4716	},
 4717	.show = ipr_show_raw_mode,
 4718	.store = ipr_store_raw_mode
 4719};
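/*
 * Usage sketch: raw mode can only be toggled on AF DASD devices; the
 * H:C:T:L tuple below is a placeholder for the actual device address:
 *
 *   echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */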
 4720
 4721static struct device_attribute *ipr_dev_attrs[] = {
 4722	&ipr_adapter_handle_attr,
 4723	&ipr_resource_path_attr,
 4724	&ipr_device_id_attr,
 4725	&ipr_resource_type_attr,
 4726	&ipr_raw_mode_attr,
 4727	NULL,
 4728};
 4729
 4730/**
 4731 * ipr_biosparam - Return the HSC mapping
 4732 * @sdev:			scsi device struct
 4733 * @block_device:	block device pointer
 4734 * @capacity:		capacity of the device
 4735 * @parm:			Array containing returned HSC values.
 4736 *
 4737 * This function generates the HSC parms that fdisk uses.
 4738 * We want to make sure we return something that places partitions
 4739 * on 4k boundaries for best performance with the IOA.
 4740 *
 4741 * Return value:
 4742 * 	0 on success
 4743 **/
 4744static int ipr_biosparam(struct scsi_device *sdev,
 4745			 struct block_device *block_device,
 4746			 sector_t capacity, int *parm)
 4747{
 4748	int heads, sectors;
 4749	sector_t cylinders;
 4750
 4751	heads = 128;
 4752	sectors = 32;
 4753
 4754	cylinders = capacity;
 4755	sector_div(cylinders, (128 * 32));
 4756
 4757	/* return result */
 4758	parm[0] = heads;
 4759	parm[1] = sectors;
 4760	parm[2] = cylinders;
 4761
 4762	return 0;
 4763}
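/*
 * Worked example: a disk of 146800640 sectors (70 GiB) reports 128
 * heads, 32 sectors/track, and 146800640 / (128 * 32) = 35840
 * cylinders, keeping partitions aligned on 4k boundaries.
 */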
 4764
 4765/**
 4766 * ipr_find_starget - Find target based on bus/target.
 4767 * @starget:	scsi target struct
 4768 *
 4769 * Return value:
 4770 * 	resource entry pointer if found / NULL if not found
 4771 **/
 4772static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
 4773{
 4774	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 4775	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 4776	struct ipr_resource_entry *res;
 4777
 4778	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 4779		if ((res->bus == starget->channel) &&
 4780		    (res->target == starget->id)) {
 4781			return res;
 4782		}
 4783	}
 4784
 4785	return NULL;
 4786}
 4787
 4788static struct ata_port_info sata_port_info;
 4789
 4790/**
 4791 * ipr_target_alloc - Prepare for commands to a SCSI target
 4792 * @starget:	scsi target struct
 4793 *
 4794 * If the device is a SATA device, this function allocates an
 4795 * ATA port with libata, else it does nothing.
 4796 *
 4797 * Return value:
 4798 * 	0 on success / non-0 on failure
 4799 **/
 4800static int ipr_target_alloc(struct scsi_target *starget)
 4801{
 4802	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 4803	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 4804	struct ipr_sata_port *sata_port;
 4805	struct ata_port *ap;
 4806	struct ipr_resource_entry *res;
 4807	unsigned long lock_flags;
 4808
 4809	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4810	res = ipr_find_starget(starget);
 4811	starget->hostdata = NULL;
 4812
 4813	if (res && ipr_is_gata(res)) {
 4814		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4815		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
 4816		if (!sata_port)
 4817			return -ENOMEM;
 4818
 4819		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
 4820		if (ap) {
 4821			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4822			sata_port->ioa_cfg = ioa_cfg;
 4823			sata_port->ap = ap;
 4824			sata_port->res = res;
 4825
 4826			res->sata_port = sata_port;
 4827			ap->private_data = sata_port;
 4828			starget->hostdata = sata_port;
 4829		} else {
 4830			kfree(sata_port);
 4831			return -ENOMEM;
 4832		}
 4833	}
 4834	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4835
 4836	return 0;
 4837}
 4838
 4839/**
 4840 * ipr_target_destroy - Destroy a SCSI target
 4841 * @starget:	scsi target struct
 4842 *
 4843 * If the device was a SATA device, this function frees the libata
 4844 * ATA port, else it does nothing.
 4845 *
 4846 **/
 4847static void ipr_target_destroy(struct scsi_target *starget)
 4848{
 4849	struct ipr_sata_port *sata_port = starget->hostdata;
 4850	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 4851	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 4852
 4853	if (ioa_cfg->sis64) {
 4854		if (!ipr_find_starget(starget)) {
 4855			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
 4856				clear_bit(starget->id, ioa_cfg->array_ids);
 4857			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
 4858				clear_bit(starget->id, ioa_cfg->vset_ids);
 4859			else if (starget->channel == 0)
 4860				clear_bit(starget->id, ioa_cfg->target_ids);
 4861		}
 4862	}
 4863
 4864	if (sata_port) {
 4865		starget->hostdata = NULL;
 4866		ata_sas_port_destroy(sata_port->ap);
 4867		kfree(sata_port);
 4868	}
 4869}
 4870
 4871/**
 4872 * ipr_find_sdev - Find device based on bus/target/lun.
 4873 * @sdev:	scsi device struct
 4874 *
 4875 * Return value:
 4876 * 	resource entry pointer if found / NULL if not found
 4877 **/
 4878static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
 4879{
 4880	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
 4881	struct ipr_resource_entry *res;
 4882
 4883	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 4884		if ((res->bus == sdev->channel) &&
 4885		    (res->target == sdev->id) &&
 4886		    (res->lun == sdev->lun))
 4887			return res;
 4888	}
 4889
 4890	return NULL;
 4891}
 4892
 4893/**
 4894 * ipr_slave_destroy - Unconfigure a SCSI device
 4895 * @sdev:	scsi device struct
 4896 *
 4897 * Return value:
 4898 * 	nothing
 4899 **/
 4900static void ipr_slave_destroy(struct scsi_device *sdev)
 4901{
 4902	struct ipr_resource_entry *res;
 4903	struct ipr_ioa_cfg *ioa_cfg;
 4904	unsigned long lock_flags = 0;
 4905
 4906	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
 4907
 4908	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4909	res = (struct ipr_resource_entry *) sdev->hostdata;
 4910	if (res) {
 4911		if (res->sata_port)
 4912			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
 4913		sdev->hostdata = NULL;
 4914		res->sdev = NULL;
 4915		res->sata_port = NULL;
 4916	}
 4917	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4918}
 4919
 4920/**
 4921 * ipr_slave_configure - Configure a SCSI device
 4922 * @sdev:	scsi device struct
 4923 *
 4924 * This function configures the specified scsi device.
 4925 *
 4926 * Return value:
 4927 * 	0 on success
 4928 **/
 4929static int ipr_slave_configure(struct scsi_device *sdev)
 4930{
 4931	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
 4932	struct ipr_resource_entry *res;
 4933	struct ata_port *ap = NULL;
 4934	unsigned long lock_flags = 0;
 4935	char buffer[IPR_MAX_RES_PATH_LENGTH];
 4936
 4937	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 4938	res = sdev->hostdata;
 4939	if (res) {
 4940		if (ipr_is_af_dasd_device(res))
 4941			sdev->type = TYPE_RAID;
 4942		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
 4943			sdev->scsi_level = 4;
 4944			sdev->no_uld_attach = 1;
 4945		}
 4946		if (ipr_is_vset_device(res)) {
 4947			sdev->scsi_level = SCSI_SPC_3;
 4948			sdev->no_report_opcodes = 1;
 4949			blk_queue_rq_timeout(sdev->request_queue,
 4950					     IPR_VSET_RW_TIMEOUT);
 4951			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
 4952		}
 4953		if (ipr_is_gata(res) && res->sata_port)
 4954			ap = res->sata_port->ap;
 4955		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4956
 4957		if (ap) {
 4958			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
 4959			ata_sas_slave_configure(sdev, ap);
 4960		}
 4961
 4962		if (ioa_cfg->sis64)
 4963			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
 4964				    ipr_format_res_path(ioa_cfg,
 4965				res->res_path, buffer, sizeof(buffer)));
 4966		return 0;
 4967	}
 4968	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 4969	return 0;
 4970}
 4971
 4972/**
 4973 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 4974 * @sdev:	scsi device struct
 4975 *
 4976 * This function initializes an ATA port so that future commands
 4977 * sent through queuecommand will work.
 4978 *
 4979 * Return value:
 4980 * 	0 on success
 4981 **/
 4982static int ipr_ata_slave_alloc(struct scsi_device *sdev)
 4983{
 4984	struct ipr_sata_port *sata_port = NULL;
 4985	int rc = -ENXIO;
 4986
 4987	ENTER;
 4988	if (sdev->sdev_target)
 4989		sata_port = sdev->sdev_target->hostdata;
 4990	if (sata_port) {
 4991		rc = ata_sas_port_init(sata_port->ap);
 4992		if (rc == 0)
 4993			rc = ata_sas_sync_probe(sata_port->ap);
 4994	}
 4995
 4996	if (rc)
 4997		ipr_slave_destroy(sdev);
 4998
 4999	LEAVE;
 5000	return rc;
 5001}
 5002
 5003/**
 5004 * ipr_slave_alloc - Prepare for commands to a device.
 5005 * @sdev:	scsi device struct
 5006 *
 5007 * This function saves a pointer to the resource entry
 5008 * in the scsi device struct if the device exists. We
 5009 * can then use this pointer in ipr_queuecommand when
 5010 * handling new commands.
 5011 *
 5012 * Return value:
 5013 * 	0 on success / -ENXIO if device does not exist
 5014 **/
 5015static int ipr_slave_alloc(struct scsi_device *sdev)
 5016{
 5017	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
 5018	struct ipr_resource_entry *res;
 5019	unsigned long lock_flags;
 5020	int rc = -ENXIO;
 5021
 5022	sdev->hostdata = NULL;
 5023
 5024	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5025
 5026	res = ipr_find_sdev(sdev);
 5027	if (res) {
 5028		res->sdev = sdev;
 5029		res->add_to_ml = 0;
 5030		res->in_erp = 0;
 5031		sdev->hostdata = res;
 5032		if (!ipr_is_naca_model(res))
 5033			res->needs_sync_complete = 1;
 5034		rc = 0;
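     		/* SATA devices are probed through libata, which must
     		 * run without the host lock held.
     		 */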
 5035		if (ipr_is_gata(res)) {
 5036			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5037			return ipr_ata_slave_alloc(sdev);
 5038		}
 5039	}
 5040
 5041	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5042
 5043	return rc;
 5044}
 5045
 5046/**
 5047 * ipr_match_lun - Match function for specified LUN
 5048 * @ipr_cmd:	ipr command struct
 5049 * @device:		device to match (sdev)
 5050 *
 5051 * Returns:
 5052 *	1 if command matches sdev / 0 if command does not match sdev
 5053 **/
 5054static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
 5055{
 5056	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
 5057		return 1;
 5058	return 0;
 5059}
 5060
 5061/**
 5062 * ipr_cmnd_is_free - Check if a command is free or not
 5063 * @ipr_cmd:	ipr command struct
 5064 *
 5065 * Returns:
 5066 *	true / false
 5067 **/
 5068static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
 5069{
 5070	struct ipr_cmnd *loop_cmd;
 5071
 5072	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
 5073		if (loop_cmd == ipr_cmd)
 5074			return true;
 5075	}
 5076
 5077	return false;
 5078}
 5079
 5080/**
 5081 * ipr_match_res - Match function for specified resource entry
 5082 * @ipr_cmd:	ipr command struct
 5083 * @resource:	resource entry to match
 5084 *
 5085 * Returns:
 5086 *	1 if command matches the resource entry / 0 if it does not
 5087 **/
 5088static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
 5089{
 5090	struct ipr_resource_entry *res = resource;
 5091
 5092	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
 5093		return 1;
 5094	return 0;
 5095}
 5096
 5097/**
 5098 * ipr_wait_for_ops - Wait for matching commands to complete
 5099 * @ioa_cfg:	ioa config struct
 5100 * @device:		device to match (sdev)
 5101 * @match:		match function to use
 5102 *
 5103 * Returns:
 5104 *	SUCCESS / FAILED
 5105 **/
 5106static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
 5107			    int (*match)(struct ipr_cmnd *, void *))
 5108{
 5109	struct ipr_cmnd *ipr_cmd;
 5110	int wait, i;
 5111	unsigned long flags;
 5112	struct ipr_hrr_queue *hrrq;
 5113	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
 5114	DECLARE_COMPLETION_ONSTACK(comp);
 5115
 5116	ENTER;
 5117	do {
 5118		wait = 0;
 5119
 5120		for_each_hrrq(hrrq, ioa_cfg) {
 5121			spin_lock_irqsave(hrrq->lock, flags);
 5122			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
 5123				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
 5124				if (!ipr_cmnd_is_free(ipr_cmd)) {
 5125					if (match(ipr_cmd, device)) {
 5126						ipr_cmd->eh_comp = &comp;
 5127						wait++;
 5128					}
 5129				}
 5130			}
 5131			spin_unlock_irqrestore(hrrq->lock, flags);
 5132		}
 5133
 5134		if (wait) {
 5135			timeout = wait_for_completion_timeout(&comp, timeout);
 5136
 5137			if (!timeout) {
 5138				wait = 0;
 5139
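     				/* The wait timed out; clear each matching command's
     				 * eh_comp so a late completion cannot signal the
     				 * on-stack completion.
     				 */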
 5140				for_each_hrrq(hrrq, ioa_cfg) {
 5141					spin_lock_irqsave(hrrq->lock, flags);
 5142					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
 5143						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
 5144						if (!ipr_cmnd_is_free(ipr_cmd)) {
 5145							if (match(ipr_cmd, device)) {
 5146								ipr_cmd->eh_comp = NULL;
 5147								wait++;
 5148							}
 5149						}
 5150					}
 5151					spin_unlock_irqrestore(hrrq->lock, flags);
 5152				}
 5153
 5154				if (wait)
 5155					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
 5156				LEAVE;
 5157				return wait ? FAILED : SUCCESS;
 5158			}
 5159		}
 5160	} while (wait);
 5161
 5162	LEAVE;
 5163	return SUCCESS;
 5164}
 5165
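     /**
      * ipr_eh_host_reset - Reset the host adapter
      * @cmd:	scsi command struct
      *
      * This function resets the host adapter, unless a reset is
      * already in progress, and waits for the reset to complete.
      *
      * Return value:
      *	SUCCESS / FAILED
      **/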
 5166static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 5167{
 5168	struct ipr_ioa_cfg *ioa_cfg;
 5169	unsigned long lock_flags = 0;
 5170	int rc = SUCCESS;
 5171
 5172	ENTER;
 5173	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 5174	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5175
 5176	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 5177		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 5178		dev_err(&ioa_cfg->pdev->dev,
 5179			"Adapter being reset as a result of error recovery.\n");
 5180
 5181		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 5182			ioa_cfg->sdt_state = GET_DUMP;
 5183	}
 5184
 5185	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5186	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 5187	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5188
 5189	/* If we got hit with a host reset while we were already resetting
 5190	 * the adapter for some reason and that reset failed, return FAILED. */
 5191	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 5192		ipr_trace;
 5193		rc = FAILED;
 5194	}
 5195
 5196	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5197	LEAVE;
 5198	return rc;
 5199}
 5200
 5201/**
 5202 * ipr_device_reset - Reset the device
 5203 * @ioa_cfg:	ioa config struct
 5204 * @res:		resource entry struct
 5205 *
 5206 * This function issues a device reset to the affected device.
 5207 * If the device is a SCSI device, a LUN reset will be sent
 5208 * to the device first. If that does not work, a target reset
 5209 * will be sent. If the device is a SATA device, a PHY reset will
 5210 * be sent.
 5211 *
 5212 * Return value:
 5213 *	0 on success / non-zero on failure
 5214 **/
 5215static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
 5216			    struct ipr_resource_entry *res)
 5217{
 5218	struct ipr_cmnd *ipr_cmd;
 5219	struct ipr_ioarcb *ioarcb;
 5220	struct ipr_cmd_pkt *cmd_pkt;
 5221	struct ipr_ioarcb_ata_regs *regs;
 5222	u32 ioasc;
 5223
 5224	ENTER;
 5225	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 5226	ioarcb = &ipr_cmd->ioarcb;
 5227	cmd_pkt = &ioarcb->cmd_pkt;
 5228
 5229	if (ipr_cmd->ioa_cfg->sis64) {
 5230		regs = &ipr_cmd->i.ata_ioadl.regs;
 5231		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
 5232	} else
 5233		regs = &ioarcb->u.add_data.u.regs;
 5234
 5235	ioarcb->res_handle = res->res_handle;
 5236	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 5237	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
 5238	if (ipr_is_gata(res)) {
 5239		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
 5240		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
 5241		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
 5242	}
 5243
 5244	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
 5245	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 5246	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 5247	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
 5248		if (ipr_cmd->ioa_cfg->sis64)
 5249			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
 5250			       sizeof(struct ipr_ioasa_gata));
 5251		else
 5252			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
 5253			       sizeof(struct ipr_ioasa_gata));
 5254	}
 5255
 5256	LEAVE;
 5257	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
 5258}
 5259
 5260/**
 5261 * ipr_sata_reset - Reset the SATA port
 5262 * @link:	SATA link to reset
 5263 * @classes:	class of the attached device
      * @deadline:	unused
 5264 *
 5265 * This function issues a SATA phy reset to the affected ATA link.
 5266 *
 5267 * Return value:
 5268 *	0 on success / non-zero on failure
 5269 **/
 5270static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
 5271				unsigned long deadline)
 5272{
 5273	struct ipr_sata_port *sata_port = link->ap->private_data;
 5274	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
 5275	struct ipr_resource_entry *res;
 5276	unsigned long lock_flags = 0;
 5277	int rc = -ENXIO, ret;
 5278
 5279	ENTER;
 5280	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
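     	/* Wait for any in-progress adapter reset to finish before
     	 * issuing the device reset.
     	 */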
 5281	while (ioa_cfg->in_reset_reload) {
 5282		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5283		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 5284		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5285	}
 5286
 5287	res = sata_port->res;
 5288	if (res) {
 5289		rc = ipr_device_reset(ioa_cfg, res);
 5290		*classes = res->ata_class;
 5291		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5292
 5293		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
 5294		if (ret != SUCCESS) {
 5295			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5296			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 5297			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5298
 5299			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 5300		}
 5301	} else
 5302		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5303
 5304	LEAVE;
 5305	return rc;
 5306}
 5307
 5308/**
 5309 * __ipr_eh_dev_reset - Reset the device
 5310 * @scsi_cmd:	scsi command struct
 5311 *
 5312 * This function issues a device reset to the affected device.
 5313 * A LUN reset will be sent to the device first. If that does
 5314 * not work, a target reset will be sent.
 5315 *
 5316 * Return value:
 5317 *	SUCCESS / FAILED
 5318 **/
 5319static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 5320{
 5321	struct ipr_cmnd *ipr_cmd;
 5322	struct ipr_ioa_cfg *ioa_cfg;
 5323	struct ipr_resource_entry *res;
 5324	struct ata_port *ap;
 5325	int rc = 0, i;
 5326	struct ipr_hrr_queue *hrrq;
 5327
 5328	ENTER;
 5329	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
 5330	res = scsi_cmd->device->hostdata;
 5331
 5332	/*
 5333	 * If we are currently going through reset/reload, return failed. This will force the
 5334	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
 5335	 * reset to complete
 5336	 */
 5337	if (ioa_cfg->in_reset_reload)
 5338		return FAILED;
 5339	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 5340		return FAILED;
 5341
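     	/* Flag any outstanding ATA commands to this device as failed
     	 * so libata's error handler will clean them up.
     	 */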
 5342	for_each_hrrq(hrrq, ioa_cfg) {
 5343		spin_lock(&hrrq->_lock);
 5344		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
 5345			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
 5346
 5347			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
 5348				if (!ipr_cmd->qc)
 5349					continue;
 5350				if (ipr_cmnd_is_free(ipr_cmd))
 5351					continue;
 5352
 5353				ipr_cmd->done = ipr_sata_eh_done;
 5354				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
 5355					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
 5356					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
 5357				}
 5358			}
 5359		}
 5360		spin_unlock(&hrrq->_lock);
 5361	}
 5362	res->resetting_device = 1;
 5363	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
 5364
 5365	if (ipr_is_gata(res) && res->sata_port) {
 5366		ap = res->sata_port->ap;
 5367		spin_unlock_irq(scsi_cmd->device->host->host_lock);
 5368		ata_std_error_handler(ap);
 5369		spin_lock_irq(scsi_cmd->device->host->host_lock);
 5370	} else
 5371		rc = ipr_device_reset(ioa_cfg, res);
 5372	res->resetting_device = 0;
 5373	res->reset_occurred = 1;
 5374
 5375	LEAVE;
 5376	return rc ? FAILED : SUCCESS;
 5377}
 5378
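     /**
      * ipr_eh_dev_reset - Reset the device
      * @cmd:	scsi command struct
      *
      * This function takes the host lock, resets the device via
      * __ipr_eh_dev_reset, then waits for any outstanding ops to
      * the device to complete.
      *
      * Return value:
      *	SUCCESS / FAILED
      **/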
 5379static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
 5380{
 5381	int rc;
 5382	struct ipr_ioa_cfg *ioa_cfg;
 5383	struct ipr_resource_entry *res;
 5384
 5385	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 5386	res = cmd->device->hostdata;
 5387
 5388	if (!res)
 5389		return FAILED;
 5390
 5391	spin_lock_irq(cmd->device->host->host_lock);
 5392	rc = __ipr_eh_dev_reset(cmd);
 5393	spin_unlock_irq(cmd->device->host->host_lock);
 5394
 5395	if (rc == SUCCESS) {
 5396		if (ipr_is_gata(res) && res->sata_port)
 5397			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
 5398		else
 5399			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
 5400	}
 5401
 5402	return rc;
 5403}
 5404
 5405/**
 5406 * ipr_bus_reset_done - Op done function for bus reset.
 5407 * @ipr_cmd:	ipr command struct
 5408 *
 5409 * This function is the op done function for a bus reset
 5410 *
 5411 * Return value:
 5412 * 	none
 5413 **/
 5414static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
 5415{
 5416	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 5417	struct ipr_resource_entry *res;
 5418
 5419	ENTER;
 5420	if (!ioa_cfg->sis64)
 5421		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 5422			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
 5423				scsi_report_bus_reset(ioa_cfg->host, res->bus);
 5424				break;
 5425			}
 5426		}
 5427
 5428	/*
 5429	 * If abort has not completed, indicate the reset has, else call the
 5430	 * abort's done function to wake the sleeping eh thread
 5431	 */
 5432	if (ipr_cmd->sibling->sibling)
 5433		ipr_cmd->sibling->sibling = NULL;
 5434	else
 5435		ipr_cmd->sibling->done(ipr_cmd->sibling);
 5436
 5437	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 5438	LEAVE;
 5439}
 5440
 5441/**
 5442 * ipr_abort_timeout - An abort task has timed out
 5443 * @t:	Timer context used to fetch ipr command struct
 5444 *
 5445 * This function handles when an abort task times out. If this
 5446 * happens we issue a bus reset since we have resources tied
 5447 * up that must be freed before returning to the midlayer.
 5448 *
 5449 * Return value:
 5450 *	none
 5451 **/
 5452static void ipr_abort_timeout(struct timer_list *t)
 5453{
 5454	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
 5455	struct ipr_cmnd *reset_cmd;
 5456	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 5457	struct ipr_cmd_pkt *cmd_pkt;
 5458	unsigned long lock_flags = 0;
 5459
 5460	ENTER;
 5461	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 5462	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
 5463		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5464		return;
 5465	}
 5466
 5467	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
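     	/* Link the abort and bus reset commands as siblings so
     	 * ipr_bus_reset_done can wake the sleeping eh thread once
     	 * both have completed.
     	 */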
 5468	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 5469	ipr_cmd->sibling = reset_cmd;
 5470	reset_cmd->sibling = ipr_cmd;
 5471	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
 5472	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
 5473	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 5474	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
 5475	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
 5476
 5477	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
 5478	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 5479	LEAVE;
 5480}
 5481
 5482/**
 5483 * ipr_cancel_op - Cancel specified op
 5484 * @scsi_cmd:	scsi command struct
 5485 *
 5486 * This function cancels specified op.
 5487 *
 5488 * Return value:
 5489 *	SUCCESS / FAILED
 5490 **/
 5491static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 5492{
 5493	struct ipr_cmnd *ipr_cmd;
 5494	struct ipr_ioa_cfg *ioa_cfg;
 5495	struct ipr_resource_entry *res;
 5496	struct ipr_cmd_pkt *cmd_pkt;
 5497	u32 ioasc, int_reg;
 5498	int i, op_found = 0;
 5499	struct ipr_hrr_queue *hrrq;
 5500
 5501	ENTER;
 5502	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
 5503	res = scsi_cmd->device->hostdata;
 5504
 5505	/* If we are currently going through reset/reload, return failed.
 5506	 * This will force the mid-layer to call ipr_eh_host_reset,
 5507	 * which will then go to sleep and wait for the reset to complete
 5508	 */
 5509	if (ioa_cfg->in_reset_reload ||
 5510	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 5511		return FAILED;
 5512	if (!res)
 5513		return FAILED;
 5514
 5515	/*
 5516	 * If we are aborting a timed out op, chances are that the timeout was caused
 5517	 * by an EEH error that has not yet been detected. In such cases, reading a register will
 5518	 * trigger the EEH recovery infrastructure.
 5519	 */
 5520	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 5521
 5522	if (!ipr_is_gscsi(res))
 5523		return FAILED;
 5524
 5525	for_each_hrrq(hrrq, ioa_cfg) {
 5526		spin_lock(&hrrq->_lock);
 5527		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
 5528			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
 5529				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
 5530					op_found = 1;
 5531					break;
 5532				}
 5533			}
 5534		}
 5535		spin_unlock(&hrrq->_lock);
 5536	}
 5537
 5538	if (!op_found)
 5539		return SUCCESS;
 5540
 5541	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 5542	ipr_cmd->ioarcb.res_handle = res->res_handle;
 5543	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
 5544	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 5545	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
 5546	ipr_cmd->u.sdev = scsi_cmd->device;
 5547
 5548	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
 5549		    scsi_cmd->cmnd[0]);
 5550	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
 5551	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 5552
 5553	/*
 5554	 * If the abort task timed out and we sent a bus reset, we will get
 5555	 * one of the following responses to the abort
 5556	 */
 5557	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
 5558		ioasc = 0;
 5559		ipr_trace;
 5560	}
 5561
 5562	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 5563	if (!ipr_is_naca_model(res))
 5564		res->needs_sync_complete = 1;
 5565
 5566	LEAVE;
 5567	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
 5568}
 5569
 5570/**
 5571 * ipr_scan_finished - Report whether the device scan is complete
 5572 * @shost:	scsi host struct
      * @elapsed_time:	elapsed scan time, in jiffies
 5573 *
 5574 * Return value:
 5575 *	0 if scan in progress / 1 if scan is complete
 5576 **/
 5577static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
 5578{
 5579	unsigned long lock_flags;
 5580	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 5581	int rc = 0;
 5582
 5583	spin_lock_irqsave(shost->host_lock, lock_flags);
 5584	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
 5585		rc = 1;
 5586	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
 5587		rc = 1;
 5588	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 5589	return rc;
 5590}
 5591
 5592/**
 5593 * ipr_eh_abort - Abort a single op
 5594 * @scsi_cmd:	scsi command struct
 5595 *
 5596 * Return value:
 5597 * 	SUCCESS / FAILED
 5598 **/
 5599static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
 5600{
 5601	unsigned long flags;
 5602	int rc;
 5603	struct ipr_ioa_cfg *ioa_cfg;
 5604
 5605	ENTER;
 5606
 5607	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
 5608
 5609	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
 5610	rc = ipr_cancel_op(scsi_cmd);
 5611	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
 5612
 5613	if (rc == SUCCESS)
 5614		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
 5615	LEAVE;
 5616	return rc;
 5617}
 5618
 5619/**
 5620 * ipr_handle_other_interrupt - Handle "other" interrupts
 5621 * @ioa_cfg:	ioa config struct
 5622 * @int_reg:	interrupt register
 5623 *
 5624 * Return value:
 5625 * 	IRQ_NONE / IRQ_HANDLED
 5626 **/
 5627static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 5628					      u32 int_reg)
 5629{
 5630	irqreturn_t rc = IRQ_HANDLED;
 5631	u32 int_mask_reg;
 5632
 5633	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
 5634	int_reg &= ~int_mask_reg;
 5635
 5636	/* If no operational interrupt is pending, ignore it, except
 5637	 * on SIS 64, where we also check for a stage change interrupt.
 5638	 */
 5639	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
 5640		if (ioa_cfg->sis64) {
 5641			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 5642			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
 5643			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
 5644
 5645				/* clear stage change */
 5646				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
 5647				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
 5648				list_del(&ioa_cfg->reset_cmd->queue);
 5649				del_timer(&ioa_cfg->reset_cmd->timer);
 5650				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
 5651				return IRQ_HANDLED;
 5652			}
 5653		}
 5654
 5655		return IRQ_NONE;
 5656	}
 5657
 5658	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
 5659		/* Mask the interrupt */
 5660		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
 5661		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 5662
 5663		list_del(&ioa_cfg->reset_cmd->queue);
 5664		del_timer(&ioa_cfg->reset_cmd->timer);
 5665		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
 5666	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
 5667		if (ioa_cfg->clear_isr) {
 5668			if (ipr_debug && printk_ratelimit())
 5669				dev_err(&ioa_cfg->pdev->dev,
 5670					"Spurious interrupt detected. 0x%08X\n", int_reg);
 5671			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
 5672			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 5673			return IRQ_NONE;
 5674		}
 5675	} else {
 5676		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
 5677			ioa_cfg->ioa_unit_checked = 1;
 5678		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
 5679			dev_err(&ioa_cfg->pdev->dev,
 5680				"No Host RRQ. 0x%08X\n", int_reg);
 5681		else
 5682			dev_err(&ioa_cfg->pdev->dev,
 5683				"Permanent IOA failure. 0x%08X\n", int_reg);
 5684
 5685		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 5686			ioa_cfg->sdt_state = GET_DUMP;
 5687
 5688		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
 5689		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 5690	}
 5691
 5692	return rc;
 5693}
 5694
 5695/**
 5696 * ipr_isr_eh - Interrupt service routine error handler
 5697 * @ioa_cfg:	ioa config struct
 5698 * @msg:	message to log
      * @number:	number to log with the message
 5699 *
 5700 * Return value:
 5701 * 	none
 5702 **/
 5703static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
 5704{
 5705	ioa_cfg->errors_logged++;
 5706	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
 5707
 5708	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 5709		ioa_cfg->sdt_state = GET_DUMP;
 5710
 5711	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 5712}
 5713
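     /**
      * ipr_process_hrrq - Process completed responses from an HRR queue
      * @hrr_queue:	host request response queue to process
      * @budget:	maximum number of responses to process, or -1 for no limit
      * @doneq:	list to which completed commands are moved
      *
      * Return value:
      *	number of responses processed
      **/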
 5714static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
 5715						struct list_head *doneq)
 5716{
 5717	u32 ioasc;
 5718	u16 cmd_index;
 5719	struct ipr_cmnd *ipr_cmd;
 5720	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
 5721	int num_hrrq = 0;
 5722
 5723	/* If interrupts are disabled, ignore the interrupt */
 5724	if (!hrr_queue->allow_interrupts)
 5725		return 0;
 5726
 5727	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 5728	       hrr_queue->toggle_bit) {
 5729
 5730		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
 5731			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 5732			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 5733
 5734		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
 5735			     cmd_index < hrr_queue->min_cmd_id)) {
 5736			ipr_isr_eh(ioa_cfg,
 5737				"Invalid response handle from IOA:",
 5738				cmd_index);
 5739			break;
 5740		}
 5741
 5742		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
 5743		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 5744
 5745		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 5746
 5747		list_move_tail(&ipr_cmd->queue, doneq);
 5748
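     		/* Advance to the next response entry; wrap around and
     		 * flip the toggle bit at the end of the queue.
     		 */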
 5749		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
 5750			hrr_queue->hrrq_curr++;
 5751		} else {
 5752			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
 5753			hrr_queue->toggle_bit ^= 1u;
 5754		}
 5755		num_hrrq++;
 5756		if (budget > 0 && num_hrrq >= budget)
 5757			break;
 5758	}
 5759
 5760	return num_hrrq;
 5761}
 5762
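     /**
      * ipr_iopoll - irq_poll callback used to process completed ops
      * @iop:	irq_poll struct
      * @budget:	maximum number of ops to process
      *
      * Return value:
      *	number of ops processed
      **/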
 5763static int ipr_iopoll(struct irq_poll *iop, int budget)
 5764{
 5765	struct ipr_ioa_cfg *ioa_cfg;
 5766	struct ipr_hrr_queue *hrrq;
 5767	struct ipr_cmnd *ipr_cmd, *temp;
 5768	unsigned long hrrq_flags;
 5769	int completed_ops;
 5770	LIST_HEAD(doneq);
 5771
 5772	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
 5773	ioa_cfg = hrrq->ioa_cfg;
 5774
 5775	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 5776	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
 5777
 5778	if (completed_ops < budget)
 5779		irq_poll_complete(iop);
 5780	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 5781
 5782	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 5783		list_del(&ipr_cmd->queue);
 5784		del_timer(&ipr_cmd->timer);
 5785		ipr_cmd->fast_done(ipr_cmd);
 5786	}
 5787
 5788	return completed_ops;
 5789}
 5790
 5791/**
 5792 * ipr_isr - Interrupt service routine
 5793 * @irq:	irq number
 5794 * @devp:	pointer to ioa config struct
 5795 *
 5796 * Return value:
 5797 * 	IRQ_NONE / IRQ_HANDLED
 5798 **/
 5799static irqreturn_t ipr_isr(int irq, void *devp)
 5800{
 5801	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
 5802	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
 5803	unsigned long hrrq_flags = 0;
 5804	u32 int_reg = 0;
 5805	int num_hrrq = 0;
 5806	int irq_none = 0;
 5807	struct ipr_cmnd *ipr_cmd, *temp;
 5808	irqreturn_t rc = IRQ_NONE;
 5809	LIST_HEAD(doneq);
 5810
 5811	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 5812	/* If interrupts are disabled, ignore the interrupt */
 5813	if (!hrrq->allow_interrupts) {
 5814		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 5815		return IRQ_NONE;
 5816	}
 5817
 5818	while (1) {
 5819		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
 5820			rc =  IRQ_HANDLED;
 5821
 5822			if (!ioa_cfg->clear_isr)
 5823				break;
 5824
 5825			/* Clear the PCI interrupt */
 5826			num_hrrq = 0;
 5827			do {
 5828				writel(IPR_PCII_HRRQ_UPDATED,
 5829				     ioa_cfg->regs.clr_interrupt_reg32);
 5830				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 5831			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 5832				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 5833
 5834		} else if (rc == IRQ_NONE && irq_none == 0)