PageRenderTime 58ms CodeModel.GetById 16ms RepoModel.GetById 0ms app.codeStats 2ms

/linux-2.6.21.x/drivers/scsi/ipr.c

https://bitbucket.org/altlc/wive-rtnl-ralink-rt305x-routers-firmware-amod
C | 7617 lines | 4767 code | 1002 blank | 1848 comment | 609 complexity | 01c35d6800260896eda7cec5504a6dec MD5 | raw file
Possible License(s): CC-BY-SA-3.0, BSD-3-Clause, MPL-2.0-no-copyleft-exception, GPL-2.0, GPL-3.0, LGPL-3.0, 0BSD, AGPL-1.0, LGPL-2.1, LGPL-2.0

Large files are truncated; click here to view the full file.

  1. /*
  2. * ipr.c -- driver for IBM Power Linux RAID adapters
  3. *
  4. * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
  5. *
  6. * Copyright (C) 2003, 2004 IBM Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. /*
  24. * Notes:
  25. *
  26. * This driver is used to control the following SCSI adapters:
  27. *
  28. * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
  29. *
  30. * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
  31. * PCI-X Dual Channel Ultra 320 SCSI Adapter
  32. * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
  33. * Embedded SCSI adapter on p615 and p655 systems
  34. *
  35. * Supported Hardware Features:
  36. * - Ultra 320 SCSI controller
  37. * - PCI-X host interface
  38. * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
  39. * - Non-Volatile Write Cache
  40. * - Supports attachment of non-RAID disks, tape, and optical devices
  41. * - RAID Levels 0, 5, 10
  42. * - Hot spare
  43. * - Background Parity Checking
  44. * - Background Data Scrubbing
  45. * - Ability to increase the capacity of an existing RAID 5 disk array
  46. * by adding disks
  47. *
  48. * Driver Features:
  49. * - Tagged command queuing
  50. * - Adapter microcode download
  51. * - PCI hot plug
  52. * - SCSI device hot plug
  53. *
  54. */
  55. #include <linux/fs.h>
  56. #include <linux/init.h>
  57. #include <linux/types.h>
  58. #include <linux/errno.h>
  59. #include <linux/kernel.h>
  60. #include <linux/ioport.h>
  61. #include <linux/delay.h>
  62. #include <linux/pci.h>
  63. #include <linux/wait.h>
  64. #include <linux/spinlock.h>
  65. #include <linux/sched.h>
  66. #include <linux/interrupt.h>
  67. #include <linux/blkdev.h>
  68. #include <linux/firmware.h>
  69. #include <linux/module.h>
  70. #include <linux/moduleparam.h>
  71. #include <linux/libata.h>
  72. #include <asm/io.h>
  73. #include <asm/irq.h>
  74. #include <asm/processor.h>
  75. #include <scsi/scsi.h>
  76. #include <scsi/scsi_host.h>
  77. #include <scsi/scsi_tcq.h>
  78. #include <scsi/scsi_eh.h>
  79. #include <scsi/scsi_cmnd.h>
  80. #include "ipr.h"
  81. /*
  82. * Global Data
  83. */
  84. static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
  85. static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
  86. static unsigned int ipr_max_speed = 1;
  87. static int ipr_testmode = 0;
  88. static unsigned int ipr_fastfail = 0;
  89. static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
  90. static unsigned int ipr_enable_cache = 1;
  91. static unsigned int ipr_debug = 0;
  92. static int ipr_auto_create = 1;
  93. static DEFINE_SPINLOCK(ipr_driver_lock);
  94. /* This table describes the differences between DMA controller chips */
  95. static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
  96. { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
  97. .mailbox = 0x0042C,
  98. .cache_line_size = 0x20,
  99. {
  100. .set_interrupt_mask_reg = 0x0022C,
  101. .clr_interrupt_mask_reg = 0x00230,
  102. .sense_interrupt_mask_reg = 0x0022C,
  103. .clr_interrupt_reg = 0x00228,
  104. .sense_interrupt_reg = 0x00224,
  105. .ioarrin_reg = 0x00404,
  106. .sense_uproc_interrupt_reg = 0x00214,
  107. .set_uproc_interrupt_reg = 0x00214,
  108. .clr_uproc_interrupt_reg = 0x00218
  109. }
  110. },
  111. { /* Snipe and Scamp */
  112. .mailbox = 0x0052C,
  113. .cache_line_size = 0x20,
  114. {
  115. .set_interrupt_mask_reg = 0x00288,
  116. .clr_interrupt_mask_reg = 0x0028C,
  117. .sense_interrupt_mask_reg = 0x00288,
  118. .clr_interrupt_reg = 0x00284,
  119. .sense_interrupt_reg = 0x00280,
  120. .ioarrin_reg = 0x00504,
  121. .sense_uproc_interrupt_reg = 0x00290,
  122. .set_uproc_interrupt_reg = 0x00290,
  123. .clr_uproc_interrupt_reg = 0x00294
  124. }
  125. },
  126. };
  127. static const struct ipr_chip_t ipr_chip[] = {
  128. { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
  129. { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
  130. { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
  131. { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
  132. { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
  133. { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
  134. { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
  135. };
  136. static int ipr_max_bus_speeds [] = {
  137. IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
  138. };
  139. MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
  140. MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
  141. module_param_named(max_speed, ipr_max_speed, uint, 0);
  142. MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
  143. module_param_named(log_level, ipr_log_level, uint, 0);
  144. MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
  145. module_param_named(testmode, ipr_testmode, int, 0);
  146. MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
  147. module_param_named(fastfail, ipr_fastfail, int, 0);
  148. MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
  149. module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
  150. MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
  151. module_param_named(enable_cache, ipr_enable_cache, int, 0);
  152. MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
  153. module_param_named(debug, ipr_debug, int, 0);
  154. MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
  155. module_param_named(auto_create, ipr_auto_create, int, 0);
  156. MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
  157. MODULE_LICENSE("GPL");
  158. MODULE_VERSION(IPR_DRIVER_VERSION);
  159. /* A constant array of IOASCs/URCs/Error Messages */
  160. static const
  161. struct ipr_error_table_t ipr_error_table[] = {
  162. {0x00000000, 1, 1,
  163. "8155: An unknown error was received"},
  164. {0x00330000, 0, 0,
  165. "Soft underlength error"},
  166. {0x005A0000, 0, 0,
  167. "Command to be cancelled not found"},
  168. {0x00808000, 0, 0,
  169. "Qualified success"},
  170. {0x01080000, 1, 1,
  171. "FFFE: Soft device bus error recovered by the IOA"},
  172. {0x01088100, 0, 1,
  173. "4101: Soft device bus fabric error"},
  174. {0x01170600, 0, 1,
  175. "FFF9: Device sector reassign successful"},
  176. {0x01170900, 0, 1,
  177. "FFF7: Media error recovered by device rewrite procedures"},
  178. {0x01180200, 0, 1,
  179. "7001: IOA sector reassignment successful"},
  180. {0x01180500, 0, 1,
  181. "FFF9: Soft media error. Sector reassignment recommended"},
  182. {0x01180600, 0, 1,
  183. "FFF7: Media error recovered by IOA rewrite procedures"},
  184. {0x01418000, 0, 1,
  185. "FF3D: Soft PCI bus error recovered by the IOA"},
  186. {0x01440000, 1, 1,
  187. "FFF6: Device hardware error recovered by the IOA"},
  188. {0x01448100, 0, 1,
  189. "FFF6: Device hardware error recovered by the device"},
  190. {0x01448200, 1, 1,
  191. "FF3D: Soft IOA error recovered by the IOA"},
  192. {0x01448300, 0, 1,
  193. "FFFA: Undefined device response recovered by the IOA"},
  194. {0x014A0000, 1, 1,
  195. "FFF6: Device bus error, message or command phase"},
  196. {0x014A8000, 0, 1,
  197. "FFFE: Task Management Function failed"},
  198. {0x015D0000, 0, 1,
  199. "FFF6: Failure prediction threshold exceeded"},
  200. {0x015D9200, 0, 1,
  201. "8009: Impending cache battery pack failure"},
  202. {0x02040400, 0, 0,
  203. "34FF: Disk device format in progress"},
  204. {0x023F0000, 0, 0,
  205. "Synchronization required"},
  206. {0x024E0000, 0, 0,
  207. "No ready, IOA shutdown"},
  208. {0x025A0000, 0, 0,
  209. "Not ready, IOA has been shutdown"},
  210. {0x02670100, 0, 1,
  211. "3020: Storage subsystem configuration error"},
  212. {0x03110B00, 0, 0,
  213. "FFF5: Medium error, data unreadable, recommend reassign"},
  214. {0x03110C00, 0, 0,
  215. "7000: Medium error, data unreadable, do not reassign"},
  216. {0x03310000, 0, 1,
  217. "FFF3: Disk media format bad"},
  218. {0x04050000, 0, 1,
  219. "3002: Addressed device failed to respond to selection"},
  220. {0x04080000, 1, 1,
  221. "3100: Device bus error"},
  222. {0x04080100, 0, 1,
  223. "3109: IOA timed out a device command"},
  224. {0x04088000, 0, 0,
  225. "3120: SCSI bus is not operational"},
  226. {0x04088100, 0, 1,
  227. "4100: Hard device bus fabric error"},
  228. {0x04118000, 0, 1,
  229. "9000: IOA reserved area data check"},
  230. {0x04118100, 0, 1,
  231. "9001: IOA reserved area invalid data pattern"},
  232. {0x04118200, 0, 1,
  233. "9002: IOA reserved area LRC error"},
  234. {0x04320000, 0, 1,
  235. "102E: Out of alternate sectors for disk storage"},
  236. {0x04330000, 1, 1,
  237. "FFF4: Data transfer underlength error"},
  238. {0x04338000, 1, 1,
  239. "FFF4: Data transfer overlength error"},
  240. {0x043E0100, 0, 1,
  241. "3400: Logical unit failure"},
  242. {0x04408500, 0, 1,
  243. "FFF4: Device microcode is corrupt"},
  244. {0x04418000, 1, 1,
  245. "8150: PCI bus error"},
  246. {0x04430000, 1, 0,
  247. "Unsupported device bus message received"},
  248. {0x04440000, 1, 1,
  249. "FFF4: Disk device problem"},
  250. {0x04448200, 1, 1,
  251. "8150: Permanent IOA failure"},
  252. {0x04448300, 0, 1,
  253. "3010: Disk device returned wrong response to IOA"},
  254. {0x04448400, 0, 1,
  255. "8151: IOA microcode error"},
  256. {0x04448500, 0, 0,
  257. "Device bus status error"},
  258. {0x04448600, 0, 1,
  259. "8157: IOA error requiring IOA reset to recover"},
  260. {0x04448700, 0, 0,
  261. "ATA device status error"},
  262. {0x04490000, 0, 0,
  263. "Message reject received from the device"},
  264. {0x04449200, 0, 1,
  265. "8008: A permanent cache battery pack failure occurred"},
  266. {0x0444A000, 0, 1,
  267. "9090: Disk unit has been modified after the last known status"},
  268. {0x0444A200, 0, 1,
  269. "9081: IOA detected device error"},
  270. {0x0444A300, 0, 1,
  271. "9082: IOA detected device error"},
  272. {0x044A0000, 1, 1,
  273. "3110: Device bus error, message or command phase"},
  274. {0x044A8000, 1, 1,
  275. "3110: SAS Command / Task Management Function failed"},
  276. {0x04670400, 0, 1,
  277. "9091: Incorrect hardware configuration change has been detected"},
  278. {0x04678000, 0, 1,
  279. "9073: Invalid multi-adapter configuration"},
  280. {0x04678100, 0, 1,
  281. "4010: Incorrect connection between cascaded expanders"},
  282. {0x04678200, 0, 1,
  283. "4020: Connections exceed IOA design limits"},
  284. {0x04678300, 0, 1,
  285. "4030: Incorrect multipath connection"},
  286. {0x04679000, 0, 1,
  287. "4110: Unsupported enclosure function"},
  288. {0x046E0000, 0, 1,
  289. "FFF4: Command to logical unit failed"},
  290. {0x05240000, 1, 0,
  291. "Illegal request, invalid request type or request packet"},
  292. {0x05250000, 0, 0,
  293. "Illegal request, invalid resource handle"},
  294. {0x05258000, 0, 0,
  295. "Illegal request, commands not allowed to this device"},
  296. {0x05258100, 0, 0,
  297. "Illegal request, command not allowed to a secondary adapter"},
  298. {0x05260000, 0, 0,
  299. "Illegal request, invalid field in parameter list"},
  300. {0x05260100, 0, 0,
  301. "Illegal request, parameter not supported"},
  302. {0x05260200, 0, 0,
  303. "Illegal request, parameter value invalid"},
  304. {0x052C0000, 0, 0,
  305. "Illegal request, command sequence error"},
  306. {0x052C8000, 1, 0,
  307. "Illegal request, dual adapter support not enabled"},
  308. {0x06040500, 0, 1,
  309. "9031: Array protection temporarily suspended, protection resuming"},
  310. {0x06040600, 0, 1,
  311. "9040: Array protection temporarily suspended, protection resuming"},
  312. {0x06288000, 0, 1,
  313. "3140: Device bus not ready to ready transition"},
  314. {0x06290000, 0, 1,
  315. "FFFB: SCSI bus was reset"},
  316. {0x06290500, 0, 0,
  317. "FFFE: SCSI bus transition to single ended"},
  318. {0x06290600, 0, 0,
  319. "FFFE: SCSI bus transition to LVD"},
  320. {0x06298000, 0, 1,
  321. "FFFB: SCSI bus was reset by another initiator"},
  322. {0x063F0300, 0, 1,
  323. "3029: A device replacement has occurred"},
  324. {0x064C8000, 0, 1,
  325. "9051: IOA cache data exists for a missing or failed device"},
  326. {0x064C8100, 0, 1,
  327. "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
  328. {0x06670100, 0, 1,
  329. "9025: Disk unit is not supported at its physical location"},
  330. {0x06670600, 0, 1,
  331. "3020: IOA detected a SCSI bus configuration error"},
  332. {0x06678000, 0, 1,
  333. "3150: SCSI bus configuration error"},
  334. {0x06678100, 0, 1,
  335. "9074: Asymmetric advanced function disk configuration"},
  336. {0x06678300, 0, 1,
  337. "4040: Incomplete multipath connection between IOA and enclosure"},
  338. {0x06678400, 0, 1,
  339. "4041: Incomplete multipath connection between enclosure and device"},
  340. {0x06678500, 0, 1,
  341. "9075: Incomplete multipath connection between IOA and remote IOA"},
  342. {0x06678600, 0, 1,
  343. "9076: Configuration error, missing remote IOA"},
  344. {0x06679100, 0, 1,
  345. "4050: Enclosure does not support a required multipath function"},
  346. {0x06690200, 0, 1,
  347. "9041: Array protection temporarily suspended"},
  348. {0x06698200, 0, 1,
  349. "9042: Corrupt array parity detected on specified device"},
  350. {0x066B0200, 0, 1,
  351. "9030: Array no longer protected due to missing or failed disk unit"},
  352. {0x066B8000, 0, 1,
  353. "9071: Link operational transition"},
  354. {0x066B8100, 0, 1,
  355. "9072: Link not operational transition"},
  356. {0x066B8200, 0, 1,
  357. "9032: Array exposed but still protected"},
  358. {0x066B9100, 0, 1,
  359. "4061: Multipath redundancy level got better"},
  360. {0x066B9200, 0, 1,
  361. "4060: Multipath redundancy level got worse"},
  362. {0x07270000, 0, 0,
  363. "Failure due to other device"},
  364. {0x07278000, 0, 1,
  365. "9008: IOA does not support functions expected by devices"},
  366. {0x07278100, 0, 1,
  367. "9010: Cache data associated with attached devices cannot be found"},
  368. {0x07278200, 0, 1,
  369. "9011: Cache data belongs to devices other than those attached"},
  370. {0x07278400, 0, 1,
  371. "9020: Array missing 2 or more devices with only 1 device present"},
  372. {0x07278500, 0, 1,
  373. "9021: Array missing 2 or more devices with 2 or more devices present"},
  374. {0x07278600, 0, 1,
  375. "9022: Exposed array is missing a required device"},
  376. {0x07278700, 0, 1,
  377. "9023: Array member(s) not at required physical locations"},
  378. {0x07278800, 0, 1,
  379. "9024: Array not functional due to present hardware configuration"},
  380. {0x07278900, 0, 1,
  381. "9026: Array not functional due to present hardware configuration"},
  382. {0x07278A00, 0, 1,
  383. "9027: Array is missing a device and parity is out of sync"},
  384. {0x07278B00, 0, 1,
  385. "9028: Maximum number of arrays already exist"},
  386. {0x07278C00, 0, 1,
  387. "9050: Required cache data cannot be located for a disk unit"},
  388. {0x07278D00, 0, 1,
  389. "9052: Cache data exists for a device that has been modified"},
  390. {0x07278F00, 0, 1,
  391. "9054: IOA resources not available due to previous problems"},
  392. {0x07279100, 0, 1,
  393. "9092: Disk unit requires initialization before use"},
  394. {0x07279200, 0, 1,
  395. "9029: Incorrect hardware configuration change has been detected"},
  396. {0x07279600, 0, 1,
  397. "9060: One or more disk pairs are missing from an array"},
  398. {0x07279700, 0, 1,
  399. "9061: One or more disks are missing from an array"},
  400. {0x07279800, 0, 1,
  401. "9062: One or more disks are missing from an array"},
  402. {0x07279900, 0, 1,
  403. "9063: Maximum number of functional arrays has been exceeded"},
  404. {0x0B260000, 0, 0,
  405. "Aborted command, invalid descriptor"},
  406. {0x0B5A0000, 0, 0,
  407. "Command terminated by host"}
  408. };
  409. static const struct ipr_ses_table_entry ipr_ses_table[] = {
  410. { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
  411. { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
  412. { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
  413. { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
  414. { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
  415. { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
  416. { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
  417. { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
  418. { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
  419. { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
  420. { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
  421. { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
  422. { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
  423. };
  424. /*
  425. * Function Prototypes
  426. */
  427. static int ipr_reset_alert(struct ipr_cmnd *);
  428. static void ipr_process_ccn(struct ipr_cmnd *);
  429. static void ipr_process_error(struct ipr_cmnd *);
  430. static void ipr_reset_ioa_job(struct ipr_cmnd *);
  431. static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
  432. enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Records one snapshot of the command (opcode, ATA opcode, command index,
 * resource handle, timestamp) into the adapter's in-memory trace ring.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Claim the next slot in the trace ring. NOTE(review): relies on
	 * trace_index wrapping naturally at the end of the trace array
	 * (presumably a narrow index type sized to the array -- confirm
	 * against the declarations in ipr.h). */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* For SATA ops, also record the ATA command register value */
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
  460. /**
  461. * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  462. * @ipr_cmd: ipr command struct
  463. *
  464. * Return value:
  465. * none
  466. **/
  467. static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  468. {
  469. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  470. struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
  471. memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
  472. ioarcb->write_data_transfer_length = 0;
  473. ioarcb->read_data_transfer_length = 0;
  474. ioarcb->write_ioadl_len = 0;
  475. ioarcb->read_ioadl_len = 0;
  476. ioasa->ioasc = 0;
  477. ioasa->residual_data_len = 0;
  478. ioasa->u.gata.status = 0;
  479. ipr_cmd->scsi_cmd = NULL;
  480. ipr_cmd->qc = NULL;
  481. ipr_cmd->sense_buffer[0] = 0;
  482. ipr_cmd->dma_use_sg = 0;
  483. }
  484. /**
  485. * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
  486. * @ipr_cmd: ipr command struct
  487. *
  488. * Return value:
  489. * none
  490. **/
  491. static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  492. {
  493. ipr_reinit_ipr_cmnd(ipr_cmd);
  494. ipr_cmd->u.scratch = 0;
  495. ipr_cmd->sibling = NULL;
  496. init_timer(&ipr_cmd->timer);
  497. }
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Pops the first command block off the adapter's free queue and
 * re-initializes it for a new request.
 *
 * NOTE(review): assumes free_q is non-empty and that the caller
 * serializes access (presumably via the host lock) -- confirm at the
 * call sites; an empty list here would yield a bogus list_entry pointer.
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	/* Take the head of the free list... */
	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	/* ...and scrub any state left from its previous use */
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Undoes the PCI DMA mapping that was set up when the command was queued.
 * A no-op if the command never mapped any data (dma_use_sg == 0).
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			/* Mid-layer handed us a scatter/gather list */
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			/* Single flat buffer -- presumably mapped with
			 * pci_map_single() at queue time */
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts: the ISR checks this flag before servicing */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);

	/* Read back to flush the posted writes to the adapter; the value
	 * itself is intentionally discarded */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Saves the adapter's PCI-X command register so it can be restored after
 * a reset by ipr_set_pcix_cmd_reg().
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	/* No PCI-X capability: nothing to save, not an error */
	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	/* Force data parity error recovery (DPERR_E) and relaxed ordering
	 * (ERO) bits on in the saved value, so they are enabled on restore */
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
  581. /**
  582. * ipr_set_pcix_cmd_reg - Setup PCI-X command register
  583. * @ioa_cfg: ioa config struct
  584. *
  585. * Return value:
  586. * 0 on success / -EIO on failure
  587. **/
  588. static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  589. {
  590. int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
  591. if (pcix_cmd_reg) {
  592. if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
  593. ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
  594. dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
  595. return -EIO;
  596. }
  597. }
  598. return 0;
  599. }
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	/* Flag the qc as failed so libata EH treats it as an error, and
	 * mark the cached taskfile status busy */
	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;

	/* Return the command block before completing the qc to libata */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	/* Fail the op back to the mid-layer as a host-level error */
	scsi_cmd->result |= (DID_ERROR << 16);

	/* Release DMA mappings before completing, then return the
	 * command block to the free queue */
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	/* _safe variant required: each done() handler moves the command
	 * block off this list (back onto free_q) */
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		/* Fabricate a response indicating the adapter was reset */
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		/* Route completion through the EH-specific done handlers for
		 * mid-layer SCSI ops and libata ops respectively; internally
		 * generated ops keep their original done function */
		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		/* Cancel the per-command timeout before completing */
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}
	LEAVE;
}
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Track the op as outstanding before handing it to the adapter */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	/* Arm a per-command timer; timeout_func fires if the adapter never
	 * completes the op within @timeout jiffies */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Ensure all IOARCB memory writes are globally visible before
	 * ringing the doorbell (IOARRIN) with the command's PCI address */
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
  695. /**
  696. * ipr_internal_cmd_done - Op done function for an internally generated op.
  697. * @ipr_cmd: ipr command struct
  698. *
  699. * This function is the op done function for an internally generated,
  700. * blocking op. It simply wakes the sleeping thread.
  701. *
  702. * Return value:
  703. * none
  704. **/
  705. static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
  706. {
  707. if (ipr_cmd->sibling)
  708. ipr_cmd->sibling = NULL;
  709. else
  710. complete(&ipr_cmd->completion);
  711. }
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Issues the command via ipr_do_req() and blocks until
 * ipr_internal_cmd_done() signals completion. The host lock is dropped
 * while sleeping and re-acquired before returning, so the caller must
 * hold it on entry and tolerate it being released.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Sleep with the host lock released; the done function completes
	 * us from interrupt context */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		/* Track both the command and the hostrcb as outstanding */
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB: opcode, HCAM subtype, and the buffer
		 * length in bytes 7-8 (MSB first) */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		/* Single-element IOADL describing the DMA-able hostrcb buffer */
		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		/* Completion routing depends on the HCAM flavor */
		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		/* Make the IOARCB visible before ringing the IOARRIN doorbell */
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		/* HCAMs currently disallowed -- just park the buffer */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
  779. /**
  780. * ipr_init_res_entry - Initialize a resource entry struct.
  781. * @res: resource entry struct
  782. *
  783. * Return value:
  784. * none
  785. **/
  786. static void ipr_init_res_entry(struct ipr_resource_entry *res)
  787. {
  788. res->needs_sync_complete = 0;
  789. res->in_erp = 0;
  790. res->add_to_ml = 0;
  791. res->del_from_ml = 0;
  792. res->resetting_device = 0;
  793. res->sdev = NULL;
  794. res->sata_port = NULL;
  795. }
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Processes a configuration change notification: matches the reported
 * resource address against the known resources, allocates a resource
 * entry for a previously-unknown device, schedules mid-layer add/remove
 * work as needed, and finally re-issues the CCN HCAM so the adapter can
 * report the next change.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;	/* "new device": no matching resource entry found */

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	/* Look for an existing resource with the same resource address */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		/* Unknown device: take an entry from the free list. If none
		 * are left, re-arm the HCAM and drop the event. */
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	/* Refresh our copy of the adapter's config table entry */
	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			/* Device is exposed to the mid-layer: mark for
			 * removal and let the worker thread tear it down */
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		/* New or re-added device not yet known to the mid-layer */
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	/* Re-issue the HCAM so the adapter can report further changes */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
  847. /**
  848. * ipr_process_ccn - Op done function for a CCN.
  849. * @ipr_cmd: ipr command struct
  850. *
  851. * This function is the op done function for a configuration
  852. * change notification host controlled async from the adapter.
  853. *
  854. * Return value:
  855. * none
  856. **/
  857. static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
  858. {
  859. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  860. struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
  861. u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
  862. list_del(&hostrcb->queue);
  863. list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
  864. if (ioasc) {
  865. if (ioasc != IPR_IOASC_IOA_WAS_RESET)
  866. dev_err(&ioa_cfg->pdev->dev,
  867. "Host RCB failed with IOASC: 0x%08X\n", ioasc);
  868. ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
  869. } else {
  870. ipr_handle_config_change(ioa_cfg, hostrcb);
  871. }
  872. }
  873. /**
  874. * ipr_log_vpd - Log the passed VPD to the error log.
  875. * @vpd: vendor/product id/sn struct
  876. *
  877. * Return value:
  878. * none
  879. **/
  880. static void ipr_log_vpd(struct ipr_vpd *vpd)
  881. {
  882. char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
  883. + IPR_SERIAL_NUM_LEN];
  884. memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
  885. memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
  886. IPR_PROD_ID_LEN);
  887. buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
  888. ipr_err("Vendor/Product ID: %s\n", buffer);
  889. memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
  890. buffer[IPR_SERIAL_NUM_LEN] = '\0';
  891. ipr_err(" Serial Number: %s\n", buffer);
  892. }
  893. /**
  894. * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
  895. * @vpd: vendor/product id/sn/wwn struct
  896. *
  897. * Return value:
  898. * none
  899. **/
  900. static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
  901. {
  902. ipr_log_vpd(&vpd->vpd);
  903. ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
  904. be32_to_cpu(vpd->wwid[1]));
  905. }
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x12 (enhanced cache) error: the current and the
 * expected cache-directory/adapter card pairing (extended VPD including
 * WWNs), plus three words of additional IOA data.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x02 (cache) error: the current and the expected
 * cache-directory/adapter card pairing (basic VPD, no WWNs), plus three
 * words of additional IOA data. Non-enhanced counterpart of
 * ipr_log_enhanced_cache_error().
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x13 (enhanced configuration) error: one section per
 * logged device error with the device's location, old and new extended
 * VPD, and the cache-directory/adapter cards the device was last
 * attached to.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	/* NOTE(review): errors_logged is adapter-supplied and is not
	 * clamped to the capacity of error->dev[] here -- assumed in
	 * range; verify against the hostrcb layout in ipr.h. */
	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x03 (configuration) error: one section per logged
 * device error with the device's location, old and new VPD, the cards
 * the device was last attached to, and five words of per-device IOA
 * data. Non-enhanced counterpart of ipr_log_enhanced_config_error().
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	/* NOTE(review): errors_logged is adapter-supplied and is not
	 * clamped to the capacity of error->dev[] here -- assumed in
	 * range; verify against the hostrcb layout in ipr.h. */
	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
  1030. /**
  1031. * ipr_log_enhanced_array_error - Log an array configuration error.
  1032. * @ioa_cfg: ioa config struct
  1033. * @hostrcb: hostrcb struct
  1034. *
  1035. * Return value:
  1036. * none
  1037. **/
  1038. static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
  1039. struct ipr_hostrcb *hostrcb)
  1040. {
  1041. int i, num_entries;
  1042. struct ipr_hostrcb_type_14_error *error;
  1043. struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
  1044. const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
  1045. error = &hostrcb->hcam.u.error.u.type_14_error;
  1046. ipr_err_separator;
  1047. ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
  1048. error->protection_level,
  1049. ioa_cfg->host->host_no,
  1050. error->last_func_vset_res_addr.bus,
  1051. error->last_func_vset_res_addr.target,
  1052. error->last_func_vset_res_addr.lun);
  1053. ipr_err_separator;
  1054. array_entry = error->array_member;
  1055. num_entries = min_t(u32, be32_to_cpu(error->num_entries),
  1056. sizeof(error->array_member));
  1057. for (i = 0; i < num_entries; i++, array_entry++) {
  1058. if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
  1059. continue;
  1060. if (be32_to_cpu(error->exposed_mode_adn) == i)
  1061. ipr_err("Exposed Array Member %d:\n", i);
  1062. else
  1063. ipr_err("Array Member %d:\n", i);
  1064. ipr_log_ext_vpd(&array_entry->vpd);
  1065. ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
  1066. ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
  1067. "Expected Location");
  1068. ipr_err_separator;
  1069. }
  1070. }
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x04/0x06 (array) error. Walks all 18 possible
 * member slots: indices 0-9 come from error->array_member[] and
 * indices 10-17 from error->array_member2[] (hence the pointer switch
 * after slot 9). Slots whose serial number is all '0's are skipped.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		/* Skip never-populated slots. NOTE(review): 'continue'
		 * also skips the pointer advance below, so the first empty
		 * slot effectively ends the walk -- assumed intentional on
		 * the premise that populated entries are contiguous. */
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		/* Slot 9 is the last entry of array_member[]; continue the
		 * walk in array_member2[]. */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
  1114. /**
  1115. * ipr_log_hex_data - Log additional hex IOA error data.
  1116. * @ioa_cfg: ioa config struct
  1117. * @data: IOA error data
  1118. * @len: data length
  1119. *
  1120. * Return value:
  1121. * none
  1122. **/
  1123. static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
  1124. {
  1125. int i;
  1126. if (len == 0)
  1127. return;
  1128. if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
  1129. len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
  1130. for (i = 0; i < len / 4; i += 4) {
  1131. ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
  1132. be32_to_cpu(data[i]),
  1133. be32_to_cpu(data[i+1]),
  1134. be32_to_cpu(data[i+2]),
  1135. be32_to_cpu(data[i+3]));
  1136. }
  1137. }
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x17 (enhanced dual adapter) error: the failure
 * reason string, the remote adapter's extended VPD, and any trailing
 * data as a raw hex dump.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	/* Force NUL termination; the adapter-supplied string may lack one. */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	/* Dump whatever follows the fixed part of the overlay as hex:
	 * total hcam length minus the offset of the data[] member. */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x07 (dual adapter) error: the failure reason
 * string, the remote adapter's VPD, and any trailing data as a raw hex
 * dump. Non-enhanced counterpart of ipr_log_enhanced_dual_ioa_error().
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	/* Force NUL termination; the adapter-supplied string may lack one. */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	/* Dump whatever follows the fixed part of the overlay as hex. */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
/* Decode table for the IPR_PATH_ACTIVE_MASK bits of a fabric
 * descriptor's path_state byte (see ipr_log_fabric_path()). */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

/* Decode table for the IPR_PATH_STATE_MASK bits of a fabric
 * descriptor's path_state byte (see ipr_log_fabric_path()). */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Decodes the fabric descriptor's path_state byte into an active and a
 * state description via the path_active_desc/path_state_desc tables and
 * logs a message naming the IOA port, cascaded expander, and phy. A
 * value of 0xff in cascaded_expander or phy is treated as "not present"
 * and that field is omitted from the message. If either table lookup
 * fails, a raw fallback line with the undecoded path_state is logged.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/* Pick the message variant that names only the
			 * fields actually present (0xff == absent). */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* Unrecognized active/state combination: log it raw. */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/* Decode table for the IPR_PATH_CFG_TYPE_MASK bits of a path element's
 * type_status byte (see ipr_log_path_elem()). */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

/* Decode table for the IPR_PATH_CFG_STATUS_MASK bits of a path
 * element's type_status byte (see ipr_log_path_elem()). */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

/* Human-readable SAS link rates, indexed by the low nibble of a path
 * element's link_rate field (IPR_PHY_LINK_RATE_MASK). */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Decodes the element's type_status byte into a type and a status via
 * the path_type_desc/path_status_desc tables and logs the element's
 * phy, cascaded expander, link rate, and WWN. Fields holding 0xff are
 * omitted from the message. Non-existent elements are skipped; an
 * undecodable type/status combination is logged raw.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				/* IOA ports always report a phy; no cascade. */
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				/* Pick the variant naming only the fields
				 * present (0xff == absent). */
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	/* Unrecognized type/status combination: log it raw. */
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Dumps an overlay 0x20 (fabric) error: the failure reason, then each
 * variable-length fabric descriptor with all of its path elements.
 * Whatever bytes remain after the descriptors are dumped raw.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	/* Force NUL termination; the adapter-supplied string may lack one. */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes of payload following the fixed part of the overlay. */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		/* Advance past this variable-length descriptor. */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Anything left over after the descriptors is dumped raw. */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Fallback logger used for overlays without a dedicated decoder: dumps
 * the entire raw hostrcb payload as hex words.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
  1387. /**
  1388. * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
  1389. * @ioasc: IOASC
  1390. *
  1391. * This function will return the index of into the ipr_error_table
  1392. * for the specified IOASC. If the IOASC is not in the table,
  1393. * 0 will be returned, which points to the entry used for unknown errors.
  1394. *
  1395. * Return value:
  1396. * index into the ipr_error_table
  1397. **/
  1398. static u32 ipr_get_error(u32 ioasc)
  1399. {
  1400. int i;
  1401. for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
  1402. if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
  1403. return i;
  1404. return 0;
  1405. }
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system. It reports a bus
 * reset to the SCSI mid-layer when one is indicated, looks up the
 * failing IOASC in ipr_error_table, and dispatches to the overlay-
 * specific logger for the hostrcb's overlay_id.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	/* Some table entries are marked "don't log"; honor that. */
	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Detailed overlay decoding only at elevated log levels. */
	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	/* Clamp a bogus adapter-reported length to the raw buffer size. */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	/* Dispatch on the overlay format; unknown overlays get a raw dump. */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
  1479. /**
  1480. * ipr_process_error - Op done function for an adapter error log.
  1481. * @ipr_cmd: ipr command struct
  1482. *
  1483. * This function is the op done function for an error log host
  1484. * controlled async from the adapter. It will log the error and
  1485. * send the HCAM back to the adapter.
  1486. *
  1487. * Return value:
  1488. * none
  1489. **/
  1490. static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
  1491. {
  1492. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  1493. struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
  1494. u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
  1495. list_del(&hostrcb->queue);
  1496. list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
  1497. if (!ioasc) {
  1498. ipr_handle_log_data(ioa_cfg, hostrcb);
  1499. } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
  1500. dev_err(&ioa_cfg->pdev->dev,
  1501. "Host RCB failed with IOASC: 0x%08X\n", ioasc);
  1502. }
  1503. ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
  1504. }
  1505. /**
  1506. * ipr_timeout - An internally generated op has timed out.
  1507. * @ipr_cmd: ipr command struct
  1508. *
  1509. * This function blocks host requests and initiates an
  1510. * adapter reset.
  1511. *
  1512. * Return value:
  1513. *…

Large files files are truncated, but you can click here to view the full file