
/drivers/scsi/hpsa.c

https://gitlab.com/CadeLaRen/linux
/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.16-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
        "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
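
/*
 * A note on the final wildcard entry above (an observation about this
 * table, not text from the controller documentation): it matches any HP
 * device whose PCI class is PCI_CLASS_STORAGE_RAID, i.e. boards this
 * table does not list explicitly. Whether the driver actually binds to
 * such an unknown board is gated by the hpsa_allow_any module parameter
 * declared above; see also hpsa_lookup_board_id(), which consumes this
 * table.
 */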

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array P244br", &SA5_access},
        {0x21BE103C, "Smart Array P741m", &SA5_access},
        {0x21BF103C, "Smart HBA H240ar", &SA5_access},
        {0x21C0103C, "Smart Array P440ar", &SA5_access},
        {0x21C1103C, "Smart Array P840ar", &SA5_access},
        {0x21C2103C, "Smart Array P440", &SA5_access},
        {0x21C3103C, "Smart Array P441", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array P841", &SA5_access},
        {0x21C6103C, "Smart HBA H244br", &SA5_access},
        {0x21C7103C, "Smart HBA H240", &SA5_access},
        {0x21C8103C, "Smart HBA H241", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array P246br", &SA5_access},
        {0x21CB103C, "Smart Array P840", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart HBA", &SA5_access},
        {0x05809005, "SmartHBA-SA", &SA5_access},
        {0x05819005, "SmartHBA-SA 8i", &SA5_access},
        {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
        {0x05839005, "SmartHBA-SA 8e", &SA5_access},
        {0x05849005, "SmartHBA-SA 16i", &SA5_access},
        {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
                        struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
        *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
                struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
        void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
                                            struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
        u32 *cfg_base_addr, u64 *cfg_base_addr_index,
        u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
        unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
        int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
        struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
                               struct hpsa_scsi_dev_t *dev,
                               unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}
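
/*
 * Both helpers above rely on the same convention: the first unsigned
 * long of the Scsi_Host private area holds the struct ctlr_info pointer
 * (presumably stored there at host allocation time), so a single
 * dereference of shost_priv() recovers the controller.
 */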

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
        return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
        return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
        u8 *sense_key, u8 *asc, u8 *ascq)
{
        struct scsi_sense_hdr sshdr;
        bool rc;

        *sense_key = -1;
        *asc = -1;
        *ascq = -1;

        if (sense_data_len < 1)
                return;

        rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
        if (rc) {
                *sense_key = sshdr.sense_key;
                *asc = sshdr.asc;
                *ascq = sshdr.ascq;
        }
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        u8 sense_key, asc, ascq;
        int sense_len;

        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
                sense_len = sizeof(c->err_info->SenseInfo);
        else
                sense_len = c->err_info->SenseLen;

        decode_sense_data(c->err_info->SenseInfo, sense_len,
                                &sense_key, &asc, &ascq);
        if (sense_key != UNIT_ATTENTION || asc == 0xff)
                return 0;

        switch (asc) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: a state change detected, command retried\n",
                        h->devname);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev,
                        "%s: LUN failure detected\n", h->devname);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: report LUN data changed\n", h->devname);
        /*
         * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
         * target (array) devices.
         */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev,
                        "%s: a power on or device reset detected\n",
                        h->devname);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev,
                        "%s: unit attention cleared by another initiator\n",
                        h->devname);
                break;
        default:
                dev_warn(&h->pdev->dev,
                        "%s: unknown unit attention detected\n",
                        h->devname);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA "device busy");
        return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ld;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        ld = lockup_detected(h);
        return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n",
                        atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
        0x323D103C, /* Smart Array P700m */
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* SmartArray P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
        int i;

        for (i = 0; i < nelems; i++)
                if (a[i] == board_id)
                        return 1;
        return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
        return !board_id_in_array(unresettable_controller,
                        ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        return !board_id_in_array(soft_unresettable_controller,
                        ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
        return board_id_in_array(needs_abort_tags_swizzled,
                        ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
        "1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
#define HPSA_RAID_1 2 /* also used for RAID 10 */
#define HPSA_RAID_5 3 /* also used for RAID 50 */
#define HPSA_RAID_51 4
#define HPSA_RAID_6 5 /* also used for RAID 60 */
#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
        return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_device(hdev)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t sas_address_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        u64 sas_address;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        sas_address = hdev->sas_address;
        spin_unlock_irqrestore(&h->lock, flags);

        return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int i;
        int output_len = 0;
        u8 box;
        u8 bay;
        u8 path_map_index = 0;
        char *active;
        unsigned char phys_connector[2];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->devlock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->devlock, flags);
                return -ENODEV;
        }

        bay = hdev->bay;
        for (i = 0; i < MAX_PATHS; i++) {
                path_map_index = 1<<i;
                if (i == hdev->active_path_index)
                        active = "Active";
                else if (hdev->path_map & path_map_index)
                        active = "Inactive";
                else
                        continue;

                output_len += scnprintf(buf + output_len,
                                PAGE_SIZE - output_len,
                                "[%d:%d:%d:%d] %20.20s ",
                                h->scsi_host->host_no,
                                hdev->bus, hdev->target, hdev->lun,
                                scsi_device_type(hdev->devtype));

                if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
                        output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "%s\n", active);
                        continue;
                }

                box = hdev->box[i];
                memcpy(&phys_connector, &hdev->phys_connector[i],
                        sizeof(phys_connector));
                if (phys_connector[0] < '0')
                        phys_connector[0] = '0';
                if (phys_connector[1] < '0')
                        phys_connector[1] = '0';
                output_len += scnprintf(buf + output_len,
                                PAGE_SIZE - output_len,
                                "PORT: %.2s ",
                                phys_connector);
                if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
                        hdev->expose_device) {
                        if (box == 0 || box == 0xFF) {
                                output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "BAY: %hhu %s\n",
                                        bay, active);
                        } else {
                                output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "BOX: %hhu BAY: %hhu %s\n",
                                        box, bay, active);
                        }
                } else if (box != 0 && box != 0xFF) {
                        output_len += scnprintf(buf + output_len,
                                PAGE_SIZE - output_len, "BOX: %hhu %s\n",
                                box, active);
                } else
                        output_len += scnprintf(buf + output_len,
                                PAGE_SIZE - output_len, "%s\n", active);
        }

        spin_unlock_irqrestore(&h->devlock, flags);
        return output_len;
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
                        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
        host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        &dev_attr_path_info,
        &dev_attr_sas_address,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        &dev_attr_lockup_detected,
        NULL,
};

#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
                HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_configure        = hpsa_slave_configure,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs             = hpsa_sdev_attrs,
        .shost_attrs            = hpsa_shost_attrs,
        .max_sectors            = 8192,
        .no_write_same          = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}
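
/*
 * How the wraparound test above works (a reading of the code, not an
 * authoritative spec): the controller posts completed tags into the
 * reply ring and encodes a cycle bit in bit 0 of each entry.  That bit
 * flips on every pass over the ring, so an entry is fresh only while
 * its cycle bit equals rq->wraparound; once the consumer reaches the
 * end of the ring it toggles rq->wraparound to keep following the
 * producer.
 */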

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 *   (command type is needed because ioaccel1 mode
 *    commands are submitted through the same register as normal
 *    mode commands, so this is how the controller knows whether
 *    the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 *   a separate special register for submitting commands.
 */
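
/*
 * Illustrative example (derived from the rules above, not from the
 * hardware documentation): a normal performant-mode command whose block
 * fetch table entry is 3 would have its bus address tagged as
 *
 *      c->busaddr |= 1 | (3 << 1);     /- bits 3-0 = 0111b -/
 *
 * which is exactly what set_performant_mode() below computes from
 * h->blockFetchTable[c->Header.SGList].
 */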

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
                                        int reply_queue)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vector))
                        return;
                if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
                else
                        c->Header.ReplyQueue = reply_queue % h->nreply_queues;
        }
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        else
                cp->ReplyQueue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
         *  - pull count (bits 1-3)
         *  - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                                        IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
                &h->ioaccel2_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
                struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
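
/*
 * Note on the pairing above (descriptive, inferred from the code): the
 * inc/dec of firmware_flash_in_progress uses atomic_dec_and_test(), so
 * with several flash commands in flight the heartbeat interval is only
 * restored when the last one completes.
 */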

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c, int reply_queue)
{
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        case IOACCEL2_TMF:
                set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        default:
                set_performant_mode(h, c, reply_queue);
                h->access.submit_command(h, c);
        }
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
        if (unlikely(hpsa_is_pending_event(c)))
                return finish_cmd(c);

        __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}
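
/*
 * Summary of the allocation scheme implied by the bitmap logic: taken
 * target numbers on this bus are marked in lun_taken, and
 * find_first_zero_bit() hands back the lowest unused target; a newly
 * placed physical device always gets lun 0.
 */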

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
        struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
        char label[LABEL_SIZE];

        if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
                return;

        switch (dev->devtype) {
        case TYPE_RAID:
                snprintf(label, LABEL_SIZE, "controller");
                break;
        case TYPE_ENCLOSURE:
                snprintf(label, LABEL_SIZE, "enclosure");
                break;
        case TYPE_DISK:
        case TYPE_ZBC:
                if (dev->external)
                        snprintf(label, LABEL_SIZE, "external");
                else if (!is_logical_dev_addr_mode(dev->scsi3addr))
                        snprintf(label, LABEL_SIZE, "%s",
                                raid_label[PHYSICAL_DRIVE]);
                else
                        snprintf(label, LABEL_SIZE, "RAID-%s",
                                dev->raid_level > RAID_UNKNOWN ? "?" :
                                raid_label[dev->raid_level]);
                break;
        case TYPE_ROM:
                snprintf(label, LABEL_SIZE, "rom");
                break;
        case TYPE_TAPE:
                snprintf(label, LABEL_SIZE, "tape");
                break;
        case TYPE_MEDIUM_CHANGER:
                snprintf(label, LABEL_SIZE, "changer");
                break;
        default:
                snprintf(label, LABEL_SIZE, "UNKNOWN");
                break;
        }

        dev_printk(level, &h->pdev->dev,
                "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
                h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
                description,
                scsi_device_type(dev->devtype),
                dev->vendor,
                dev->model,
                label,
                dev->offload_config ? '+' : '-',
                dev->offload_enabled ? '+' : '-',
                dev->expose_device);
}
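
/*
 * A hypothetical example of the resulting log line, with invented
 * values for illustration only:
 *
 *   scsi 2:0:0:0: added Direct-Access HP LOGICAL VOLUME RAID-5
 *        SSDSmartPathCap+ En- Exp=1
 */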

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4 and 5.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        addr1[5] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                addr2[5] = 0;
                /* differ only in byte 4 and 5? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, device,
                device->expose_device ? "added" : "masked");
        device->offload_to_be_enabled = device->offload_enabled;
        device->offload_enabled = 0;
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        int offload_enabled;
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;

        /* Raid offload parameters changed.  Careful about the ordering. */
        if (new_entry->offload_config && new_entry->offload_enabled) {
                /*
                 * if drive is newly offload_enabled, we want to copy the
                 * raid map data first.  If previously offload_enabled and
                 * offload_config were set, raid map data had better be
                 * the same as it was before.  if raid map data is changed
                 * then it had better be the case that
                 * h->dev[entry]->offload_enabled is currently 0.
                 */
                h->dev[entry]->raid_map = new_entry->raid_map;
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
        }
        if (new_entry->hba_ioaccel_enabled) {
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
                wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
        }
        h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
        h->dev[entry]->offload_config = new_entry->offload_config;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
        h->dev[entry]->queue_depth = new_entry->queue_depth;

        /*
         * We can turn off ioaccel offload now, but need to delay turning
         * it on until we can update h->dev[entry]->phys_disk[], but we
         * can't do that until all the devices are updated.
         */
        h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
        if (!new_entry->offload_enabled)
                h->dev[entry]->offload_enabled = 0;

        offload_enabled = h->dev[entry]->offload_enabled;
        h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
        hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
        h->dev[entry]->offload_enabled = offload_enabled;
}
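
/*
 * The swap around hpsa_show_dev_msg() above is deliberate, as far as
 * the code shows: offload_enabled is briefly set to its future
 * (offload_to_be_enabled) value so the "updated" log line reflects the
 * state the device is about to have, then restored until phys_disk[]
 * can safely be updated.
 */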

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
        new_entry->offload_to_be_enabled = new_entry->offload_enabled;
        new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])
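
/*
 * SCSI3ADDR_EQ is an unrolled 8-byte equality test, behaving like
 * memcmp(a, b, 8) == 0 while keeping the comparison a plain expression
 * (a reading of the macro, not a documented requirement).
 */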

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* Device attributes that can change, but don't mean
         * that the device is a different device, nor that the OS
         * needs to be told anything about the change.
         */
        if (dev1->raid_level != dev2->raid_level)
                return 1;
        if (dev1->offload_config != dev2->offload_config)
                return 1;
        if (dev1->offload_enabled != dev2->offload_enabled)
                return 1;
        if (!is_logical_dev_addr_mode(dev1->scsi3addr))
                if (dev1->queue_depth != dev2->queue_depth)
                        return 1;
        return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
        if (needle == NULL)
                return DEVICE_NOT_FOUND;

        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i])) {
                                if (device_updated(needle, haystack[i]))
                                        return DEVICE_UPDATED;
                                return DEVICE_SAME;
                        } else {
                                /* Keep offline devices offline */
                                if (needle->volume_offline)
                                        return DEVICE_NOT_FOUND;
                                return DEVICE_CHANGED;
                        }
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
        unsigned char scsi3addr[])
{
        struct offline_device_entry *device;
        unsigned long flags;

        /* Check to see if device is already on the list */
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_for_each_entry(device, &h->offline_device_list, offline_list) {
                if (memcmp(device->scsi3addr, scsi3addr,
                        sizeof(device->scsi3addr)) == 0) {
                        spin_unlock_irqrestore(&h->offline_device_lock, flags);
                        return;
                }
        }
        spin_unlock_irqrestore(&h->offline_device_lock, flags);

        /* Device is not on the list, add it. */
        device = kmalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
                return;
        }
        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_add_tail(&device->offline_list, &h->offline_device_list);
        spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *sd)
{
        if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
        switch (sd->volume_offline) {
        case HPSA_LV_OK:
                break;
        case HPSA_LV_UNDERGOING_ERASE:
                dev_inf