/drivers/scsi/aacraid/aachba.c

http://github.com/mirrors/linux · C · 4212 lines · 3240 code · 528 blank · 444 comment · 510 complexity · 54a8311794a8b0795d8df84fdd67ab9c MD5 · raw file

Large files are truncated click here to view the full file

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Adaptec AAC series RAID controller driver
  4. * (c) Copyright 2001 Red Hat Inc.
  5. *
  6. * based on the old aacraid driver that is..
  7. * Adaptec aacraid device driver for Linux.
  8. *
  9. * Copyright (c) 2000-2010 Adaptec, Inc.
  10. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  11. * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  12. *
  13. * Module Name:
  14. * aachba.c
  15. *
  16. * Abstract: Contains Interfaces to manage IOs.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/init.h>
  20. #include <linux/types.h>
  21. #include <linux/pci.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/slab.h>
  24. #include <linux/completion.h>
  25. #include <linux/blkdev.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/highmem.h> /* For flush_kernel_dcache_page */
  28. #include <linux/module.h>
  29. #include <asm/unaligned.h>
  30. #include <scsi/scsi.h>
  31. #include <scsi/scsi_cmnd.h>
  32. #include <scsi/scsi_device.h>
  33. #include <scsi/scsi_host.h>
  34. #include "aacraid.h"
  35. /* values for inqd_pdt: Peripheral device type in plain English */
  36. #define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
  37. #define INQD_PDT_PROC 0x03 /* Processor device */
  38. #define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
  39. #define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
  40. #define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
  41. #define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
  42. #define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
  43. #define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */
  44. /*
  45. * Sense codes
  46. */
  47. #define SENCODE_NO_SENSE 0x00
  48. #define SENCODE_END_OF_DATA 0x00
  49. #define SENCODE_BECOMING_READY 0x04
  50. #define SENCODE_INIT_CMD_REQUIRED 0x04
  51. #define SENCODE_UNRECOVERED_READ_ERROR 0x11
  52. #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
  53. #define SENCODE_INVALID_COMMAND 0x20
  54. #define SENCODE_LBA_OUT_OF_RANGE 0x21
  55. #define SENCODE_INVALID_CDB_FIELD 0x24
  56. #define SENCODE_LUN_NOT_SUPPORTED 0x25
  57. #define SENCODE_INVALID_PARAM_FIELD 0x26
  58. #define SENCODE_PARAM_NOT_SUPPORTED 0x26
  59. #define SENCODE_PARAM_VALUE_INVALID 0x26
  60. #define SENCODE_RESET_OCCURRED 0x29
  61. #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
  62. #define SENCODE_INQUIRY_DATA_CHANGED 0x3F
  63. #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
  64. #define SENCODE_DIAGNOSTIC_FAILURE 0x40
  65. #define SENCODE_INTERNAL_TARGET_FAILURE 0x44
  66. #define SENCODE_INVALID_MESSAGE_ERROR 0x49
  67. #define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
  68. #define SENCODE_OVERLAPPED_COMMAND 0x4E
  69. /*
  70. * Additional sense codes
  71. */
  72. #define ASENCODE_NO_SENSE 0x00
  73. #define ASENCODE_END_OF_DATA 0x05
  74. #define ASENCODE_BECOMING_READY 0x01
  75. #define ASENCODE_INIT_CMD_REQUIRED 0x02
  76. #define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
  77. #define ASENCODE_INVALID_COMMAND 0x00
  78. #define ASENCODE_LBA_OUT_OF_RANGE 0x00
  79. #define ASENCODE_INVALID_CDB_FIELD 0x00
  80. #define ASENCODE_LUN_NOT_SUPPORTED 0x00
  81. #define ASENCODE_INVALID_PARAM_FIELD 0x00
  82. #define ASENCODE_PARAM_NOT_SUPPORTED 0x01
  83. #define ASENCODE_PARAM_VALUE_INVALID 0x02
  84. #define ASENCODE_RESET_OCCURRED 0x00
  85. #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
  86. #define ASENCODE_INQUIRY_DATA_CHANGED 0x03
  87. #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
  88. #define ASENCODE_DIAGNOSTIC_FAILURE 0x80
  89. #define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
  90. #define ASENCODE_INVALID_MESSAGE_ERROR 0x00
  91. #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
  92. #define ASENCODE_OVERLAPPED_COMMAND 0x00
  93. #define BYTE0(x) (unsigned char)(x)
  94. #define BYTE1(x) (unsigned char)((x) >> 8)
  95. #define BYTE2(x) (unsigned char)((x) >> 16)
  96. #define BYTE3(x) (unsigned char)((x) >> 24)
/* MODE_SENSE data format */
typedef struct {
	struct {
		u8	data_length;	/* total length of data that follows, minus this byte */
		u8	med_type;	/* medium type */
		u8	dev_par;	/* device-specific parameter (WP/DPOFUA bits) */
		u8	bd_length;	/* length of the block descriptor that follows */
	} __attribute__((packed)) hd;	/* 4-byte MODE_SENSE(6) parameter header */
	struct {
		u8	dens_code;	/* density code */
		u8	block_count[3];	/* number of blocks, big-endian 24-bit */
		u8	reserved;
		u8	block_length[3];/* block size in bytes, big-endian 24-bit */
	} __attribute__((packed)) bd;	/* 8-byte short-LBA block descriptor */
	u8 mpc_buf[3];			/* room for a returned mode page header */
} __attribute__((packed)) aac_modep_data;

/* MODE_SENSE_10 data format */
typedef struct {
	struct {
		u8	data_length[2];	/* total length, big-endian 16-bit */
		u8	med_type;	/* medium type */
		u8	dev_par;	/* device-specific parameter */
		u8	rsrvd[2];
		u8	bd_length[2];	/* block-descriptor length, big-endian 16-bit */
	} __attribute__((packed)) hd;	/* 8-byte MODE_SENSE(10) parameter header */
	struct {
		u8	dens_code;	/* density code */
		u8	block_count[3];	/* number of blocks, big-endian 24-bit */
		u8	reserved;
		u8	block_length[3];/* block size in bytes, big-endian 24-bit */
	} __attribute__((packed)) bd;	/* 8-byte short-LBA block descriptor */
	u8 mpc_buf[3];			/* room for a returned mode page header */
} __attribute__((packed)) aac_modep10_data;
/*------------------------------------------------------------------------------
 *              S T R U C T S / T Y P E D E F S
 *----------------------------------------------------------------------------*/
/* SCSI inquiry data - standard 36-byte INQUIRY response we synthesize
 * for array containers */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};
/* Added for VPD 0x83 - Device Identification page structures.
 * NOTE(review): bitfield ordering within a byte is compiler-dependent;
 * the layout below matches the one the rest of this driver assumes. */
struct tvpd_id_descriptor_type_1 {
	u8 codeset:4;		/* VPD_CODE_SET */
	u8 reserved:4;
	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2:4;
	u8 reserved3;
	u8 identifierlength;
	u8 venid[8];		/* T10 vendor ID */
	u8 productid[16];
	u8 serialnumber[8];	/* SN in ASCII */
};

struct tvpd_id_descriptor_type_2 {
	u8 codeset:4;		/* VPD_CODE_SET */
	u8 reserved:4;
	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2:4;
	u8 reserved3;
	u8 identifierlength;
	struct teu64id {
		u32 Serial;
		/* The serial number supposed to be 40 bits,
		 * bit we only support 32, so make the last byte zero. */
		u8 reserved;
		u8 venid[3];
	} eu64id;		/* EUI-64 based identifier */
};

struct tvpd_id_descriptor_type_3 {
	u8 codeset : 4;		/* VPD_CODE_SET */
	u8 reserved : 4;
	u8 identifiertype : 4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2 : 4;
	u8 reserved3;
	u8 identifierlength;
	u8 Identifier[16];	/* NAA-style identifier copied from fsa_dev */
};

/* Complete VPD page 0x83 response: header plus the three descriptors */
struct tvpd_page83 {
	u8 DeviceType:5;
	u8 DeviceTypeQualifier:3;
	u8 PageCode;		/* 0x83 */
	u8 reserved;
	u8 PageLength;		/* length of the descriptors that follow */
	struct tvpd_id_descriptor_type_1 type1;
	struct tvpd_id_descriptor_type_2 type2;
	struct tvpd_id_descriptor_type_3 type3;
};
/*
 *              M O D U L E   G L O B A L S
 */

/* Scatter/gather list builders for the various FIB wire formats. */
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
				struct aac_raw_io2 *rio2, int sg_max);
static long aac_build_sghba(struct scsi_cmnd *scsicmd,
				struct aac_hba_cmd_req *hbacmd,
				int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
				int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif

/*
 *	Non dasd selection is handled entirely in aachba now
 */

static int nondasd = -1;
static int aac_cache = 2;	/* WCE=0 to avoid performance problems */
static int dacmode = -1;
int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode;		/* Only Sync. transfer - disabled */
int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */

module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
	" 0=off, 1=on");
module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
	" 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
	" 0=off, 1=on");
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
	"\tbit 0 - Disable FUA in WRITE SCSI commands\n"
	"\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
	"\tbit 2 - Disable only if Battery is protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
	" 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
	" adapter for foreign arrays.\n"
	"This is typically needed in systems that do not have a BIOS."
	" 0=off, 1=on");
module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
	" adapter to have it's kernel up and\n"
	"running. This is typically adjusted for large systems that do not"
	" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
	" applications to pick up AIFs before\n"
	"deregistering them. This is typically adjusted for heavily burdened"
	" systems.");

int aac_fib_dump;
module_param(aac_fib_dump, int, 0644);
MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");

int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
	" blocks (FIB) allocated. Valid values are 512 and down. Default is"
	" to use suggestion from Firmware.");

int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
	" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
	" suggestion from Firmware.");

int update_interval = 30 * 60;
module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
	" updates issued to adapter.");

int check_interval = 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
	" checks.");

int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
	" adapter. a value of -1 forces the reset to adapters programmed to"
	" ignore it.");

int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
	" -1=protect 0=off, 1=on");

int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");

int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
	"\t0 - Disable\n"
	"\t1 - Array Meta Data Signature (default)\n"
	"\t2 - Adapter Serial Number");
  296. static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
  297. struct fib *fibptr) {
  298. struct scsi_device *device;
  299. if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
  300. dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
  301. aac_fib_complete(fibptr);
  302. return 0;
  303. }
  304. scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
  305. device = scsicmd->device;
  306. if (unlikely(!device)) {
  307. dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
  308. aac_fib_complete(fibptr);
  309. return 0;
  310. }
  311. return 1;
  312. }
/**
 *	aac_get_config_status	-	check the adapter configuration
 *	@dev: adapter to query
 *	@commit_flag: force sending CT_COMMIT_CONFIG even if aac_commit != 1
 *
 *	Query config status, and commit the configuration if needed.
 *	Returns 0 on success, a negative errno on failure.
 */
int aac_get_config_status(struct aac_dev *dev, int commit_flag)
{
	int status = 0;
	struct fib * fibptr;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	{
		struct aac_get_config_status *dinfo;
		dinfo = (struct aac_get_config_status *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_ContainerConfig);
		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
	}

	/* Synchronous request: wait for the firmware reply. */
	status = aac_fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_get_config_status),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
	if (status < 0) {
		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
	} else {
		struct aac_get_config_status_resp *reply
		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
		dprintk((KERN_WARNING
		  "aac_get_config_status: response=%d status=%d action=%d\n",
		  le32_to_cpu(reply->response),
		  le32_to_cpu(reply->status),
		  le32_to_cpu(reply->data.action)));
		/* Only commit when the adapter reports a sane, committable state. */
		if ((le32_to_cpu(reply->response) != ST_OK) ||
		    (le32_to_cpu(reply->status) != CT_OK) ||
		    (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
			status = -EINVAL;
		}
	}
	/* Do not set XferState to zero unless receives a response from F/W */
	if (status >= 0)
		aac_fib_complete(fibptr);

	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
	if (status >= 0) {
		if ((aac_commit == 1) || commit_flag) {
			struct aac_commit_config * dinfo;
			aac_fib_init(fibptr);
			dinfo = (struct aac_commit_config *) fib_data(fibptr);

			dinfo->command = cpu_to_le32(VM_ContainerConfig);
			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);

			status = aac_fib_send(ContainerCommand,
				    fibptr,
				    sizeof (struct aac_commit_config),
				    FsaNormal,
				    1, 1,
				    NULL, NULL);
			/* Do not set XferState to zero unless
			 * receives a response from F/W */
			if (status >= 0)
				aac_fib_complete(fibptr);
		} else if (aac_commit == 0) {
			printk(KERN_WARNING
			  "aac_get_config_status: Foreign device configurations are being ignored\n");
		}
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return status;
}
  387. static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
  388. {
  389. char inq_data;
  390. scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
  391. if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
  392. inq_data &= 0xdf;
  393. scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
  394. }
  395. }
/**
 *	aac_get_containers	-	list containers
 *	@dev: adapter to probe
 *
 *	Make a list of all containers on this controller.  Queries the
 *	adapter for the container count, (re)allocates dev->fsa_dev to
 *	match, then probes each slot.  Returns 0 on success or a
 *	negative errno.
 */
int aac_get_containers(struct aac_dev *dev)
{
	struct fsa_dev_info *fsa_dev_ptr;
	u32 index;
	int status = 0;
	struct fib * fibptr;
	struct aac_get_container_count *dinfo;
	struct aac_get_container_count_resp *dresp;
	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);

	status = aac_fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_get_container_count),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status >= 0) {
		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
		/* Newer firmware reports the (larger) simple-volume limit. */
		if (fibptr->dev->supplement_adapter_info.supported_options2 &
		    AAC_OPTION_SUPPORTED_240_VOLUMES) {
			maximum_num_containers =
				le32_to_cpu(dresp->MaxSimpleVolumes);
		}
		aac_fib_complete(fibptr);
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);

	/* Never size the table below the driver's static minimum. */
	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
	if (dev->fsa_dev == NULL ||
		dev->maximum_num_containers != maximum_num_containers) {

		fsa_dev_ptr = dev->fsa_dev;

		dev->fsa_dev = kcalloc(maximum_num_containers,
					sizeof(*fsa_dev_ptr), GFP_KERNEL);

		kfree(fsa_dev_ptr);
		fsa_dev_ptr = NULL;


		if (!dev->fsa_dev)
			return -ENOMEM;

		dev->maximum_num_containers = maximum_num_containers;
	}
	for (index = 0; index < dev->maximum_num_containers; index++) {
		dev->fsa_dev[index].devname[0] = '\0';
		dev->fsa_dev[index].valid = 0;

		status = aac_probe_container(dev, index);

		if (status < 0) {
			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
			break;
		}
	}
	return status;
}
/*
 * get_container_name_callback - completion for CT_READ_NAME
 *
 * On success, splice the firmware-supplied container name (trimmed of
 * leading spaces and padded with trailing spaces) into the Product ID
 * field of the INQUIRY data already staged in the command's buffer,
 * then complete the SCSI command.  A failed reply just keeps the
 * default INQUIRY strings.
 */
static void get_container_name_callback(void *context, struct fib * fibptr)
{
	struct aac_get_name_resp * get_name_reply;
	struct scsi_cmnd * scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	BUG_ON(fibptr == NULL);

	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
	 && (get_name_reply->data[0] != '\0')) {
		char *sp = get_name_reply->data;
		int data_size = sizeof_field(struct aac_get_name_resp, data);

		/* Force NUL-termination before scanning the reply. */
		sp[data_size - 1] = '\0';
		while (*sp == ' ')
			++sp;
		if (*sp) {
			struct inquiry_data inq;
			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
			int count = sizeof(d);
			char *dp = d;

			/* Copy the name, space-padding to the full field width. */
			do {
				*dp++ = (*sp) ? *sp++ : ' ';
			} while (--count > 0);

			scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
			memcpy(inq.inqd_pid, d, sizeof(d));
			scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
		}
	}

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	scsicmd->scsi_done(scsicmd);
}
/**
 *	aac_get_container_name	-	get container name, none blocking.
 *	@scsicmd: SCSI command whose target container name is requested
 *
 *	Issues an asynchronous CT_READ_NAME; get_container_name_callback()
 *	finishes the command.  Returns 0 when the request was queued,
 *	-1 otherwise.
 */
static int aac_get_container_name(struct scsi_cmnd * scsicmd)
{
	int status;
	int data_size;
	struct aac_get_name *dinfo;
	struct fib * cmd_fibcontext;
	struct aac_dev * dev;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	data_size = sizeof_field(struct aac_get_name_resp, data);

	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);

	aac_fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
	/* Firmware owns the command until the callback fires. */
	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_READ_NAME);
	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
	dinfo->count = cpu_to_le32(data_size - 1);

	/* FIB must be sized for the (larger) response. */
	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_get_name_resp),
		  FsaNormal,
		  0, 1,
		  (fib_callback)get_container_name_callback,
		  (void *) scsicmd);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	return -1;
}
  531. static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
  532. {
  533. struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
  534. if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
  535. return aac_scsi_cmd(scsicmd);
  536. scsicmd->result = DID_NO_CONNECT << 16;
  537. scsicmd->scsi_done(scsicmd);
  538. return 0;
  539. }
/*
 * _aac_probe_container2 - final stage of a container probe
 *
 * Parses the VM_NameServe reply into the container's fsa_dev_info slot
 * (block size, identifier, spin-up state, type, capacity, read-only
 * flag), releases the FIB, and invokes the continuation stashed in
 * scsicmd->SCp.ptr.
 */
static void _aac_probe_container2(void * context, struct fib * fibptr)
{
	struct fsa_dev_info *fsa_dev_ptr;
	int (*callback)(struct scsi_cmnd *);
	struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
	int i;


	if (!aac_valid_context(scsicmd, fibptr))
		return;

	scsicmd->SCp.Status = 0;
	fsa_dev_ptr = fibptr->dev->fsa_dev;
	if (fsa_dev_ptr) {
		struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
		__le32 sup_options2;

		fsa_dev_ptr += scmd_id(scsicmd);
		sup_options2 =
			fibptr->dev->supplement_adapter_info.supported_options2;

		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			/* Default to 512-byte sectors when the adapter
			 * cannot report a variable block size. */
			if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
				dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
				fsa_dev_ptr->block_size = 0x200;
			} else {
				fsa_dev_ptr->block_size =
					le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
			}
			for (i = 0; i < 16; i++)
				fsa_dev_ptr->identifier[i] =
					dresp->mnt[0].fileinfo.bdevinfo
						.identifier[i];
			fsa_dev_ptr->valid = 1;
			/* sense_key holds the current state of the spin-up */
			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
			/* 64-bit capacity split across two 32-bit fields. */
			fsa_dev_ptr->size
			  = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
			fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
		}
		if ((fsa_dev_ptr->valid & 1) == 0)
			fsa_dev_ptr->valid = 0;
		scsicmd->SCp.Status = le32_to_cpu(dresp->count);
	}
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	/* Hand off to the continuation saved by _aac_probe_container(). */
	callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
	scsicmd->SCp.ptr = NULL;
	(*callback)(scsicmd);
	return;
}
/*
 * _aac_probe_container1 - intermediate stage of a container probe
 *
 * Completion for the first VM_NameServe query.  On adapters without
 * 2TB support the reply may already be conclusive, in which case we
 * jump straight to stage 2; otherwise the FIB is reused to issue the
 * 64-bit (or variable-block-size) VM_NameServe64 / VM_NameServeAllBlk
 * query, whose completion is _aac_probe_container2.
 */
static void _aac_probe_container1(void * context, struct fib * fibptr)
{
	struct scsi_cmnd * scsicmd;
	struct aac_mount * dresp;
	struct aac_query_mount *dinfo;
	int status;

	dresp = (struct aac_mount *) fib_data(fibptr);
	if (!aac_supports_2T(fibptr->dev)) {
		/* capacityhigh is meaningless on these adapters. */
		dresp->mnt[0].capacityhigh = 0;
		if ((le32_to_cpu(dresp->status) == ST_OK) &&
			(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
			_aac_probe_container2(context, fibptr);
			return;
		}
	}
	scsicmd = (struct scsi_cmnd *) context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	aac_fib_init(fibptr);

	dinfo = (struct aac_query_mount *)fib_data(fibptr);

	if (fibptr->dev->supplement_adapter_info.supported_options2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
		dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
	else
		dinfo->command = cpu_to_le32(VM_NameServe64);

	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
	dinfo->type = cpu_to_le32(FT_FILESYS);
	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

	status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container2,
			  (void *) scsicmd);
	/*
	 *	Check that the command queued to the controller
	 */
	if (status < 0 && status != -EINPROGRESS) {
		/* Inherit results from VM_NameServe, if any */
		dresp->status = cpu_to_le32(ST_OK);
		_aac_probe_container2(context, fibptr);
	}
}
/*
 * _aac_probe_container - start an asynchronous container probe
 * @scsicmd: command providing the target id and completion context
 * @callback: continuation to run once the probe chain finishes
 *
 * Sends the initial VM_NameServe query; _aac_probe_container1 and
 * _aac_probe_container2 carry the probe forward.  The continuation
 * pointer is smuggled through scsicmd->SCp.ptr.  Returns 0 when the
 * probe is in flight (the callback will fire later), the callback's
 * result when the container is already known-invalid, or a negative
 * errno.
 */
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
	struct fib * fibptr;
	int status = -ENOMEM;

	if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
		struct aac_query_mount *dinfo;

		aac_fib_init(fibptr);

		dinfo = (struct aac_query_mount *)fib_data(fibptr);

		if (fibptr->dev->supplement_adapter_info.supported_options2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
		else
			dinfo->command = cpu_to_le32(VM_NameServe);

		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
		dinfo->type = cpu_to_le32(FT_FILESYS);
		/* Stash the continuation for _aac_probe_container2. */
		scsicmd->SCp.ptr = (char *)callback;
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

		status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container1,
			  (void *) scsicmd);
		/*
		 *	Check that the command queued to the controller
		 */
		if (status == -EINPROGRESS)
			return 0;

		if (status < 0) {
			scsicmd->SCp.ptr = NULL;
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
	}
	if (status < 0) {
		/* The probe never launched: fall back to the cached state. */
		struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
		if (fsa_dev_ptr) {
			fsa_dev_ptr += scmd_id(scsicmd);
			if ((fsa_dev_ptr->valid & 1) == 0) {
				fsa_dev_ptr->valid = 0;
				return (*callback)(scsicmd);
			}
		}
	}
	return status;
}
/**
 *	aac_probe_container		-	query a logical volume
 *	@dev: device to query
 *	@cid: container identifier
 *
 *	Queries the controller about the given volume. The volume information
 *	is updated in the struct fsa_dev_info structure rather than returned.
 *	(kernel-doc above describes aac_probe_container() below; the two
 *	helpers here are its completion plumbing.)
 */
/* Continuation used by aac_probe_container(): clearing ->device signals
 * the waiter in aac_probe_container() that the probe chain finished. */
static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
{
	scsicmd->device = NULL;
	return 0;
}

/* scsi_done shim for the synthetic command built by aac_probe_container(). */
static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
{
	aac_probe_container_callback1(scsi_cmnd);
}
/*
 * aac_probe_container - synchronously probe container @cid on @dev
 *
 * Builds a throwaway scsi_cmnd/scsi_device pair so the asynchronous
 * _aac_probe_container() machinery can be driven synchronously, then
 * waits for the probe chain to clear scsicmd->device.  Returns the
 * probe status (mount count) or -ENOMEM.
 */
int aac_probe_container(struct aac_dev *dev, int cid)
{
	struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
	struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
	int status;

	if (!scsicmd || !scsidev) {
		kfree(scsicmd);
		kfree(scsidev);
		return -ENOMEM;
	}
	scsicmd->list.next = NULL;
	scsicmd->scsi_done = aac_probe_container_scsi_done;

	scsicmd->device = scsidev;
	scsidev->sdev_state = 0;
	scsidev->id = cid;
	scsidev->host = dev->scsi_host_ptr;

	/* NOTE(review): this spins calling schedule() without setting a
	 * task state, relying on the callback clearing ->device — looks
	 * like a busy-wait; confirm against the completion-based rework
	 * in later kernels. */
	if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
		while (scsicmd->device == scsidev)
			schedule();
	kfree(scsidev);
	status = scsicmd->SCp.Status;
	kfree(scsicmd);
	return status;
}
/* Local Structure to set SCSI inquiry data strings (space-padded,
 * not NUL-terminated, per the SCSI INQUIRY format) */
struct scsi_inq {
	char vid[8];	/* Vendor ID */
	char pid[16];	/* Product ID */
	char prl[4];	/* Product Revision Level */
};
  731. /**
  732. * InqStrCopy - string merge
  733. * @a: string to copy from
  734. * @b: string to copy to
  735. *
  736. * Copy a String from one location to another
  737. * without copying \0
  738. */
  739. static void inqstrcpy(char *a, char *b)
  740. {
  741. while (*a != (char)0)
  742. *b++ = *a++;
  743. }
  744. static char *container_types[] = {
  745. "None",
  746. "Volume",
  747. "Mirror",
  748. "Stripe",
  749. "RAID5",
  750. "SSRW",
  751. "SSRO",
  752. "Morph",
  753. "Legacy",
  754. "RAID4",
  755. "RAID10",
  756. "RAID00",
  757. "V-MIRRORS",
  758. "PSEUDO R4",
  759. "RAID50",
  760. "RAID5D",
  761. "RAID5D0",
  762. "RAID1E",
  763. "RAID6",
  764. "RAID60",
  765. "Unknown"
  766. };
  767. char * get_container_type(unsigned tindex)
  768. {
  769. if (tindex >= ARRAY_SIZE(container_types))
  770. tindex = ARRAY_SIZE(container_types) - 1;
  771. return container_types[tindex];
  772. }
/*	Function: setinqstr
 *
 *	Arguments: [1] pointer to void [1] int
 *
 *	Purpose: Sets SCSI inquiry data strings for vendor, product
 *	and revision level. Allows strings to be set in platform dependent
 *	files instead of in OS dependent driver source.
 *
 *	@dev: adapter whose identity strings to use
 *	@data: caller's buffer, treated as a struct scsi_inq
 *	@tindex: container type index used to append the volume-type name
 */
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
	struct scsi_inq *str;
	struct aac_supplement_adapter_info *sup_adap_info;

	sup_adap_info = &dev->supplement_adapter_info;
	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
	/* All three fields are space-padded, never NUL-terminated. */
	memset(str, ' ', sizeof(*str));

	if (sup_adap_info->adapter_type_text[0]) {
		int c;
		char *cp;
		char *cname = kmemdup(sup_adap_info->adapter_type_text,
				sizeof(sup_adap_info->adapter_type_text),
								GFP_ATOMIC);
		if (!cname)
			return;

		cp = cname;
		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
			inqstrcpy("SMC", str->vid);
		else {
			/* Vendor = first space-separated word, truncated to
			 * the vid field; temporarily NUL-terminate in place. */
			c = sizeof(str->vid);
			while (*cp && *cp != ' ' && --c)
				++cp;
			c = *cp;
			*cp = '\0';
			inqstrcpy(cname, str->vid);
			*cp = c;
			while (*cp && *cp != ' ')
				++cp;
		}
		while (*cp == ' ')
			++cp;
		/* last six chars reserved for vol type */
		if (strlen(cp) > sizeof(str->pid))
			cp[sizeof(str->pid)] = '\0';
		inqstrcpy (cp, str->pid);

		kfree(cname);
	} else {
		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);

		inqstrcpy (mp->vname, str->vid);
		/* last six chars reserved for vol type */
		inqstrcpy (mp->model, str->pid);
	}

	if (tindex < ARRAY_SIZE(container_types)){
		char *findit = str->pid;

		for ( ; *findit != ' '; findit++); /* walk till we find a space */
		/* RAID is superfluous in the context of a RAID device */
		if (memcmp(findit-4, "RAID", 4) == 0)
			*(findit -= 4) = ' ';
		/* Append the container type name if it fits in pid+prl. */
		if (((findit - str->pid) + strlen(container_types[tindex]))
		 < (sizeof(str->pid) + sizeof(str->prl)))
			inqstrcpy (container_types[tindex], findit + 1);
	}
	inqstrcpy ("V1.0", str->prl);
}
  835. static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
  836. struct aac_dev *dev, struct scsi_cmnd *scsicmd)
  837. {
  838. int container;
  839. vpdpage83data->type3.codeset = 1;
  840. vpdpage83data->type3.identifiertype = 3;
  841. vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
  842. - 4;
  843. for (container = 0; container < dev->maximum_num_containers;
  844. container++) {
  845. if (scmd_id(scsicmd) == container) {
  846. memcpy(vpdpage83data->type3.Identifier,
  847. dev->fsa_dev[container].identifier,
  848. 16);
  849. break;
  850. }
  851. }
  852. }
/*
 * get_container_serial_callback - fib completion handler for the
 * CT_CID_TO_32BITS_UID request issued by aac_get_container_serial().
 * Builds the VPD 0x83 (device identification) or VPD 0x80 (unit serial)
 * response in the SCSI command's buffer and completes the command.
 * A firmware failure status is ignored; the response then keeps its
 * default (zeroed / fallback) content.
 */
static void get_container_serial_callback(void *context, struct fib * fibptr)
{
	struct aac_get_serial_resp * get_serial_reply;
	struct scsi_cmnd * scsicmd;
	BUG_ON(fibptr == NULL);
	scsicmd = (struct scsi_cmnd *) context;
	/* bail out if the command was already aborted/timed out */
	if (!aac_valid_context(scsicmd, fibptr))
		return;
	get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
		/*Check to see if it's for VPD 0x83 or 0x80 */
		if (scsicmd->cmnd[2] == 0x83) {
			/* vpd page 0x83 - Device Identification Page */
			struct aac_dev *dev;
			int i;
			struct tvpd_page83 vpdpage83data;
			dev = (struct aac_dev *)scsicmd->device->host->hostdata;
			memset(((u8 *)&vpdpage83data), 0,
			       sizeof(vpdpage83data));
			/* DIRECT_ACCESS_DEVIC */
			vpdpage83data.DeviceType = 0;
			/* DEVICE_CONNECTED */
			vpdpage83data.DeviceTypeQualifier = 0;
			/* VPD_DEVICE_IDENTIFIERS */
			vpdpage83data.PageCode = 0x83;
			vpdpage83data.reserved = 0;
			vpdpage83data.PageLength =
				sizeof(vpdpage83data.type1) +
				sizeof(vpdpage83data.type2);
			/* VPD 83 Type 3 is not supported for ARC */
			if (dev->sa_firmware)
				vpdpage83data.PageLength +=
					sizeof(vpdpage83data.type3);
			/* T10 Vendor Identifier Field Format */
			/* VpdcodesetAscii */
			vpdpage83data.type1.codeset = 2;
			/* VpdIdentifierTypeVendorId */
			vpdpage83data.type1.identifiertype = 1;
			vpdpage83data.type1.identifierlength =
				sizeof(vpdpage83data.type1) - 4;
			/* "ADAPTEC " for adaptec */
			memcpy(vpdpage83data.type1.venid,
				"ADAPTEC ",
				sizeof(vpdpage83data.type1.venid));
			memcpy(vpdpage83data.type1.productid,
				"ARRAY           ",
				sizeof(
					vpdpage83data.type1.productid));
			/* Convert to ascii based serial number.
			 * The LSB is the the end.
			 */
			for (i = 0; i < 8; i++) {
				/* nibble-by-nibble hex, MSB nibble first */
				u8 temp =
					(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
				if (temp > 0x9) {
					vpdpage83data.type1.serialnumber[i] =
							'A' + (temp - 0xA);
				} else {
					vpdpage83data.type1.serialnumber[i] =
							'0' + temp;
				}
			}
			/* VpdCodeSetBinary */
			vpdpage83data.type2.codeset = 1;
			/* VpdidentifiertypeEUI64 */
			vpdpage83data.type2.identifiertype = 2;
			vpdpage83data.type2.identifierlength =
				sizeof(vpdpage83data.type2) - 4;
			/* EUI-64: fixed vendor prefix + firmware-supplied uid */
			vpdpage83data.type2.eu64id.venid[0] = 0xD0;
			vpdpage83data.type2.eu64id.venid[1] = 0;
			vpdpage83data.type2.eu64id.venid[2] = 0;
			vpdpage83data.type2.eu64id.Serial =
							get_serial_reply->uid;
			vpdpage83data.type2.eu64id.reserved = 0;
			/*
			 * VpdIdentifierTypeFCPHName
			 * VPD 0x83 Type 3 not supported for ARC
			 */
			if (dev->sa_firmware) {
				build_vpd83_type3(&vpdpage83data,
						dev, scsicmd);
			}
			/* Move the inquiry data to the response buffer. */
			scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
						 sizeof(vpdpage83data));
		} else {
			/* It must be for VPD 0x80 */
			char sp[13];
			/* EVPD bit set */
			sp[0] = INQD_PDT_DA;
			sp[1] = scsicmd->cmnd[2];
			sp[2] = 0;
			/* sp[3] = page length = chars snprintf wrote (8) */
			sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
				le32_to_cpu(get_serial_reply->uid));
			scsi_sg_copy_from_buffer(scsicmd, sp,
						 sizeof(sp));
		}
	}
	/* complete the SCSI command with GOOD status in all cases */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	aac_fib_complete(fibptr);
	scsicmd->scsi_done(scsicmd);
}
  956. /**
  957. * aac_get_container_serial - get container serial, none blocking.
  958. */
  959. static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
  960. {
  961. int status;
  962. struct aac_get_serial *dinfo;
  963. struct fib * cmd_fibcontext;
  964. struct aac_dev * dev;
  965. dev = (struct aac_dev *)scsicmd->device->host->hostdata;
  966. cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
  967. aac_fib_init(cmd_fibcontext);
  968. dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
  969. dinfo->command = cpu_to_le32(VM_ContainerConfig);
  970. dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
  971. dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
  972. scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
  973. status = aac_fib_send(ContainerCommand,
  974. cmd_fibcontext,
  975. sizeof(struct aac_get_serial_resp),
  976. FsaNormal,
  977. 0, 1,
  978. (fib_callback) get_container_serial_callback,
  979. (void *) scsicmd);
  980. /*
  981. * Check that the command queued to the controller
  982. */
  983. if (status == -EINPROGRESS)
  984. return 0;
  985. printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
  986. aac_fib_complete(cmd_fibcontext);
  987. return -1;
  988. }
  989. /* Function: setinqserial
  990. *
  991. * Arguments: [1] pointer to void [1] int
  992. *
  993. * Purpose: Sets SCSI Unit Serial number.
  994. * This is a fake. We should read a proper
  995. * serial number from the container. <SuSE>But
  996. * without docs it's quite hard to do it :-)
  997. * So this will have to do in the meantime.</SuSE>
  998. */
  999. static int setinqserial(struct aac_dev *dev, void *data, int cid)
  1000. {
  1001. /*
  1002. * This breaks array migration.
  1003. */
  1004. return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
  1005. le32_to_cpu(dev->adapter_info.serial[0]), cid);
  1006. }
  1007. static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
  1008. u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
  1009. {
  1010. u8 *sense_buf = (u8 *)sense_data;
  1011. /* Sense data valid, err code 70h */
  1012. sense_buf[0] = 0x70; /* No info field */
  1013. sense_buf[1] = 0; /* Segment number, always zero */
  1014. sense_buf[2] = sense_key; /* Sense key */
  1015. sense_buf[12] = sense_code; /* Additional sense code */
  1016. sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
  1017. if (sense_key == ILLEGAL_REQUEST) {
  1018. sense_buf[7] = 10; /* Additional sense length */
  1019. sense_buf[15] = bit_pointer;
  1020. /* Illegal parameter is in the parameter block */
  1021. if (sense_code == SENCODE_INVALID_CDB_FIELD)
  1022. sense_buf[15] |= 0xc0;/* Std sense key specific field */
  1023. /* Illegal parameter is in the CDB block */
  1024. sense_buf[16] = field_pointer >> 8; /* MSB */
  1025. sense_buf[17] = field_pointer; /* LSB */
  1026. } else
  1027. sense_buf[7] = 6; /* Additional sense length */
  1028. }
  1029. static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
  1030. {
  1031. if (lba & 0xffffffff00000000LL) {
  1032. int cid = scmd_id(cmd);
  1033. dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
  1034. cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
  1035. SAM_STAT_CHECK_CONDITION;
  1036. set_sense(&dev->fsa_dev[cid].sense_data,
  1037. HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
  1038. ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
  1039. memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
  1040. min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
  1041. SCSI_SENSE_BUFFERSIZE));
  1042. cmd->scsi_done(cmd);
  1043. return 1;
  1044. }
  1045. return 0;
  1046. }
/* 64-bit variant: every u64 LBA is addressable, so never out of bounds */
static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
	return 0;
}
static void io_callback(void *context, struct fib * fibptr);

/*
 * aac_read_raw_io - build and queue a raw-I/O container read.
 * Uses the RawIo2 request format (IEEE-1212 SG elements) on TYPE2/TYPE3
 * message interfaces when not in sync mode, and the original RawIo
 * format otherwise. Returns the aac_fib_send() status (-EINPROGRESS
 * when successfully queued) or a negative SG-build error.
 */
static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
		!dev->sync_mode) {
		/* new-style RawIo2 request */
		struct aac_raw_io2 *readcmd2;
		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		/* firmware wants the transfer length in bytes, not blocks */
		readcmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
		ret = aac_build_sgraw2(cmd, readcmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		/* header already contains one SG element; add the rest */
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		/* legacy RawIo request */
		struct aac_raw_io *readcmd;
		readcmd = (struct aac_raw_io *) fib_data(fib);
		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd->cid = cpu_to_le16(scmd_id(cmd));
		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
		readcmd->bpTotal = 0;
		readcmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &readcmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		/* header already contains one SG entry; add the rest */
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
	}
	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
  1107. static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
  1108. {
  1109. u16 fibsize;
  1110. struct aac_read64 *readcmd;
  1111. long ret;
  1112. aac_fib_init(fib);
  1113. readcmd = (struct aac_read64 *) fib_data(fib);
  1114. readcmd->command = cpu_to_le32(VM_CtHostRead64);
  1115. readcmd->cid = cpu_to_le16(scmd_id(cmd));
  1116. readcmd->sector_count = cpu_to_le16(count);
  1117. readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
  1118. readcmd->pad = 0;
  1119. readcmd->flags = 0;
  1120. ret = aac_build_sg64(cmd, &readcmd->sg);
  1121. if (ret < 0)
  1122. return ret;
  1123. fibsize = sizeof(struct aac_read64) +
  1124. ((le32_to_cpu(readcmd->sg.count) - 1) *
  1125. sizeof (struct sgentry64));
  1126. BUG_ON (fibsize > (fib->dev->max_fib_size -
  1127. sizeof(struct aac_fibhdr)));
  1128. /*
  1129. * Now send the Fib to the adapter
  1130. */
  1131. return aac_fib_send(ContainerCommand64,
  1132. fib,
  1133. fibsize,
  1134. FsaNormal,
  1135. 0, 1,
  1136. (fib_callback) io_callback,
  1137. (void *) cmd);
  1138. }
  1139. static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
  1140. {
  1141. u16 fibsize;
  1142. struct aac_read *readcmd;
  1143. struct aac_dev *dev = fib->dev;
  1144. long ret;
  1145. aac_fib_init(fib);
  1146. readcmd = (struct aac_read *) fib_data(fib);
  1147. readcmd->command = cpu_to_le32(VM_CtBlockRead);
  1148. readcmd->cid = cpu_to_le32(scmd_id(cmd));
  1149. readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
  1150. readcmd->count = cpu_to_le32(count *
  1151. dev->fsa_dev[scmd_id(cmd)].block_size);
  1152. ret = aac_build_sg(cmd, &readcmd->sg);
  1153. if (ret < 0)
  1154. return ret;
  1155. fibsize = sizeof(struct aac_read) +
  1156. ((le32_to_cpu(readcmd->sg.count) - 1) *
  1157. sizeof (struct sgentry));
  1158. BUG_ON (fibsize > (fib->dev->max_fib_size -
  1159. sizeof(struct aac_fibhdr)));
  1160. /*
  1161. * Now send the Fib to the adapter
  1162. */
  1163. return aac_fib_send(ContainerCommand,
  1164. fib,
  1165. fibsize,
  1166. FsaNormal,
  1167. 0, 1,
  1168. (fib_callback) io_callback,
  1169. (void *) cmd);
  1170. }
/*
 * aac_write_raw_io - build and queue a raw-I/O container write.
 * Mirrors aac_read_raw_io: RawIo2 format on TYPE2/TYPE3 interfaces when
 * not in sync mode, legacy RawIo otherwise. The SUREWRITE flag is set
 * for FUA requests unless the aac_cache module parameter (bits 0/2) and
 * a protected cache allow the write-through to be relaxed. Returns the
 * aac_fib_send() status or a negative SG-build error.
 */
static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
		!dev->sync_mode) {
		/* new-style RawIo2 request */
		struct aac_raw_io2 *writecmd2;
		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		/* firmware wants the transfer length in bytes, not blocks */
		writecmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
		/* honour FUA unless the cache policy says it is safe not to */
		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
			cpu_to_le16(RIO2_IO_TYPE_WRITE);
		ret = aac_build_sgraw2(cmd, writecmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		/* header already contains one SG element; add the rest */
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		/* legacy RawIo request */
		struct aac_raw_io *writecmd;
		writecmd = (struct aac_raw_io *) fib_data(fib);
		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd->cid = cpu_to_le16(scmd_id(cmd));
		/* honour FUA unless the cache policy says it is safe not to */
		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
						 (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
			cpu_to_le16(RIO_TYPE_WRITE);
		writecmd->bpTotal = 0;
		writecmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &writecmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		/* header already contains one SG entry; add the rest */
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
	}
	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
  1232. static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
  1233. {
  1234. u16 fibsize;
  1235. struct aac_write64 *writecmd;
  1236. long ret;
  1237. aac_fib_init(fib);
  1238. writecmd = (struct aac_write64 *) fib_data(fib);
  1239. writecmd->command = cpu_to_le32(VM_CtHostWrite64);
  1240. writecmd->cid = cpu_to_le16(scmd_id(cmd));
  1241. writecmd->sector_count = cpu_to_le16(count);
  1242. writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
  1243. writecmd->pad = 0;
  1244. writecmd->flags = 0;
  1245. ret = aac_build_sg64(cmd, &writecmd->sg);
  1246. if (ret < 0)
  1247. return ret;
  1248. fibsize = sizeof(struct aac_write64) +
  1249. ((le32_to_cpu(writecmd->sg.count) - 1) *
  1250. sizeof (struct sgentry64));
  1251. BUG_ON (fibsize > (fib->dev->max_fib_size -
  1252. sizeof(struct aac_fibhdr)));
  1253. /*
  1254. * Now send the Fib to the adapter
  1255. */
  1256. return aac_fib_send(ContainerCommand64,
  1257. fib,
  1258. fibsize,
  1259. FsaNormal,
  1260. 0, 1,
  1261. (fib_callback) io_callback,
  1262. (void *) cmd);
  1263. }
/*
 * aac_write_block - build and queue a VM_CtBlockWrite container write
 * (32-bit LBA, byte count, 32-bit SG addresses). The @fua hint is not
 * expressible in this request format and is ignored. Returns the
 * aac_fib_send() status or a negative SG-build error.
 */
static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write *writecmd;
	struct aac_dev *dev = fib->dev;
	long ret;

	aac_fib_init(fib);
	writecmd = (struct aac_write *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
	writecmd->cid = cpu_to_le32(scmd_id(cmd));
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	/* this request format carries the length in bytes */
	writecmd->count = cpu_to_le32(count *
		dev->fsa_dev[scmd_id(cmd)].block_size);
	/* preset a single-entry map; presumably aac_build_sg() below
	 * replaces it with the real SG list - TODO confirm */
	writecmd->sg.count = cpu_to_le32(1);
	/* ->stable is not used - it did mean which type of write */

	ret = aac_build_sg(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	/* one sgentry is part of the header; add the remainder */
	fibsize = sizeof(struct aac_write) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
		  fib,
		  fibsize,
		  FsaNormal,
		  0, 1,
		  (fib_callback) io_callback,
		  (void *) cmd);
}
/*
 * aac_scsi_common - initialize the SRB fields shared by all
 * pass-through SCSI requests (direction flags, addressing, timeout,
 * CDB size). The caller fills in the SG list and CDB bytes and sends
 * the fib. Returns a pointer to the SRB inside @fib's data area.
 */
static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
{
	struct aac_srb * srbcmd;
	u32 flag;
	u32 timeout;
	struct aac_dev *dev = fib->dev;

	aac_fib_init(fib);
	/* map the DMA direction onto the firmware's SRB flag bits */
	switch(cmd->sc_data_direction){
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
		break;
	case DMA_BIDIRECTIONAL:
		flag = SRB_DataIn | SRB_DataOut;
		break;
	case DMA_FROM_DEVICE:
		flag = SRB_DataIn;
		break;
	case DMA_NONE:
	default:	/* shuts up some versions of gcc */
		flag = SRB_NoDataXfer;
		break;
	}

	srbcmd = (struct aac_srb*) fib_data(fib);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
	srbcmd->id       = cpu_to_le32(scmd_id(cmd));
	srbcmd->lun      = cpu_to_le32(cmd->device->lun);
	srbcmd->flags    = cpu_to_le32(flag);
	/* fall back to a firmware-type-specific default when the block
	 * layer supplied no timeout */
	timeout = cmd->request->timeout/HZ;
	if (timeout == 0)
		timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
	srbcmd->retry_limit = 0; /* Obsolete parameter */
	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
	return srbcmd;
}
/*
 * aac_construct_hbacmd - initialize the fixed part of a native HBA
 * command request for @cmd: data direction, LUN, I-T nexus, CDB, data
 * length and the firmware error-buffer address. The SG list, iu_type,
 * request_id and reply_qid are filled in later (see comments below).
 * Returns a pointer to the request inside @fib's hardware fib area.
 */
static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
							struct scsi_cmnd *cmd)
{
	struct aac_hba_cmd_req *hbacmd;
	struct aac_dev *dev;
	int bus, target;
	u64 address;

	dev = (struct aac_dev *)cmd->device->host->hostdata;

	hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
	/* only the fixed 96-byte part is cleared here */
	memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */
	/* iu_type is a parameter of aac_hba_send */
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		hbacmd->byte1 = 2;	/* write direction */
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		hbacmd->byte1 = 1;	/* read direction */
		break;
	case DMA_NONE:
	default:
		break;
	}
	hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);

	bus = aac_logical_to_phys(scmd_channel(cmd));
	target = scmd_id(cmd);
	hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;

	/* we fill in reply_qid later in aac_src_deliver_message */
	/* we fill in iu_type, request_id later in aac_hba_send */
	/* we fill in emb_data_desc_count later in aac_build_sghba */

	memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
	hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));

	/* tell the firmware where to deposit per-command error info */
	address = (u64)fib->hw_error_pa;
	hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
	hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
	hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);

	return hbacmd;
}
  1372. static void aac_srb_callback(void *context, struct fib * fibptr);
  1373. static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
  1374. {
  1375. u16 fibsize;
  1376. struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
  1377. long ret;
  1378. ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
  1379. if (ret < 0)
  1380. return ret;
  1381. srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
  1382. memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  1383. memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
  1384. /*
  1385. * Build Scatter/Gather list
  1386. */
  1387. fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
  1388. ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
  1389. sizeof (struct sgentry64));
  1390. BUG_ON (fibsize > (fib->dev->max_fib_size -
  1391. sizeof(struct aac_fibhdr)));
  1392. /*
  1393. * Now send the Fib to the adapter
  1394. */
  1395. return aac_fib_send(ScsiPortCommand64, fib,
  1396. fibsize, FsaNormal, 0, 1,
  1397. (fib_callback) aac_srb_callback,
  1398. (void *) cmd);
  1399. }
  1400. static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
  1401. {
  1402. u16 fibsize;
  1403. struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
  1404. long ret;
  1405. ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
  1406. if (ret < 0)
  1407. return ret;
  1408. srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
  1409. memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  1410. memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
  1411. /*
  1412. * Build Scatter/Gather list
  1413. */
  1414. fibsize = sizeof (struct aac_srb) +
  1415. (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
  1416. sizeof (struct sgentry));
  1417. BUG_ON (fibsize > (fib->dev->max_fib_size -
  1418. sizeof(struct aac_fibhdr)));
  1419. /*
  1420. * Now send the Fib to the adapter
  1421. */
  1422. return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
  1423. (fib_callback) aac_srb_callback, (void *) cmd);
  1424. }
  1425. static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
  1426. {
  1427. if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
  1428. (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
  1429. return FAILED;
  1430. return aac_scsi_32(fib, cmd);
  1431. }
  1432. static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
  1433. {
  1434. struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
  1435. struct aac_dev *dev;
  1436. long ret;
  1437. dev = (struct aac_dev *)cmd->device->host->hostdata;
  1438. ret = aac_build_sghba(cmd, hbacmd,
  1439. dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
  1440. if (ret < 0)
  1441. return ret;
  1442. /*
  1443. * Now send the HBA command to the adapter
  1444. */
  1445. fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
  1446. sizeof(struct aac_hba_sgl);
  1447. return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
  1448. (fib_callback) aac_hba_callback,
  1449. (void *) cmd);
  1450. }
  1451. static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
  1452. struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
  1453. {
  1454. struct fib *fibptr;
  1455. dma_addr_t addr;
  1456. int rcode;
  1457. int fibsize;
  1458. struct aac_srb *srb;
  1459. struct aac_srb_reply *srb_reply;
  1460. struct sgmap64 *sg64;
  1461. u32 vbus;
  1462. u32 vid;
  1463. if (!dev->sa_firmware)
  1464. return 0;
  1465. /* allocate FIB */
  1466. fibptr = aac_fib_alloc(dev);
  1467. if (!fibptr)
  1468. return -ENOMEM;
  1469. aac_fib_init(fibptr);
  1470. fibptr->hw_fib_va->header.XferState &=
  1471. ~cpu_to_le32(FastResponseCapable);
  1472. fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
  1473. sizeof(struct sgentry64);
  1474. /* allocate DMA buffer for response */
  1475. addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
  1476. DMA_BIDIRECTIONAL);
  1477. if (dma_mapping_error(&dev->pdev->dev, addr)) {
  1478. rcode = -ENOMEM;
  1479. goto fib_error;
  1480. }
  1481. srb = fib_data(fibptr);
  1482. memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
  1483. vbus = (u32)le16_to_cpu(
  1484. dev->supplement_adapter_info.virt_device_bus);
  1485. vid = (u32)le16_to_cpu(
  1486. dev->supplement_adapter_info.virt_device_target);
  1487. /* set the common request fields */
  1488. srb->channel = cpu_to_le32(vbus);
  1489. srb->id = cpu_to_le32(vid);
  1490. srb->lun = 0;
  1491. srb->function = cpu_to_le32(SRBF_ExecuteScsi);
  1492. srb->timeout = 0;
  1493. srb->retry_limit = 0;
  1494. srb->cdb_size = cpu_to_le32(16);
  1495. srb->count = cpu_to_le32(xfer_len);
  1496. sg64 = (struct sgmap64 *)&srb->sg;
  1497. sg64->count = cpu_to_le32(1);
  1498. sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
  1499. sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
  1500. sg64->sg[0].count = cpu_to_le32(xfer_len);
  1501. /*
  1502. * Copy the updated data for other dumping or other usage if needed
  1503. */
  1504. memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
  1505. /* issue request to the controller */
  1506. rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
  1507. 1, 1, NULL, NULL);
  1508. if (rcode == -ERESTARTSYS)
  1509. rcode = -ERESTART;
  1510. if (unlikely(rcode < 0))
  1511. goto bmic_error;
  1512. srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
  1513. memcpy(&srbu->sr