/drivers/scsi/qla2xxx/qla_os.c



/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI registrations. "
	"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF:\n"
	" Default is 2.\n"
	"  0 -- No DIF Support\n"
	"  1 -- Enable DIF for all types\n"
	"  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
	"Enables NVME support. "
	"0 - no NVMe. 1 - NVMe enabled (the default when FC-NVMe support is configured in).");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	"  0 -- Error isolation disabled\n"
	"  1 -- Error isolation enabled only for DIX Type 0\n"
	"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
	"Enable on demand multiple queue pairs support. "
	"Default is 1 - enabled. "
	"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Option to specify location from which to load ISP firmware:\n"
	" 2 -- load firmware via the request_firmware() (hotplug)\n"
	"      interface.\n"
	" 1 -- load firmware from flash.\n"
	" 0 -- use default semantics.\n");
int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
	"Enables firmware ETS burst. "
	"Default is 0 - skip ETS enablement.");
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting.\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
	"Enable target reset. "
	"Default is 1 - use hw defaults.");
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
	"Enables GFF_ID checks of port type. "
	"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
	"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
	"Default is 1 - issue TM IOCBs via the IOCB mechanism. "
	"0 - issue TM IOCBs via the mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
	"Option to specify reset behaviour.\n"
	" 0 (Default) -- Reset on failure.\n"
	" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
	"Defines the maximum LU number to register with the SCSI "
	"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
	"Set the Minidump driver capture mask level. "
	"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
	"Enable/disable MiniDump. "
	"0 - MiniDump disabled. "
	"1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
	"Number of extended Logins. "
	"0 (Default) - Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
	"Allow FW to hold status IOCB until ABTS rsp received. "
	"0 (Default) Do not set fw option. "
	"1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
	"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ. "
	"0 (Default) - Do not move IOCBs. "
	"1 - Move IOCBs.");
int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
	"Detect SFP range and set appropriate distance.\n"
	"1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
	"Set to enable MSI or MSI-X interrupt mechanism.\n"
	" Default is 1, enable MSI-X interrupt mechanism.\n"
	" 0 -- enable traditional pin-based mechanism.\n"
	" 1 -- enable MSI-X interrupt mechanism.\n"
	" 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
	"Reserve 1/2 of emergency exchanges for ELS.\n"
	" 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
	"Override DIF/DIX protection capabilities mask\n"
	"Default is 0 which sets protection mask based on "
	"capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
	" 0 -- Let HBA firmware decide\n"
	" 1 -- Force T10 CRC\n"
	" 2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
	"Force using internal buffers for DIF information\n"
	"0 (Default). Based on check.\n"
	"1 Force using internal buffers\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
	"Send SmartSAN Management Attributes for FDMI Registration."
	" Default is 0 - No SmartSAN registration,"
	" 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
	"Enables RDP responses. "
	"0 - no RDP responses (default). "
	"1 - provide RDP responses.");
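/*
 * Note: the parameters above are given at module load time, for example
 * "modprobe qla2xxx logging=0x1e400000".  Those registered with a
 * writable mode (0644/S_IWUSR) can also be changed at runtime through
 * /sys/module/qla2xxx/parameters/<name>.
 */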
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	timer_setup(&vha->timer, qla2x00_timer, 0);
	vha->timer.expires = jiffies + interval * HZ;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
    struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
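/*
 * Wire up queue pair 0 (the "base qpair"): it always exists, shares the
 * hardware lock, and points at request/response queue 0.  Additional
 * qpairs are created on demand when multiqueue is enabled.
 */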
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;

	rsp->qpair = ha->base_qpair;
	rsp->req = req;
	ha->base_qpair->hw = ha;
	ha->base_qpair->req = req;
	ha->base_qpair->rsp = rsp;
	ha->base_qpair->vha = vha;
	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
	ha->base_qpair->srb_mempool = ha->srb_mempool;
	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
	ha->base_qpair->enable_class_2 = ql2xenableclass2;
	/* init qpair to this cpu. Will adjust at run time. */
	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	ha->base_qpair->pdev = ha->pdev;

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
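/*
 * Allocate the request/response queue pointer arrays and the base queue
 * pair.  On any failure the labels at the bottom unwind the earlier
 * allocations in reverse order and -ENOMEM is returned.
 */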
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs,
		    sizeof(struct qla_qpair *), GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}
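/*
 * Free one request or response queue.  ISPFx00 adapters keep their rings
 * in the *_fx00 members, so the DMA buffer to release is chosen per
 * adapter type.
 */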
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}

	kfree(rsp);
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}
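/*
 * Format a human-readable description of the PCI/PCI-X bus mode (and, in
 * the 24xx variant, the PCIe link speed and width) into @str for use in
 * log messages.
 */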
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		case 4:
			speed_str = "16.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}
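/*
 * Release every DMA mapping and pool object still attached to an SRB.
 * Each SRB_*_VALID flag records one resource to undo and is cleared as
 * that resource is released.
 */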
void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
}
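/*
 * Complete a SCSI command back to the midlayer.  The SRB's DMA resources
 * are freed before scsi_done() is called, and any error-handler waiter
 * sleeping on sp->comp is woken afterwards.
 */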
void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}
}

void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}
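/*
 * SCSI midlayer queuecommand entry point.  Fast-fail checks (unload in
 * progress, EEH, rport/fcport state) come first; when multiqueue is
 * enabled the command is handed straight to the qpair mapped to its
 * block-layer hardware queue via qla2xxx_mqueuecommand().
 */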
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(cmd->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;

	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		if (rval == QLA_INTERFACE_ERROR)
			goto qc24_free_sp_fail_command;
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_free_sp_fail_command:
	sp->free(sp);
	CMD_SP(cmd) = NULL;
	qla2xxx_rel_qpair_sp(sp->qpair, sp);

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {
		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

static inline int test_fcport_count(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	int res;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_init, vha, 0x00ec,
	    "tgt %p, fcport_count=%d\n",
	    vha, vha->fcport_count);
	res = (vha->fcport_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * it has dependency on UNLOADING flag to stop device discovery
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha);

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 *    Wait till the HBA is ready before doing driver unload.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {
		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}
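/*
 * A PCI read returning all ones conventionally means the device has
 * dropped off the bus (surprise removal or a dead link), so a host
 * status/interrupt register value of 0xffffffff is treated as a
 * "register disconnect" below.
 */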
#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
* qla2x00_isp_reg_stat
*
* Description:
*	Read the host status register of ISP before aborting the command.
*
* Input:
*	ha = pointer to host adapter structure.
*
*
* Returns:
*	Either true or false.
*
* Note:	Return true if there is register disconnect.
**************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((RD_REG_DWORD(&reg->host_status)) ==
		    ISP_REG_DISCONNECT);
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*	The abort function will abort the specified command.
*
* Input:
*	cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*	Either SUCCESS or FAILED.
*
* Note:
*	Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return SUCCESS;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original Command/sp from FW. Let the
	 * original command call scsi_done. In return, it will wake up
	 * this sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	    "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

	/* Wait for the command completion. */
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
		cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
				cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}
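/*
 * reset_errors[] is indexed by the "err" step counter maintained in
 * __qla2xxx_eh_generic_reset() below; only entries 0, 2 and 3 are set
 * in that function ("HBA not ready" at index 1 is not used there).
 */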
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, 1)
		!= QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*	The bus reset function will reset the bus and abort any executing
*	commands.
*
* Input:
*	cmd = Linux SCSI command packet of the command that cause the
*	bus reset.
*
* Returns:
*	SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*	The reset function will reset the Adapter.
*
* Input:
*	cmd = Linux SCSI command packet of the command that cause the
*	adapter reset.
*
* Returns:
*	Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		schedule_work(&ha->board_disable);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active.  Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
		QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}

	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
			      unsigned long *flags)
	__releases(qp->qp_lock_ptr)
	__acquires(qp->qp_lock_ptr)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	int rval;
	bool ret_cmd;
	uint32_t ratov_j;

	lockdep_assert_held(qp->qp_lock_ptr);

	if (qla2x00_chip_is_down(vha)) {
		sp->done(sp, res);
		return;
	}

	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
	     !qla2x00_isp_reg_stat(ha))) {
		if (sp->comp) {
			sp->done(sp, res);
			return;
		}

		sp->comp = &comp;
		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);

		rval = ha->isp_ops->abort_command(sp);
		/* Wait for command completion. */
		ret_cmd = false;
		ratov_j = ha->r_a_tov/10 * 4 * 1000;
		ratov_j = msecs_to_jiffies(ratov_j);
		switch (rval) {
		case QLA_SUCCESS:
			if (!wait_for_completion_timeout(&comp, ratov_j)) {
				ql_dbg(ql_dbg_taskm, vha, 0xffff,
				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
				    __func__, ha->r_a_tov/10);
				ret_cmd = true;
			}
			/* else FW return SP to driver */
			break;
		default:
			ret_cmd = true;
			break;
		}

		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
		if (ret_cmd && blk_mq_request_started(cmd->request))
			sp->done(sp, res);
	} else {
		sp->done(sp, res);
	}
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (!ha->req_q_map)
		return;

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}
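/*
 * scsi_host_template slave_* callbacks: slave_alloc binds the fc_port
 * hanging off the remote port's dd_data to the scsi_device, and
 * slave_configure applies the DMA alignment and queue depth for it.
 */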
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx _