/drivers/scsi/mpt2sas/mpt2sas_base.c

https://bitbucket.org/abioy/linux · C · 3819 lines (listing truncated)

  1. /*
  2. * This is the Fusion MPT base driver providing common API layer interface
  3. * for access to MPT (Message Passing Technology) firmware.
  4. *
  5. * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
  6. * Copyright (C) 2007-2009 LSI Corporation
  7. * (mailto:DL-MPTFusionLinux@lsi.com)
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version 2
  12. * of the License, or (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * NO WARRANTY
  20. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24. * solely responsible for determining the appropriateness of using and
  25. * distributing the Program and assumes all risks associated with its
  26. * exercise of rights under this Agreement, including but not limited to
  27. * the risks and costs of program errors, damage to or loss of data,
  28. * programs or equipment, and unavailability or interruption of operations.
  29. * DISCLAIMER OF LIABILITY
  30. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  31. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  32. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  33. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  34. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  36. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  37. * You should have received a copy of the GNU General Public License
  38. * along with this program; if not, write to the Free Software
  39. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  40. * USA.
  41. */
  42. #include <linux/version.h>
  43. #include <linux/kernel.h>
  44. #include <linux/module.h>
  45. #include <linux/errno.h>
  46. #include <linux/init.h>
  47. #include <linux/slab.h>
  48. #include <linux/types.h>
  49. #include <linux/pci.h>
  50. #include <linux/kdev_t.h>
  51. #include <linux/blkdev.h>
  52. #include <linux/delay.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/dma-mapping.h>
  55. #include <linux/sort.h>
  56. #include <linux/io.h>
  57. #include <linux/time.h>
  58. #include "mpt2sas_base.h"
  59. static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
  60. #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
  61. #define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */
  62. static int max_queue_depth = -1;
  63. module_param(max_queue_depth, int, 0);
  64. MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
  65. static int max_sgl_entries = -1;
  66. module_param(max_sgl_entries, int, 0);
  67. MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
  68. static int msix_disable = -1;
  69. module_param(msix_disable, int, 0);
  70. MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
  71. /* diag_buffer_enable is bitwise
  72. * bit 0 set = TRACE
  73. * bit 1 set = SNAPSHOT
  74. * bit 2 set = EXTENDED
  75. *
  76. * Any combination of these bits can be set.
  77. */
  78. static int diag_buffer_enable;
  79. module_param(diag_buffer_enable, int, 0);
  80. MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
  81. "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
  82. int mpt2sas_fwfault_debug;
  83. MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
  84. "and halt firmware - (default=0)");
  85. /**
  86. * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  87. *
  88. */
  89. static int
  90. _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
  91. {
  92. int ret = param_set_int(val, kp);
  93. struct MPT2SAS_ADAPTER *ioc;
  94. if (ret)
  95. return ret;
  96. printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
  97. list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
  98. ioc->fwfault_debug = mpt2sas_fwfault_debug;
  99. return 0;
  100. }
  101. module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
  102. param_get_int, &mpt2sas_fwfault_debug, 0644);
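/*
 * Usage sketch (illustrative, not from the original source): because the
 * parameter is registered with mode 0644 it can be set at load time or
 * toggled later through sysfs, e.g.
 *
 *   modprobe mpt2sas mpt2sas_fwfault_debug=1
 *   echo 1 > /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug
 *
 * _scsih_set_fwfault_debug() then copies the new value into every ioc on
 * mpt2sas_ioc_list.
 */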
  103. /**
  104. * _base_fault_reset_work - workq handling ioc fault conditions
  105. * @work: input argument, used to derive ioc
  106. * Context: sleep.
  107. *
  108. * Return nothing.
  109. */
  110. static void
  111. _base_fault_reset_work(struct work_struct *work)
  112. {
  113. struct MPT2SAS_ADAPTER *ioc =
  114. container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
  115. unsigned long flags;
  116. u32 doorbell;
  117. int rc;
  118. spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  119. if (ioc->shost_recovery)
  120. goto rearm_timer;
  121. spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
  122. doorbell = mpt2sas_base_get_iocstate(ioc, 0);
  123. if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
  124. rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
  125. FORCE_BIG_HAMMER);
  126. printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
  127. __func__, (rc == 0) ? "success" : "failed");
  128. doorbell = mpt2sas_base_get_iocstate(ioc, 0);
  129. if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
  130. mpt2sas_base_fault_info(ioc, doorbell &
  131. MPI2_DOORBELL_DATA_MASK);
  132. }
  133. spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  134. rearm_timer:
  135. if (ioc->fault_reset_work_q)
  136. queue_delayed_work(ioc->fault_reset_work_q,
  137. &ioc->fault_reset_work,
  138. msecs_to_jiffies(FAULT_POLLING_INTERVAL));
  139. spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
  140. }
  141. /**
  142. * mpt2sas_base_start_watchdog - start the fault_reset_work_q
  143. * @ioc: per adapter object
  144. * Context: sleep.
  145. *
  146. * Return nothing.
  147. */
  148. void
  149. mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
  150. {
  151. unsigned long flags;
  152. if (ioc->fault_reset_work_q)
  153. return;
  154. /* initialize fault polling */
  155. INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
  156. snprintf(ioc->fault_reset_work_q_name,
  157. sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
  158. ioc->fault_reset_work_q =
  159. create_singlethread_workqueue(ioc->fault_reset_work_q_name);
  160. if (!ioc->fault_reset_work_q) {
  161. printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
  162. ioc->name, __func__, __LINE__);
  163. return;
  164. }
  165. spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  166. if (ioc->fault_reset_work_q)
  167. queue_delayed_work(ioc->fault_reset_work_q,
  168. &ioc->fault_reset_work,
  169. msecs_to_jiffies(FAULT_POLLING_INTERVAL));
  170. spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
  171. }
  172. /**
  173. * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
  174. * @ioc: per adapter object
  175. * Context: sleep.
  176. *
  177. * Return nothing.
  178. */
  179. void
  180. mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
  181. {
  182. unsigned long flags;
  183. struct workqueue_struct *wq;
  184. spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  185. wq = ioc->fault_reset_work_q;
  186. ioc->fault_reset_work_q = NULL;
  187. spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
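/*
 * Clearing fault_reset_work_q under the lock prevents _base_fault_reset_work
 * from re-arming itself; any instance already queued is then cancelled,
 * otherwise the queue is flushed so a running instance finishes before the
 * workqueue is destroyed.
 */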
  188. if (wq) {
  189. if (!cancel_delayed_work(&ioc->fault_reset_work))
  190. flush_workqueue(wq);
  191. destroy_workqueue(wq);
  192. }
  193. }
  194. /**
  195. * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
  196. * @ioc: per adapter object
  197. * @fault_code: fault code
  198. *
  199. * Return nothing.
  200. */
  201. void
  202. mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
  203. {
  204. printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
  205. ioc->name, fault_code);
  206. }
  207. /**
  208. * mpt2sas_halt_firmware - halts MPT controller firmware
  209. * @ioc: per adapter object
  210. *
  211. * For debugging timeout-related issues. Writing 0xC0FFEE00
  212. * to the doorbell register will halt controller firmware. The
  213. * intent is to stop both driver and firmware so the end user can
  214. * obtain a ring buffer from the controller UART.
  215. */
  216. void
  217. mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
  218. {
  219. u32 doorbell;
  220. if (!ioc->fwfault_debug)
  221. return;
  222. dump_stack();
  223. doorbell = readl(&ioc->chip->Doorbell);
  224. if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
  225. mpt2sas_base_fault_info(ioc , doorbell);
  226. else {
  227. writel(0xC0FFEE00, &ioc->chip->Doorbell);
  228. printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
  229. "timeout\n", ioc->name);
  230. }
  231. panic("panic in %s\n", __func__);
  232. }
  233. #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
  234. /**
  235. * _base_sas_ioc_info - verbose translation of the ioc status
  236. * @ioc: per adapter object
  237. * @mpi_reply: reply mf payload returned from firmware
  238. * @request_hdr: request mf
  239. *
  240. * Return nothing.
  241. */
  242. static void
  243. _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
  244. MPI2RequestHeader_t *request_hdr)
  245. {
  246. u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
  247. MPI2_IOCSTATUS_MASK;
  248. char *desc = NULL;
  249. u16 frame_sz;
  250. char *func_str = NULL;
  251. /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
  252. if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
  253. request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
  254. request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
  255. return;
  256. switch (ioc_status) {
  257. /****************************************************************************
  258. * Common IOCStatus values for all replies
  259. ****************************************************************************/
  260. case MPI2_IOCSTATUS_INVALID_FUNCTION:
  261. desc = "invalid function";
  262. break;
  263. case MPI2_IOCSTATUS_BUSY:
  264. desc = "busy";
  265. break;
  266. case MPI2_IOCSTATUS_INVALID_SGL:
  267. desc = "invalid sgl";
  268. break;
  269. case MPI2_IOCSTATUS_INTERNAL_ERROR:
  270. desc = "internal error";
  271. break;
  272. case MPI2_IOCSTATUS_INVALID_VPID:
  273. desc = "invalid vpid";
  274. break;
  275. case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
  276. desc = "insufficient resources";
  277. break;
  278. case MPI2_IOCSTATUS_INVALID_FIELD:
  279. desc = "invalid field";
  280. break;
  281. case MPI2_IOCSTATUS_INVALID_STATE:
  282. desc = "invalid state";
  283. break;
  284. case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
  285. desc = "op state not supported";
  286. break;
  287. /****************************************************************************
  288. * Config IOCStatus values
  289. ****************************************************************************/
  290. case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
  291. desc = "config invalid action";
  292. break;
  293. case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
  294. desc = "config invalid type";
  295. break;
  296. case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
  297. desc = "config invalid page";
  298. break;
  299. case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
  300. desc = "config invalid data";
  301. break;
  302. case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
  303. desc = "config no defaults";
  304. break;
  305. case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
  306. desc = "config cant commit";
  307. break;
  308. /****************************************************************************
  309. * SCSI IO Reply
  310. ****************************************************************************/
  311. case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
  312. case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
  313. case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
  314. case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
  315. case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
  316. case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
  317. case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
  318. case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
  319. case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
  320. case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
  321. case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
  322. case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
  323. break;
  324. /****************************************************************************
  325. * For use by SCSI Initiator and SCSI Target end-to-end data protection
  326. ****************************************************************************/
  327. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  328. desc = "eedp guard error";
  329. break;
  330. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  331. desc = "eedp ref tag error";
  332. break;
  333. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  334. desc = "eedp app tag error";
  335. break;
  336. /****************************************************************************
  337. * SCSI Target values
  338. ****************************************************************************/
  339. case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
  340. desc = "target invalid io index";
  341. break;
  342. case MPI2_IOCSTATUS_TARGET_ABORTED:
  343. desc = "target aborted";
  344. break;
  345. case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
  346. desc = "target no conn retryable";
  347. break;
  348. case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
  349. desc = "target no connection";
  350. break;
  351. case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
  352. desc = "target xfer count mismatch";
  353. break;
  354. case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
  355. desc = "target data offset error";
  356. break;
  357. case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
  358. desc = "target too much write data";
  359. break;
  360. case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
  361. desc = "target iu too short";
  362. break;
  363. case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
  364. desc = "target ack nak timeout";
  365. break;
  366. case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
  367. desc = "target nak received";
  368. break;
  369. /****************************************************************************
  370. * Serial Attached SCSI values
  371. ****************************************************************************/
  372. case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
  373. desc = "smp request failed";
  374. break;
  375. case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
  376. desc = "smp data overrun";
  377. break;
  378. /****************************************************************************
  379. * Diagnostic Buffer Post / Diagnostic Release values
  380. ****************************************************************************/
  381. case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
  382. desc = "diagnostic released";
  383. break;
  384. default:
  385. break;
  386. }
  387. if (!desc)
  388. return;
  389. switch (request_hdr->Function) {
  390. case MPI2_FUNCTION_CONFIG:
  391. frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
  392. func_str = "config_page";
  393. break;
  394. case MPI2_FUNCTION_SCSI_TASK_MGMT:
  395. frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
  396. func_str = "task_mgmt";
  397. break;
  398. case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
  399. frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
  400. func_str = "sas_iounit_ctl";
  401. break;
  402. case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
  403. frame_sz = sizeof(Mpi2SepRequest_t);
  404. func_str = "enclosure";
  405. break;
  406. case MPI2_FUNCTION_IOC_INIT:
  407. frame_sz = sizeof(Mpi2IOCInitRequest_t);
  408. func_str = "ioc_init";
  409. break;
  410. case MPI2_FUNCTION_PORT_ENABLE:
  411. frame_sz = sizeof(Mpi2PortEnableRequest_t);
  412. func_str = "port_enable";
  413. break;
  414. case MPI2_FUNCTION_SMP_PASSTHROUGH:
  415. frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
  416. func_str = "smp_passthru";
  417. break;
  418. default:
  419. frame_sz = 32;
  420. func_str = "unknown";
  421. break;
  422. }
  423. printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
  424. " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
  425. _debug_dump_mf(request_hdr, frame_sz/4);
  426. }
  427. /**
  428. * _base_display_event_data - verbose translation of firmware async events
  429. * @ioc: per adapter object
  430. * @mpi_reply: reply mf payload returned from firmware
  431. *
  432. * Return nothing.
  433. */
  434. static void
  435. _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
  436. Mpi2EventNotificationReply_t *mpi_reply)
  437. {
  438. char *desc = NULL;
  439. u16 event;
  440. if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
  441. return;
  442. event = le16_to_cpu(mpi_reply->Event);
  443. switch (event) {
  444. case MPI2_EVENT_LOG_DATA:
  445. desc = "Log Data";
  446. break;
  447. case MPI2_EVENT_STATE_CHANGE:
  448. desc = "Status Change";
  449. break;
  450. case MPI2_EVENT_HARD_RESET_RECEIVED:
  451. desc = "Hard Reset Received";
  452. break;
  453. case MPI2_EVENT_EVENT_CHANGE:
  454. desc = "Event Change";
  455. break;
  456. case MPI2_EVENT_TASK_SET_FULL:
  457. desc = "Task Set Full";
  458. break;
  459. case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
  460. desc = "Device Status Change";
  461. break;
  462. case MPI2_EVENT_IR_OPERATION_STATUS:
  463. desc = "IR Operation Status";
  464. break;
  465. case MPI2_EVENT_SAS_DISCOVERY:
  466. desc = "Discovery";
  467. break;
  468. case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
  469. desc = "SAS Broadcast Primitive";
  470. break;
  471. case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
  472. desc = "SAS Init Device Status Change";
  473. break;
  474. case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
  475. desc = "SAS Init Table Overflow";
  476. break;
  477. case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
  478. desc = "SAS Topology Change List";
  479. break;
  480. case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
  481. desc = "SAS Enclosure Device Status Change";
  482. break;
  483. case MPI2_EVENT_IR_VOLUME:
  484. desc = "IR Volume";
  485. break;
  486. case MPI2_EVENT_IR_PHYSICAL_DISK:
  487. desc = "IR Physical Disk";
  488. break;
  489. case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
  490. desc = "IR Configuration Change List";
  491. break;
  492. case MPI2_EVENT_LOG_ENTRY_ADDED:
  493. desc = "Log Entry Added";
  494. break;
  495. }
  496. if (!desc)
  497. return;
  498. printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
  499. }
  500. #endif
  501. /**
  502. * _base_sas_log_info - verbose translation of firmware log info
  503. * @ioc: per adapter object
  504. * @log_info: log info
  505. *
  506. * Return nothing.
  507. */
  508. static void
  509. _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
  510. {
  511. union loginfo_type {
  512. u32 loginfo;
  513. struct {
  514. u32 subcode:16;
  515. u32 code:8;
  516. u32 originator:4;
  517. u32 bus_type:4;
  518. } dw;
  519. };
  520. union loginfo_type sas_loginfo;
  521. char *originator_str = NULL;
  522. sas_loginfo.loginfo = log_info;
  523. if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
  524. return;
  525. /* each nexus loss loginfo */
  526. if (log_info == 0x31170000)
  527. return;
  528. /* eat the loginfos associated with task aborts */
  529. if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
  530. 0x31140000 || log_info == 0x31130000))
  531. return;
  532. switch (sas_loginfo.dw.originator) {
  533. case 0:
  534. originator_str = "IOP";
  535. break;
  536. case 1:
  537. originator_str = "PL";
  538. break;
  539. case 2:
  540. originator_str = "IR";
  541. break;
  542. }
  543. printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
  544. "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
  545. originator_str, sas_loginfo.dw.code,
  546. sas_loginfo.dw.subcode);
  547. }
  548. /**
  549. * _base_display_reply_info - verbose translation of reply ioc_status and log info
  550. * @ioc: per adapter object
  551. * @smid: system request message index
  552. * @msix_index: MSIX table index supplied by the OS
  553. * @reply: reply message frame(lower 32bit addr)
  554. *
  555. * Return nothing.
  556. */
  557. static void
  558. _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  559. u32 reply)
  560. {
  561. MPI2DefaultReply_t *mpi_reply;
  562. u16 ioc_status;
  563. mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
  564. ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
  565. #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
  566. if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
  567. (ioc->logging_level & MPT_DEBUG_REPLY)) {
  568. _base_sas_ioc_info(ioc , mpi_reply,
  569. mpt2sas_base_get_msg_frame(ioc, smid));
  570. }
  571. #endif
  572. if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
  573. _base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
  574. }
  575. /**
  576. * mpt2sas_base_done - base internal command completion routine
  577. * @ioc: per adapter object
  578. * @smid: system request message index
  579. * @msix_index: MSIX table index supplied by the OS
  580. * @reply: reply message frame(lower 32bit addr)
  581. *
  582. * Return 1 meaning mf should be freed from _base_interrupt
  583. * 0 means the mf is freed from this function.
  584. */
  585. u8
  586. mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  587. u32 reply)
  588. {
  589. MPI2DefaultReply_t *mpi_reply;
  590. mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
  591. if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
  592. return 1;
  593. if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
  594. return 1;
  595. ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
  596. if (mpi_reply) {
  597. ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
  598. memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
  599. }
  600. ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
  601. complete(&ioc->base_cmds.done);
  602. return 1;
  603. }
  604. /**
  605. * _base_async_event - main callback handler for firmware async events
  606. * @ioc: per adapter object
  607. * @msix_index: MSIX table index supplied by the OS
  608. * @reply: reply message frame(lower 32bit addr)
  609. *
  610. * Return 1 meaning mf should be freed from _base_interrupt
  611. * 0 means the mf is freed from this function.
  612. */
  613. static u8
  614. _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
  615. {
  616. Mpi2EventNotificationReply_t *mpi_reply;
  617. Mpi2EventAckRequest_t *ack_request;
  618. u16 smid;
  619. mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
  620. if (!mpi_reply)
  621. return 1;
  622. if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
  623. return 1;
  624. #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
  625. _base_display_event_data(ioc, mpi_reply);
  626. #endif
  627. if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
  628. goto out;
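/*
 * Events flagged as AckRequired are acknowledged below with an
 * MPI2_FUNCTION_EVENT_ACK request built on an internal smid; either way the
 * event is still forwarded to the scsih and ctl callback handlers at "out".
 */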
  629. smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
  630. if (!smid) {
  631. printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
  632. ioc->name, __func__);
  633. goto out;
  634. }
  635. ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
  636. memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
  637. ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
  638. ack_request->Event = mpi_reply->Event;
  639. ack_request->EventContext = mpi_reply->EventContext;
  640. ack_request->VF_ID = 0; /* TODO */
  641. ack_request->VP_ID = 0;
  642. mpt2sas_base_put_smid_default(ioc, smid);
  643. out:
  644. /* scsih callback handler */
  645. mpt2sas_scsih_event_callback(ioc, msix_index, reply);
  646. /* ctl callback handler */
  647. mpt2sas_ctl_event_callback(ioc, msix_index, reply);
  648. return 1;
  649. }
  650. /**
  651. * _base_get_cb_idx - obtain the callback index
  652. * @ioc: per adapter object
  653. * @smid: system request message index
  654. *
  655. * Return callback index.
  656. */
  657. static u8
  658. _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  659. {
  660. int i;
  661. u8 cb_idx = 0xFF;
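/*
 * smid values map onto three request pools: [1, hi_priority_smid) is the
 * scsiio pool, [hi_priority_smid, internal_smid) the high-priority pool and
 * [internal_smid, ...) the internal pool; the lookup below simply offsets
 * into the matching tracker array.
 */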
  662. if (smid >= ioc->hi_priority_smid) {
  663. if (smid < ioc->internal_smid) {
  664. i = smid - ioc->hi_priority_smid;
  665. cb_idx = ioc->hpr_lookup[i].cb_idx;
  666. } else {
  667. i = smid - ioc->internal_smid;
  668. cb_idx = ioc->internal_lookup[i].cb_idx;
  669. }
  670. } else {
  671. i = smid - 1;
  672. cb_idx = ioc->scsi_lookup[i].cb_idx;
  673. }
  674. return cb_idx;
  675. }
  676. /**
  677. * _base_mask_interrupts - disable interrupts
  678. * @ioc: per adapter object
  679. *
  680. * Disabling ResetIRQ, Reply and Doorbell Interrupts
  681. *
  682. * Return nothing.
  683. */
  684. static void
  685. _base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
  686. {
  687. u32 him_register;
  688. ioc->mask_interrupts = 1;
  689. him_register = readl(&ioc->chip->HostInterruptMask);
  690. him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
  691. writel(him_register, &ioc->chip->HostInterruptMask);
  692. readl(&ioc->chip->HostInterruptMask);
  693. }
  694. /**
  695. * _base_unmask_interrupts - enable interrupts
  696. * @ioc: per adapter object
  697. *
  698. * Enabling only Reply Interrupts
  699. *
  700. * Return nothing.
  701. */
  702. static void
  703. _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
  704. {
  705. u32 him_register;
  706. him_register = readl(&ioc->chip->HostInterruptMask);
  707. him_register &= ~MPI2_HIM_RIM;
  708. writel(him_register, &ioc->chip->HostInterruptMask);
  709. ioc->mask_interrupts = 0;
  710. }
  711. union reply_descriptor {
  712. u64 word;
  713. struct {
  714. u32 low;
  715. u32 high;
  716. } u;
  717. };
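/*
 * _base_interrupt reclaims a descriptor by writing ULLONG_MAX to its Words
 * field, so reading back all 1s in either half means the IOC has not yet
 * posted a fresh descriptor at that slot.
 */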
  718. /**
  719. * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  720. * @irq: irq number (not used)
  721. * @bus_id: bus identifier cookie == pointer to MPT2SAS_ADAPTER structure
  722. * @r: pt_regs pointer (not used)
  723. *
  724. * Return IRQ_HANDLED if processed, else IRQ_NONE.
  725. */
  726. static irqreturn_t
  727. _base_interrupt(int irq, void *bus_id)
  728. {
  729. union reply_descriptor rd;
  730. u32 completed_cmds;
  731. u8 request_desript_type;
  732. u16 smid;
  733. u8 cb_idx;
  734. u32 reply;
  735. u8 msix_index;
  736. struct MPT2SAS_ADAPTER *ioc = bus_id;
  737. Mpi2ReplyDescriptorsUnion_t *rpf;
  738. u8 rc;
  739. if (ioc->mask_interrupts)
  740. return IRQ_NONE;
  741. rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
  742. request_desript_type = rpf->Default.ReplyFlags
  743. & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  744. if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  745. return IRQ_NONE;
  746. completed_cmds = 0;
  747. do {
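/*
 * Walk the circular reply post queue: dispatch each posted descriptor to the
 * callback registered for its smid, recycle any address reply frame onto the
 * reply free queue, and stop at the first UNUSED entry. ReplyPostHostIndex
 * is written back to the chip only once, after the loop.
 */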
  748. rd.word = rpf->Words;
  749. if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
  750. goto out;
  751. reply = 0;
  752. cb_idx = 0xFF;
  753. smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
  754. msix_index = rpf->Default.MSIxIndex;
  755. if (request_desript_type ==
  756. MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
  757. reply = le32_to_cpu
  758. (rpf->AddressReply.ReplyFrameAddress);
  759. } else if (request_desript_type ==
  760. MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
  761. goto next;
  762. else if (request_desript_type ==
  763. MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
  764. goto next;
  765. if (smid)
  766. cb_idx = _base_get_cb_idx(ioc, smid);
  767. if (smid && cb_idx != 0xFF) {
  768. rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
  769. reply);
  770. if (reply)
  771. _base_display_reply_info(ioc, smid, msix_index,
  772. reply);
  773. if (rc)
  774. mpt2sas_base_free_smid(ioc, smid);
  775. }
  776. if (!smid)
  777. _base_async_event(ioc, msix_index, reply);
  778. /* reply free queue handling */
  779. if (reply) {
  780. ioc->reply_free_host_index =
  781. (ioc->reply_free_host_index ==
  782. (ioc->reply_free_queue_depth - 1)) ?
  783. 0 : ioc->reply_free_host_index + 1;
  784. ioc->reply_free[ioc->reply_free_host_index] =
  785. cpu_to_le32(reply);
  786. wmb();
  787. writel(ioc->reply_free_host_index,
  788. &ioc->chip->ReplyFreeHostIndex);
  789. }
  790. next:
  791. rpf->Words = ULLONG_MAX;
  792. ioc->reply_post_host_index = (ioc->reply_post_host_index ==
  793. (ioc->reply_post_queue_depth - 1)) ? 0 :
  794. ioc->reply_post_host_index + 1;
  795. request_desript_type =
  796. ioc->reply_post_free[ioc->reply_post_host_index].Default.
  797. ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  798. completed_cmds++;
  799. if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  800. goto out;
  801. if (!ioc->reply_post_host_index)
  802. rpf = ioc->reply_post_free;
  803. else
  804. rpf++;
  805. } while (1);
  806. out:
  807. if (!completed_cmds)
  808. return IRQ_NONE;
  809. wmb();
  810. writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
  811. return IRQ_HANDLED;
  812. }
  813. /**
  814. * mpt2sas_base_release_callback_handler - clear interrupt callback handler
  815. * @cb_idx: callback index
  816. *
  817. * Return nothing.
  818. */
  819. void
  820. mpt2sas_base_release_callback_handler(u8 cb_idx)
  821. {
  822. mpt_callbacks[cb_idx] = NULL;
  823. }
  824. /**
  825. * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
  826. * @cb_func: callback function
  827. *
  828. * Returns the assigned callback index (cb_idx).
  829. */
  830. u8
  831. mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
  832. {
  833. u8 cb_idx;
  834. for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
  835. if (mpt_callbacks[cb_idx] == NULL)
  836. break;
  837. mpt_callbacks[cb_idx] = cb_func;
  838. return cb_idx;
  839. }
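/*
 * Registration sketch (illustrative; the call site is an assumption about
 * how a sub-module such as mpt2sas_scsih would use this API, not a quote of
 * that file). The returned index is saved and later recorded in the request
 * tracker so _base_get_cb_idx() can route the completion:
 *
 *   scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 */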
  840. /**
  841. * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
  842. *
  843. * Return nothing.
  844. */
  845. void
  846. mpt2sas_base_initialize_callback_handler(void)
  847. {
  848. u8 cb_idx;
  849. for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
  850. mpt2sas_base_release_callback_handler(cb_idx);
  851. }
  852. /**
  853. * mpt2sas_base_build_zero_len_sge - build zero length sg entry
  854. * @ioc: per adapter object
  855. * @paddr: virtual address for SGE
  856. *
  857. * Create a zero length scatter gather entry to ensure the IOC's hardware has
  858. * something to use if the target device goes brain dead and tries
  859. * to send data even when none is asked for.
  860. *
  861. * Return nothing.
  862. */
  863. void
  864. mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
  865. {
  866. u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
  867. MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
  868. MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
  869. MPI2_SGE_FLAGS_SHIFT);
  870. ioc->base_add_sg_single(paddr, flags_length, -1);
  871. }
  872. /**
  873. * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
  874. * @paddr: virtual address for SGE
  875. * @flags_length: SGE flags and data transfer length
  876. * @dma_addr: Physical address
  877. *
  878. * Return nothing.
  879. */
  880. static void
  881. _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
  882. {
  883. Mpi2SGESimple32_t *sgel = paddr;
  884. flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
  885. MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
  886. sgel->FlagsLength = cpu_to_le32(flags_length);
  887. sgel->Address = cpu_to_le32(dma_addr);
  888. }
  889. /**
  890. * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
  891. * @paddr: virtual address for SGE
  892. * @flags_length: SGE flags and data transfer length
  893. * @dma_addr: Physical address
  894. *
  895. * Return nothing.
  896. */
  897. static void
  898. _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
  899. {
  900. Mpi2SGESimple64_t *sgel = paddr;
  901. flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
  902. MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
  903. sgel->FlagsLength = cpu_to_le32(flags_length);
  904. sgel->Address = cpu_to_le64(dma_addr);
  905. }
  906. #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
  907. /**
  908. * _base_config_dma_addressing - set dma addressing
  909. * @ioc: per adapter object
  910. * @pdev: PCI device struct
  911. *
  912. * Returns 0 for success, non-zero for failure.
  913. */
  914. static int
  915. _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
  916. {
  917. struct sysinfo s;
  918. char *desc = NULL;
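/*
 * Whichever DMA mask succeeds also fixes the scatter-gather element format:
 * the 64-bit path selects Mpi2SGESimple64_t and _base_add_sg_single_64, the
 * 32-bit fallback selects the 32-bit equivalents.
 */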
  919. if (sizeof(dma_addr_t) > 4) {
  920. const uint64_t required_mask =
  921. dma_get_required_mask(&pdev->dev);
  922. if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
  923. DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
  924. DMA_BIT_MASK(64))) {
  925. ioc->base_add_sg_single = &_base_add_sg_single_64;
  926. ioc->sge_size = sizeof(Mpi2SGESimple64_t);
  927. desc = "64";
  928. goto out;
  929. }
  930. }
  931. if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
  932. && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
  933. ioc->base_add_sg_single = &_base_add_sg_single_32;
  934. ioc->sge_size = sizeof(Mpi2SGESimple32_t);
  935. desc = "32";
  936. } else
  937. return -ENODEV;
  938. out:
  939. si_meminfo(&s);
  940. printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
  941. "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
  942. return 0;
  943. }
  944. /**
  945. * _base_save_msix_table - backup msix vector table
  946. * @ioc: per adapter object
  947. *
  948. * This addresses an erratum where diag reset clears out the table
  949. */
  950. static void
  951. _base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
  952. {
  953. int i;
  954. if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
  955. return;
  956. for (i = 0; i < ioc->msix_vector_count; i++)
  957. ioc->msix_table_backup[i] = ioc->msix_table[i];
  958. }
  959. /**
  960. * _base_restore_msix_table - this restores the msix vector table
  961. * @ioc: per adapter object
  962. *
  963. */
  964. static void
  965. _base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
  966. {
  967. int i;
  968. if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
  969. return;
  970. for (i = 0; i < ioc->msix_vector_count; i++)
  971. ioc->msix_table[i] = ioc->msix_table_backup[i];
  972. }
  973. /**
  974. * _base_check_enable_msix - checks MSI-X capability.
  975. * @ioc: per adapter object
  976. *
  977. * Check to see if card is capable of MSIX, and set number
  978. * of available msix vectors
  979. */
  980. static int
  981. _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
  982. {
  983. int base;
  984. u16 message_control;
  985. u32 msix_table_offset;
  986. base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
  987. if (!base) {
  988. dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
  989. "supported\n", ioc->name));
  990. return -EINVAL;
  991. }
  992. /* get msix vector count */
  993. pci_read_config_word(ioc->pdev, base + 2, &message_control);
  994. ioc->msix_vector_count = (message_control & 0x3FF) + 1;
  995. /* get msix table */
  996. pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
  997. msix_table_offset &= 0xFFFFFFF8;
  998. ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
  999. dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
  1000. "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
  1001. ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
  1002. return 0;
  1003. }
  1004. /**
  1005. * _base_disable_msix - disables msix
  1006. * @ioc: per adapter object
  1007. *
  1008. */
  1009. static void
  1010. _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
  1011. {
  1012. if (ioc->msix_enable) {
  1013. pci_disable_msix(ioc->pdev);
  1014. kfree(ioc->msix_table_backup);
  1015. ioc->msix_table_backup = NULL;
  1016. ioc->msix_enable = 0;
  1017. }
  1018. }
  1019. /**
  1020. * _base_enable_msix - enables msix, falling back to io_apic
  1021. * @ioc: per adapter object
  1022. *
  1023. */
  1024. static int
  1025. _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
  1026. {
  1027. struct msix_entry entries;
  1028. int r;
  1029. u8 try_msix = 0;
  1030. if (msix_disable == -1 || msix_disable == 0)
  1031. try_msix = 1;
  1032. if (!try_msix)
  1033. goto try_ioapic;
  1034. if (_base_check_enable_msix(ioc) != 0)
  1035. goto try_ioapic;
  1036. ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
  1037. sizeof(u32), GFP_KERNEL);
  1038. if (!ioc->msix_table_backup) {
  1039. dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
  1040. "msix_table_backup failed!!!\n", ioc->name));
  1041. goto try_ioapic;
  1042. }
  1043. memset(&entries, 0, sizeof(struct msix_entry));
  1044. r = pci_enable_msix(ioc->pdev, &entries, 1);
  1045. if (r) {
  1046. dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
  1047. "failed (r=%d) !!!\n", ioc->name, r));
  1048. goto try_ioapic;
  1049. }
  1050. r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
  1051. ioc->name, ioc);
  1052. if (r) {
  1053. dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
  1054. "interrupt %d !!!\n", ioc->name, entries.vector));
  1055. pci_disable_msix(ioc->pdev);
  1056. goto try_ioapic;
  1057. }
  1058. ioc->pci_irq = entries.vector;
  1059. ioc->msix_enable = 1;
  1060. return 0;
  1061. /* fall back to io_apic interrupt routing */
  1062. try_ioapic:
  1063. r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
  1064. ioc->name, ioc);
  1065. if (r) {
  1066. printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
  1067. ioc->name, ioc->pdev->irq);
  1068. r = -EBUSY;
  1069. goto out_fail;
  1070. }
  1071. ioc->pci_irq = ioc->pdev->irq;
  1072. return 0;
  1073. out_fail:
  1074. return r;
  1075. }
  1076. /**
  1077. * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
  1078. * @ioc: per adapter object
  1079. *
  1080. * Returns 0 for success, non-zero for failure.
  1081. */
  1082. int
  1083. mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
  1084. {
  1085. struct pci_dev *pdev = ioc->pdev;
  1086. u32 memap_sz;
  1087. u32 pio_sz;
  1088. int i, r = 0;
  1089. u64 pio_chip = 0;
  1090. u64 chip_phys = 0;
  1091. dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
  1092. ioc->name, __func__));
  1093. ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
  1094. if (pci_enable_device_mem(pdev)) {
  1095. printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
  1096. "failed\n", ioc->name);
  1097. return -ENODEV;
  1098. }
  1099. if (pci_request_selected_regions(pdev, ioc->bars,
  1100. MPT2SAS_DRIVER_NAME)) {
  1101. printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
  1102. "failed\n", ioc->name);
  1103. r = -ENODEV;
  1104. goto out_fail;
  1105. }
  1106. pci_set_master(pdev);
  1107. if (_base_config_dma_addressing(ioc, pdev) != 0) {
  1108. printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
  1109. ioc->name, pci_name(pdev));
  1110. r = -ENODEV;
  1111. goto out_fail;
  1112. }
  1113. for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
  1114. if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
  1115. if (pio_sz)
  1116. continue;
  1117. pio_chip = (u64)pci_resource_start(pdev, i);
  1118. pio_sz = pci_resource_len(pdev, i);
  1119. } else {
  1120. if (memap_sz)
  1121. continue;
  1122. ioc->chip_phys = pci_resource_start(pdev, i);
  1123. chip_phys = (u64)ioc->chip_phys;
  1124. memap_sz = pci_resource_len(pdev, i);
  1125. ioc->chip = ioremap(ioc->chip_phys, memap_sz);
  1126. if (ioc->chip == NULL) {
  1127. printk(MPT2SAS_ERR_FMT "unable to map adapter "
  1128. "memory!\n", ioc->name);
  1129. r = -EINVAL;
  1130. goto out_fail;
  1131. }
  1132. }
  1133. }
  1134. _base_mask_interrupts(ioc);
  1135. r = _base_enable_msix(ioc);
  1136. if (r)
  1137. goto out_fail;
  1138. printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
  1139. ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
  1140. "IO-APIC enabled"), ioc->pci_irq);
  1141. printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
  1142. ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
  1143. printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
  1144. ioc->name, (unsigned long long)pio_chip, pio_sz);
  1145. return 0;
  1146. out_fail:
  1147. if (ioc->chip_phys)
  1148. iounmap(ioc->chip);
  1149. ioc->chip_phys = 0;
  1150. ioc->pci_irq = -1;
  1151. pci_release_selected_regions(ioc->pdev, ioc->bars);
  1152. pci_disable_device(pdev);
  1153. return r;
  1154. }
  1155. /**
  1156. * mpt2sas_base_get_msg_frame - obtain request mf pointer
  1157. * @ioc: per adapter object
  1158. * @smid: system request message index(smid zero is invalid)
  1159. *
  1160. * Returns virt pointer to message frame.
  1161. */
  1162. void *
  1163. mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1164. {
  1165. return (void *)(ioc->request + (smid * ioc->request_sz));
  1166. }
  1167. /**
  1168. * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
  1169. * @ioc: per adapter object
  1170. * @smid: system request message index
  1171. *
  1172. * Returns virt pointer to sense buffer.
  1173. */
  1174. void *
  1175. mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1176. {
  1177. return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
  1178. }
  1179. /**
  1180. * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
  1181. * @ioc: per adapter object
  1182. * @smid: system request message index
  1183. *
  1184. * Returns phys pointer to the low 32bit address of the sense buffer.
  1185. */
  1186. __le32
  1187. mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1188. {
  1189. return cpu_to_le32(ioc->sense_dma +
  1190. ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
  1191. }
  1192. /**
  1193. * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
  1194. * @ioc: per adapter object
  1195. * @phys_addr: lower 32 physical addr of the reply
  1196. *
  1197. * Converts 32bit lower physical addr into a virt address.
  1198. */
  1199. void *
  1200. mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
  1201. {
  1202. if (!phys_addr)
  1203. return NULL;
  1204. return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
  1205. }
  1206. /**
  1207. * mpt2sas_base_get_smid - obtain a free smid from internal queue
  1208. * @ioc: per adapter object
  1209. * @cb_idx: callback index
  1210. *
  1211. * Returns smid (zero is invalid)
  1212. */
  1213. u16
  1214. mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
  1215. {
  1216. unsigned long flags;
  1217. struct request_tracker *request;
  1218. u16 smid;
  1219. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  1220. if (list_empty(&ioc->internal_free_list)) {
  1221. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1222. printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
  1223. ioc->name, __func__);
  1224. return 0;
  1225. }
  1226. request = list_entry(ioc->internal_free_list.next,
  1227. struct request_tracker, tracker_list);
  1228. request->cb_idx = cb_idx;
  1229. smid = request->smid;
  1230. list_del(&request->tracker_list);
  1231. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1232. return smid;
  1233. }
  1234. /**
  1235. * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
  1236. * @ioc: per adapter object
  1237. * @cb_idx: callback index
  1238. * @scmd: pointer to scsi command object
  1239. *
  1240. * Returns smid (zero is invalid)
  1241. */
  1242. u16
  1243. mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
  1244. struct scsi_cmnd *scmd)
  1245. {
  1246. unsigned long flags;
  1247. struct request_tracker *request;
  1248. u16 smid;
  1249. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  1250. if (list_empty(&ioc->free_list)) {
  1251. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1252. printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
  1253. ioc->name, __func__);
  1254. return 0;
  1255. }
  1256. request = list_entry(ioc->free_list.next,
  1257. struct request_tracker, tracker_list);
  1258. request->scmd = scmd;
  1259. request->cb_idx = cb_idx;
  1260. smid = request->smid;
  1261. list_del(&request->tracker_list);
  1262. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1263. return smid;
  1264. }
  1265. /**
  1266. * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
  1267. * @ioc: per adapter object
  1268. * @cb_idx: callback index
  1269. *
  1270. * Returns smid (zero is invalid)
  1271. */
  1272. u16
  1273. mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
  1274. {
  1275. unsigned long flags;
  1276. struct request_tracker *request;
  1277. u16 smid;
  1278. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  1279. if (list_empty(&ioc->hpr_free_list)) {
  1280. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1281. return 0;
  1282. }
  1283. request = list_entry(ioc->hpr_free_list.next,
  1284. struct request_tracker, tracker_list);
  1285. request->cb_idx = cb_idx;
  1286. smid = request->smid;
  1287. list_del(&request->tracker_list);
  1288. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1289. return smid;
  1290. }
  1291. /**
  1292. * mpt2sas_base_free_smid - put smid back on free_list
  1293. * @ioc: per adapter object
  1294. * @smid: system request message index
  1295. *
  1296. * Return nothing.
  1297. */
  1298. void
  1299. mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1300. {
  1301. unsigned long flags;
  1302. int i;
  1303. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  1304. if (smid >= ioc->hi_priority_smid) {
  1305. if (smid < ioc->internal_smid) {
  1306. /* hi-priority */
  1307. i = smid - ioc->hi_priority_smid;
  1308. ioc->hpr_lookup[i].cb_idx = 0xFF;
  1309. list_add_tail(&ioc->hpr_lookup[i].tracker_list,
  1310. &ioc->hpr_free_list);
  1311. } else {
  1312. /* internal queue */
  1313. i = smid - ioc->internal_smid;
  1314. ioc->internal_lookup[i].cb_idx = 0xFF;
  1315. list_add_tail(&ioc->internal_lookup[i].tracker_list,
  1316. &ioc->internal_free_list);
  1317. }
  1318. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1319. return;
  1320. }
  1321. /* scsiio queue */
  1322. i = smid - 1;
  1323. ioc->scsi_lookup[i].cb_idx = 0xFF;
  1324. ioc->scsi_lookup[i].scmd = NULL;
  1325. list_add_tail(&ioc->scsi_lookup[i].tracker_list,
  1326. &ioc->free_list);
  1327. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  1328. /*
  1329. * See the _wait_for_commands_to_complete() call with regard to this code.
  1330. */
  1331. if (ioc->shost_recovery && ioc->pending_io_count) {
  1332. if (ioc->pending_io_count == 1)
  1333. wake_up(&ioc->reset_wq);
  1334. ioc->pending_io_count--;
  1335. }
  1336. }
  1337. /**
  1338. * _base_writeq - 64 bit write to MMIO
  1339. * @ioc: per adapter object
  1340. * @b: data payload
  1341. * @addr: address in MMIO space
  1342. * @writeq_lock: spin lock
  1343. *
  1344. * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
  1345. * care of 32 bit environments where it's not guaranteed to send the entire word
  1346. * in one transfer.
  1347. */
  1348. #ifndef writeq
  1349. static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
  1350. spinlock_t *writeq_lock)
  1351. {
  1352. unsigned long flags;
  1353. __u64 data_out = cpu_to_le64(b);
  1354. spin_lock_irqsave(writeq_lock, flags);
  1355. writel((u32)(data_out), addr);
  1356. writel((u32)(data_out >> 32), (addr + 4));
  1357. spin_unlock_irqrestore(writeq_lock, flags);
  1358. }
  1359. #else
  1360. static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
  1361. spinlock_t *writeq_lock)
  1362. {
  1363. writeq(cpu_to_le64(b), addr);
  1364. }
  1365. #endif
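/*
 * The request descriptor must reach the IOC as one uninterleaved 64-bit
 * value, so on 32-bit kernels without writeq() the two 32-bit halves are
 * written under writeq_lock to keep descriptors posted from different CPUs
 * from interleaving.
 */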
  1366. /**
  1367. * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
  1368. * @ioc: per adapter object
  1369. * @smid: system request message index
  1370. * @handle: device handle
  1371. *
  1372. * Return nothing.
  1373. */
  1374. void
  1375. mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
  1376. {
  1377. Mpi2RequestDescriptorUnion_t descriptor;
  1378. u64 *request = (u64 *)&descriptor;
  1379. descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
  1380. descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
  1381. descriptor.SCSIIO.SMID = cpu_to_le16(smid);
  1382. descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
  1383. descriptor.SCSIIO.LMID = 0;
  1384. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  1385. &ioc->scsi_lookup_lock);
  1386. }
  1387. /**
  1388. * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
  1389. * @ioc: per adapter object
  1390. * @smid: system request message index
  1391. *
  1392. * Return nothing.
  1393. */
  1394. void
  1395. mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1396. {
  1397. Mpi2RequestDescriptorUnion_t descriptor;
  1398. u64 *request = (u64 *)&descriptor;
  1399. descriptor.HighPriority.RequestFlags =
  1400. MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  1401. descriptor.HighPriority.MSIxIndex = 0; /* TODO */
  1402. descriptor.HighPriority.SMID = cpu_to_le16(smid);
  1403. descriptor.HighPriority.LMID = 0;
  1404. descriptor.HighPriority.Reserved1 = 0;
  1405. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  1406. &ioc->scsi_lookup_lock);
  1407. }
  1408. /**
  1409. * mpt2sas_base_put_smid_default - Default, primarily used for config pages
  1410. * @ioc: per adapter object
  1411. * @smid: system request message index
  1412. *
  1413. * Return nothing.
  1414. */
  1415. void
  1416. mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
  1417. {
  1418. Mpi2RequestDescriptorUnion_t descriptor;
  1419. u64 *request = (u64 *)&descriptor;
  1420. descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  1421. descriptor.Default.MSIxIndex = 0; /* TODO */
  1422. descriptor.Default.SMID = cpu_to_le16(smid);
  1423. descriptor.Default.LMID = 0;
  1424. descriptor.Default.DescriptorTypeDependent = 0;
  1425. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  1426. &ioc->scsi_lookup_lock);
  1427. }
  1428. /**
  1429. * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
  1430. * @ioc: per adapter object
  1431. * @smid: system request message index
  1432. * @io_index: value used to track the IO
  1433. *
  1434. * Return nothing.
  1435. */
  1436. void
  1437. mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
  1438. u16 io_index)
  1439. {
  1440. Mpi2RequestDescriptorUnion_t descriptor;
  1441. u64 *request = (u64 *)&descriptor;
  1442. descriptor.SCSITarget.RequestFlags =
  1443. MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
  1444. descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
  1445. descriptor.SCSITarget.SMID = cpu_to_le16(smid);
  1446. descriptor.SCSITarget.LMID = 0;
  1447. descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
  1448. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  1449. &ioc->scsi_lookup_lock);
  1450. }
  1451. /**
  1452. * _base_display_dell_branding - Display branding string
  1453. * @ioc: per adapter object
  1454. *
  1455. * Return nothing.
  1456. */
  1457. static void
  1458. _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
  1459. {
  1460. char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
  1461. if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
  1462. return;
  1463. memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
  1464. switch (ioc->pdev->subsystem_device) {
  1465. case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
  1466. strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
  1467. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1468. break;
  1469. case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
  1470. strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
  1471. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1472. break;
  1473. case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
  1474. strncpy(dell_branding,
  1475. MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
  1476. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1477. break;
  1478. case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
  1479. strncpy(dell_branding,
  1480. MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
  1481. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1482. break;
  1483. case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
  1484. strncpy(dell_branding,
  1485. MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
  1486. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1487. break;
  1488. case MPT2SAS_DELL_PERC_H200_SSDID:
  1489. strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
  1490. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1491. break;
  1492. case MPT2SAS_DELL_6GBPS_SAS_SSDID:
  1493. strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
  1494. MPT2SAS_DELL_BRANDING_SIZE - 1);
  1495. break;
  1496. default:
  1497. sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
  1498. break;
  1499. }
  1500. printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
  1501. " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
  1502. ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
  1503. ioc->pdev->subsystem_device);
  1504. }
  1505. /**
  1506. * _base_display_ioc_capabilities - Display IOC's capabilities.
  1507. * @ioc: per adapter object
  1508. *
  1509. * Return nothing.
  1510. */
  1511. static void
  1512. _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
  1513. {
  1514. int i = 0;
  1515. char desc[16];
  1516. u8 revision;
  1517. u32 iounit_pg1_flags;
  1518. pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
  1519. strncpy(desc, ioc->manu_pg0.ChipName, 16);
  1520. printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
  1521. "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
  1522. ioc->name, desc,
  1523. (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
  1524. (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
  1525. (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
  1526. ioc->facts.FWVersion.Word & 0x000000FF,
  1527. revision,
  1528. (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
  1529. (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
  1530. (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
  1531. ioc->bios_pg3.BiosVersion & 0x000000FF);
  1532. _base_display_dell_branding(ioc);
  1533. printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
  1534. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
  1535. printk("Initiator");
  1536. i++;
  1537. }
  1538. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
  1539. printk("%sTarget", i ? "," : "");
  1540. i++;
  1541. }
  1542. i = 0;
  1543. printk("), ");
  1544. printk("Capabilities=(");
  1545. if (ioc->facts.IOCCapabilities &
  1546. MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
  1547. printk("Raid");
  1548. i++;
  1549. }
  1550. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
  1551. printk("%sTLR", i ? "," : "");
  1552. i++;
  1553. }
  1554. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
  1555. printk("%sMulticast", i ? "," : "");
  1556. i++;
  1557. }
  1558. if (ioc->facts.IOCCapabilities &
  1559. MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
  1560. printk("%sBIDI Target", i ? "," : "");
  1561. i++;
  1562. }
  1563. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
  1564. printk("%sEEDP", i ? "," : "");
  1565. i++;
  1566. }
  1567. if (ioc->facts.IOCCapabilities &
  1568. MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
  1569. printk("%sSnapshot Buffer", i ? "," : "");
  1570. i++;
  1571. }
  1572. if (ioc->facts.IOCCapabilities &
  1573. MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
  1574. printk("%sDiag Trace Buffer", i ? "," : "");
  1575. i++;
  1576. }
  1577. if (ioc->facts.IOCCapabilities &
  1578. MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
  1579. printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
  1580. i++;
  1581. }
  1582. if (ioc->facts.IOCCapabilities &
  1583. MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
  1584. printk("%sTask Set Full", i ? "," : "");
  1585. i++;
  1586. }
  1587. iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
  1588. if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
  1589. printk("%sNCQ", i ? "," : "");
  1590. i++;
  1591. }
  1592. printk(")\n");
  1593. }
  1594. /**
  1595. * _base_static_config_pages - static start of day config pages
  1596. * @ioc: per adapter object
  1597. *
  1598. * Return nothing.
  1599. */
  1600. static void
  1601. _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
  1602. {
  1603. Mpi2ConfigReply_t mpi_reply;
  1604. u32 iounit_pg1_flags;
  1605. mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
  1606. if (ioc->ir_firmware)
  1607. mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
  1608. &ioc->manu_pg10);
  1609. mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
  1610. mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
  1611. mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
  1612. mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
  1613. mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
  1614. _base_display_ioc_capabilities(ioc);
  1615. /*
  1616. * Enable task_set_full handling in iounit_pg1 when the
  1617. * facts capabilities indicate that its supported.
  1618. */
  1619. iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
  1620. if ((ioc->facts.IOCCapabilities &
  1621. MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
  1622. iounit_pg1_flags &=
  1623. ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
  1624. else
  1625. iounit_pg1_flags |=
  1626. MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
  1627. ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
  1628. mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
  1629. }
  1630. /**
  1631. * _base_release_memory_pools - release memory
  1632. * @ioc: per adapter object
  1633. *
  1634. * Free memory allocated from _base_allocate_memory_pools.
  1635. *
  1636. * Retur…