
/drivers/message/fusion/mptsas.c

https://bitbucket.org/ndreys/linux-sunxi
License: GPL-2.0
  1. /*
  2. * linux/drivers/message/fusion/mptsas.c
  3. * For use with LSI PCI chip/adapter(s)
  4. * running LSI Fusion MPT (Message Passing Technology) firmware.
  5. *
  6. * Copyright (c) 1999-2008 LSI Corporation
  7. * (mailto:DL-MPTFusionLinux@lsi.com)
  8. */
  9. /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  10. /*
  11. This program is free software; you can redistribute it and/or modify
  12. it under the terms of the GNU General Public License as published by
  13. the Free Software Foundation; version 2 of the License.
  14. This program is distributed in the hope that it will be useful,
  15. but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. GNU General Public License for more details.
  18. NO WARRANTY
  19. THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  20. CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  21. LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  22. MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  23. solely responsible for determining the appropriateness of using and
  24. distributing the Program and assumes all risks associated with its
  25. exercise of rights under this Agreement, including but not limited to
  26. the risks and costs of program errors, damage to or loss of data,
  27. programs or equipment, and unavailability or interruption of operations.
  28. DISCLAIMER OF LIABILITY
  29. NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  30. DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  31. DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  32. ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  33. TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  34. USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  35. HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  36. You should have received a copy of the GNU General Public License
  37. along with this program; if not, write to the Free Software
  38. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  39. */
  40. /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  41. #include <linux/module.h>
  42. #include <linux/kernel.h>
  43. #include <linux/slab.h>
  44. #include <linux/init.h>
  45. #include <linux/errno.h>
  46. #include <linux/jiffies.h>
  47. #include <linux/workqueue.h>
  48. #include <linux/delay.h> /* for mdelay */
  49. #include <scsi/scsi.h>
  50. #include <scsi/scsi_cmnd.h>
  51. #include <scsi/scsi_device.h>
  52. #include <scsi/scsi_host.h>
  53. #include <scsi/scsi_transport_sas.h>
  54. #include <scsi/scsi_transport.h>
  55. #include <scsi/scsi_dbg.h>
  56. #include "mptbase.h"
  57. #include "mptscsih.h"
  58. #include "mptsas.h"
  59. #define my_NAME "Fusion MPT SAS Host driver"
  60. #define my_VERSION MPT_LINUX_VERSION_COMMON
  61. #define MYNAM "mptsas"
  62. /*
  63. * Reserved channel for integrated raid
  64. */
  65. #define MPTSAS_RAID_CHANNEL 1
  66. #define SAS_CONFIG_PAGE_TIMEOUT 30
  67. MODULE_AUTHOR(MODULEAUTHOR);
  68. MODULE_DESCRIPTION(my_NAME);
  69. MODULE_LICENSE("GPL");
  70. MODULE_VERSION(my_VERSION);
  71. static int mpt_pt_clear;
  72. module_param(mpt_pt_clear, int, 0);
  73. MODULE_PARM_DESC(mpt_pt_clear,
  74. " Clear persistency table: enable=1 "
  75. "(default=MPTSCSIH_PT_CLEAR=0)");
  76. /* scsi mid-layer global parameter is max_report_luns, which is 511 */
  77. #define MPTSAS_MAX_LUN (16895)
  78. static int max_lun = MPTSAS_MAX_LUN;
  79. module_param(max_lun, int, 0);
  80. MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
  81. static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
  82. static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
  83. static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
  84. static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
  85. static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
  86. static void mptsas_firmware_event_work(struct work_struct *work);
  87. static void mptsas_send_sas_event(struct fw_event_work *fw_event);
  88. static void mptsas_send_raid_event(struct fw_event_work *fw_event);
  89. static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
  90. static void mptsas_parse_device_info(struct sas_identify *identify,
  91. struct mptsas_devinfo *device_info);
  92. static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
  93. struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
  94. static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
  95. (MPT_ADAPTER *ioc, u64 sas_address);
  96. static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
  97. struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
  98. static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
  99. struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
  100. static int mptsas_add_end_device(MPT_ADAPTER *ioc,
  101. struct mptsas_phyinfo *phy_info);
  102. static void mptsas_del_end_device(MPT_ADAPTER *ioc,
  103. struct mptsas_phyinfo *phy_info);
  104. static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
  105. static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
  106. (MPT_ADAPTER *ioc, u64 sas_address);
  107. static void mptsas_expander_delete(MPT_ADAPTER *ioc,
  108. struct mptsas_portinfo *port_info, u8 force);
  109. static void mptsas_send_expander_event(struct fw_event_work *fw_event);
  110. static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
  111. static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
  112. static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
  113. static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
  114. static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
  115. void mptsas_schedule_target_reset(void *ioc);
  116. static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
  117. MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
  118. {
  119. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  120. "---- IO UNIT PAGE 0 ------------\n", ioc->name));
  121. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
  122. ioc->name, le16_to_cpu(phy_data->AttachedDeviceHandle)));
  123. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Controller Handle=0x%X\n",
  124. ioc->name, le16_to_cpu(phy_data->ControllerDevHandle)));
  125. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port=0x%X\n",
  126. ioc->name, phy_data->Port));
  127. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port Flags=0x%X\n",
  128. ioc->name, phy_data->PortFlags));
  129. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Flags=0x%X\n",
  130. ioc->name, phy_data->PhyFlags));
  131. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
  132. ioc->name, phy_data->NegotiatedLinkRate));
  133. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  134. "Controller PHY Device Info=0x%X\n", ioc->name,
  135. le32_to_cpu(phy_data->ControllerPhyDeviceInfo)));
  136. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DiscoveryStatus=0x%X\n\n",
  137. ioc->name, le32_to_cpu(phy_data->DiscoveryStatus)));
  138. }
  139. static void mptsas_print_phy_pg0(MPT_ADAPTER *ioc, SasPhyPage0_t *pg0)
  140. {
  141. __le64 sas_address;
  142. memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
  143. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  144. "---- SAS PHY PAGE 0 ------------\n", ioc->name));
  145. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  146. "Attached Device Handle=0x%X\n", ioc->name,
  147. le16_to_cpu(pg0->AttachedDevHandle)));
  148. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
  149. ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
  150. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  151. "Attached PHY Identifier=0x%X\n", ioc->name,
  152. pg0->AttachedPhyIdentifier));
  153. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Attached Device Info=0x%X\n",
  154. ioc->name, le32_to_cpu(pg0->AttachedDeviceInfo)));
  155. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
  156. ioc->name, pg0->ProgrammedLinkRate));
  157. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Change Count=0x%X\n",
  158. ioc->name, pg0->ChangeCount));
  159. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Info=0x%X\n\n",
  160. ioc->name, le32_to_cpu(pg0->PhyInfo)));
  161. }
  162. static void mptsas_print_phy_pg1(MPT_ADAPTER *ioc, SasPhyPage1_t *pg1)
  163. {
  164. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  165. "---- SAS PHY PAGE 1 ------------\n", ioc->name));
  166. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Invalid Dword Count=0x%x\n",
  167. ioc->name, pg1->InvalidDwordCount));
  168. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  169. "Running Disparity Error Count=0x%x\n", ioc->name,
  170. pg1->RunningDisparityErrorCount));
  171. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  172. "Loss Dword Synch Count=0x%x\n", ioc->name,
  173. pg1->LossDwordSynchCount));
  174. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  175. "PHY Reset Problem Count=0x%x\n\n", ioc->name,
  176. pg1->PhyResetProblemCount));
  177. }
  178. static void mptsas_print_device_pg0(MPT_ADAPTER *ioc, SasDevicePage0_t *pg0)
  179. {
  180. __le64 sas_address;
  181. memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
  182. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  183. "---- SAS DEVICE PAGE 0 ---------\n", ioc->name));
  184. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
  185. ioc->name, le16_to_cpu(pg0->DevHandle)));
  186. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Handle=0x%X\n",
  187. ioc->name, le16_to_cpu(pg0->ParentDevHandle)));
  188. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Enclosure Handle=0x%X\n",
  189. ioc->name, le16_to_cpu(pg0->EnclosureHandle)));
  190. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Slot=0x%X\n",
  191. ioc->name, le16_to_cpu(pg0->Slot)));
  192. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
  193. ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
  194. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Target ID=0x%X\n",
  195. ioc->name, pg0->TargetID));
  196. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Bus=0x%X\n",
  197. ioc->name, pg0->Bus));
  198. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Phy Num=0x%X\n",
  199. ioc->name, pg0->PhyNum));
  200. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Access Status=0x%X\n",
  201. ioc->name, le16_to_cpu(pg0->AccessStatus)));
  202. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Device Info=0x%X\n",
  203. ioc->name, le32_to_cpu(pg0->DeviceInfo)));
  204. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Flags=0x%X\n",
  205. ioc->name, le16_to_cpu(pg0->Flags)));
  206. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n\n",
  207. ioc->name, pg0->PhysicalPort));
  208. }
  209. static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
  210. {
  211. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  212. "---- SAS EXPANDER PAGE 1 ------------\n", ioc->name));
  213. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n",
  214. ioc->name, pg1->PhysicalPort));
  215. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Identifier=0x%X\n",
  216. ioc->name, pg1->PhyIdentifier));
  217. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
  218. ioc->name, pg1->NegotiatedLinkRate));
  219. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
  220. ioc->name, pg1->ProgrammedLinkRate));
  221. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hardware Link Rate=0x%X\n",
  222. ioc->name, pg1->HwLinkRate));
  223. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Owner Device Handle=0x%X\n",
  224. ioc->name, le16_to_cpu(pg1->OwnerDevHandle)));
  225. dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  226. "Attached Device Handle=0x%X\n\n", ioc->name,
  227. le16_to_cpu(pg1->AttachedDevHandle)));
  228. }
  229. /* inhibit sas firmware event handling */
  230. static void
  231. mptsas_fw_event_off(MPT_ADAPTER *ioc)
  232. {
  233. unsigned long flags;
  234. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  235. ioc->fw_events_off = 1;
  236. ioc->sas_discovery_quiesce_io = 0;
  237. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  238. }
  239. /* enable sas firmware event handling */
  240. static void
  241. mptsas_fw_event_on(MPT_ADAPTER *ioc)
  242. {
  243. unsigned long flags;
  244. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  245. ioc->fw_events_off = 0;
  246. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  247. }
  248. /* queue a sas firmware event */
  249. static void
  250. mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
  251. unsigned long delay)
  252. {
  253. unsigned long flags;
  254. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  255. list_add_tail(&fw_event->list, &ioc->fw_event_list);
  256. INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
  257. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
  258. ioc->name, __func__, fw_event));
  259. queue_delayed_work(ioc->fw_event_q, &fw_event->work,
  260. delay);
  261. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  262. }
  263. /* requeue a sas firmware event */
  264. static void
  265. mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
  266. unsigned long delay)
  267. {
  268. unsigned long flags;
  269. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  270. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
  271. "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
  272. fw_event->retries++;
  273. queue_delayed_work(ioc->fw_event_q, &fw_event->work,
  274. msecs_to_jiffies(delay));
  275. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  276. }
  277. /* free memory associated to a sas firmware event */
  278. static void
  279. mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
  280. {
  281. unsigned long flags;
  282. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  283. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
  284. ioc->name, __func__, fw_event));
  285. list_del(&fw_event->list);
  286. kfree(fw_event);
  287. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  288. }
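/*
 * Firmware event lifecycle (summary): the event handler allocates a
 * struct fw_event_work, copies the event data into it and calls
 * mptsas_add_fw_event(); mptsas_firmware_event_work() later runs from
 * ioc->fw_event_q, dispatches on fw_event->event and releases the event
 * with mptsas_free_fw_event() (or mptsas_requeue_fw_event() to retry).
 */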
  289. /* walk the firmware event queue, and either stop or wait for
  290. * outstanding events to complete */
  291. static void
  292. mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
  293. {
  294. struct fw_event_work *fw_event, *next;
  295. struct mptsas_target_reset_event *target_reset_list, *n;
  296. MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
  297. /* flush the target_reset_list */
  298. if (!list_empty(&hd->target_reset_list)) {
  299. list_for_each_entry_safe(target_reset_list, n,
  300. &hd->target_reset_list, list) {
  301. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  302. "%s: removing target reset for id=%d\n",
  303. ioc->name, __func__,
  304. target_reset_list->sas_event_data.TargetID));
  305. list_del(&target_reset_list->list);
  306. kfree(target_reset_list);
  307. }
  308. }
  309. if (list_empty(&ioc->fw_event_list) ||
  310. !ioc->fw_event_q || in_interrupt())
  311. return;
  312. list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
  313. if (cancel_delayed_work(&fw_event->work))
  314. mptsas_free_fw_event(ioc, fw_event);
  315. }
  316. }
  317. static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
  318. {
  319. struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
  320. return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
  321. }
  322. static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
  323. {
  324. struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
  325. return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
  326. }
  327. /*
  328. * mptsas_find_portinfo_by_handle
  329. *
  330. * This function should be called with the sas_topology_mutex already held
  331. */
  332. static struct mptsas_portinfo *
  333. mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
  334. {
  335. struct mptsas_portinfo *port_info, *rc=NULL;
  336. int i;
  337. list_for_each_entry(port_info, &ioc->sas_topology, list)
  338. for (i = 0; i < port_info->num_phys; i++)
  339. if (port_info->phy_info[i].identify.handle == handle) {
  340. rc = port_info;
  341. goto out;
  342. }
  343. out:
  344. return rc;
  345. }
  346. /**
  347. * mptsas_find_portinfo_by_sas_address - find port info for a SAS address
  348. * @ioc: Pointer to MPT_ADAPTER structure
  349. * @sas_address: SAS address to search the topology for
  350. *
  351. * This function takes and releases the sas_topology_mutex itself.
  352. *
  353. **/
  354. static struct mptsas_portinfo *
  355. mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
  356. {
  357. struct mptsas_portinfo *port_info, *rc = NULL;
  358. int i;
  359. if (sas_address >= ioc->hba_port_sas_addr &&
  360. sas_address < (ioc->hba_port_sas_addr +
  361. ioc->hba_port_num_phy))
  362. return ioc->hba_port_info;
  363. mutex_lock(&ioc->sas_topology_mutex);
  364. list_for_each_entry(port_info, &ioc->sas_topology, list)
  365. for (i = 0; i < port_info->num_phys; i++)
  366. if (port_info->phy_info[i].identify.sas_address ==
  367. sas_address) {
  368. rc = port_info;
  369. goto out;
  370. }
  371. out:
  372. mutex_unlock(&ioc->sas_topology_mutex);
  373. return rc;
  374. }
  375. /*
  376. * Returns true if there is a scsi end device
  377. */
  378. static inline int
  379. mptsas_is_end_device(struct mptsas_devinfo * attached)
  380. {
  381. if ((attached->sas_address) &&
  382. (attached->device_info &
  383. MPI_SAS_DEVICE_INFO_END_DEVICE) &&
  384. ((attached->device_info &
  385. MPI_SAS_DEVICE_INFO_SSP_TARGET) |
  386. (attached->device_info &
  387. MPI_SAS_DEVICE_INFO_STP_TARGET) |
  388. (attached->device_info &
  389. MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
  390. return 1;
  391. else
  392. return 0;
  393. }
  394. /* no mutex */
  395. static void
  396. mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
  397. {
  398. struct mptsas_portinfo *port_info;
  399. struct mptsas_phyinfo *phy_info;
  400. u8 i;
  401. if (!port_details)
  402. return;
  403. port_info = port_details->port_info;
  404. phy_info = port_info->phy_info;
  405. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
  406. "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
  407. port_details->num_phys, (unsigned long long)
  408. port_details->phy_bitmask));
  409. for (i = 0; i < port_info->num_phys; i++, phy_info++) {
  410. if(phy_info->port_details != port_details)
  411. continue;
  412. memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
  413. mptsas_set_rphy(ioc, phy_info, NULL);
  414. phy_info->port_details = NULL;
  415. }
  416. kfree(port_details);
  417. }
  418. static inline struct sas_rphy *
  419. mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
  420. {
  421. if (phy_info->port_details)
  422. return phy_info->port_details->rphy;
  423. else
  424. return NULL;
  425. }
  426. static inline void
  427. mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
  428. {
  429. if (phy_info->port_details) {
  430. phy_info->port_details->rphy = rphy;
  431. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
  432. ioc->name, rphy));
  433. }
  434. if (rphy) {
  435. dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
  436. &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
  437. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
  438. ioc->name, rphy, rphy->dev.release));
  439. }
  440. }
  441. static inline struct sas_port *
  442. mptsas_get_port(struct mptsas_phyinfo *phy_info)
  443. {
  444. if (phy_info->port_details)
  445. return phy_info->port_details->port;
  446. else
  447. return NULL;
  448. }
  449. static inline void
  450. mptsas_set_port(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_port *port)
  451. {
  452. if (phy_info->port_details)
  453. phy_info->port_details->port = port;
  454. if (port) {
  455. dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
  456. &port->dev, MYIOC_s_FMT "add:", ioc->name));
  457. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "port=%p release=%p\n",
  458. ioc->name, port, port->dev.release));
  459. }
  460. }
  461. static inline struct scsi_target *
  462. mptsas_get_starget(struct mptsas_phyinfo *phy_info)
  463. {
  464. if (phy_info->port_details)
  465. return phy_info->port_details->starget;
  466. else
  467. return NULL;
  468. }
  469. static inline void
  470. mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target *
  471. starget)
  472. {
  473. if (phy_info->port_details)
  474. phy_info->port_details->starget = starget;
  475. }
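/*
 * Note: the rphy/port/starget get and set helpers above simply wrap the
 * shared port_details structure and tolerate a NULL port_details for
 * phys that are not (yet) part of a formed port.
 */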
  476. /**
  477. * mptsas_add_device_component - add a device to the sas_device_info_list
  478. * @ioc: Pointer to MPT_ADAPTER structure
  479. * @channel: fw mapped channel of the device
  480. * @id: fw mapped id of the device
  481. * @sas_address: SAS address of the device
  482. * @device_info: device type bitmask (SSP/STP/SATA)
  483. *
  484. **/
  485. static void
  486. mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
  487. u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
  488. {
  489. struct mptsas_device_info *sas_info, *next;
  490. struct scsi_device *sdev;
  491. struct scsi_target *starget;
  492. struct sas_rphy *rphy;
  493. /*
  494. * Delete all matching devices out of the list
  495. */
  496. mutex_lock(&ioc->sas_device_info_mutex);
  497. list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
  498. list) {
  499. if (!sas_info->is_logical_volume &&
  500. (sas_info->sas_address == sas_address ||
  501. (sas_info->fw.channel == channel &&
  502. sas_info->fw.id == id))) {
  503. list_del(&sas_info->list);
  504. kfree(sas_info);
  505. }
  506. }
  507. sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
  508. if (!sas_info)
  509. goto out;
  510. /*
  511. * Set Firmware mapping
  512. */
  513. sas_info->fw.id = id;
  514. sas_info->fw.channel = channel;
  515. sas_info->sas_address = sas_address;
  516. sas_info->device_info = device_info;
  517. sas_info->slot = slot;
  518. sas_info->enclosure_logical_id = enclosure_logical_id;
  519. INIT_LIST_HEAD(&sas_info->list);
  520. list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
  521. /*
  522. * Set OS mapping
  523. */
  524. shost_for_each_device(sdev, ioc->sh) {
  525. starget = scsi_target(sdev);
  526. rphy = dev_to_rphy(starget->dev.parent);
  527. if (rphy->identify.sas_address == sas_address) {
  528. sas_info->os.id = starget->id;
  529. sas_info->os.channel = starget->channel;
  530. }
  531. }
  532. out:
  533. mutex_unlock(&ioc->sas_device_info_mutex);
  534. return;
  535. }
  536. /**
  537. * mptsas_add_device_component_by_fw - add a device by its fw mapping
  538. * @ioc: Pointer to MPT_ADAPTER structure
  539. * @channel: fw mapped channel of the device
  540. * @id: fw mapped id of the device
  541. *
  542. **/
  543. static void
  544. mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
  545. {
  546. struct mptsas_devinfo sas_device;
  547. struct mptsas_enclosure enclosure_info;
  548. int rc;
  549. rc = mptsas_sas_device_pg0(ioc, &sas_device,
  550. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
  551. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  552. (channel << 8) + id);
  553. if (rc)
  554. return;
  555. memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
  556. mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
  557. (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
  558. MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
  559. sas_device.handle_enclosure);
  560. mptsas_add_device_component(ioc, sas_device.channel,
  561. sas_device.id, sas_device.sas_address, sas_device.device_info,
  562. sas_device.slot, enclosure_info.enclosure_logical_id);
  563. }
  564. /**
  565. * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
  566. * @ioc: Pointer to MPT_ADAPTER structure
  567. * @starget: scsi target of the RAID volume; its hidden physical-disk
  568. * components are added to the device list
  569. *
  570. **/
  571. static void
  572. mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
  573. struct scsi_target *starget)
  574. {
  575. CONFIGPARMS cfg;
  576. ConfigPageHeader_t hdr;
  577. dma_addr_t dma_handle;
  578. pRaidVolumePage0_t buffer = NULL;
  579. int i;
  580. RaidPhysDiskPage0_t phys_disk;
  581. struct mptsas_device_info *sas_info, *next;
  582. memset(&cfg, 0 , sizeof(CONFIGPARMS));
  583. memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
  584. hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
  585. /* assumption that all volumes on channel = 0 */
  586. cfg.pageAddr = starget->id;
  587. cfg.cfghdr.hdr = &hdr;
  588. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  589. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  590. if (mpt_config(ioc, &cfg) != 0)
  591. goto out;
  592. if (!hdr.PageLength)
  593. goto out;
  594. buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
  595. &dma_handle);
  596. if (!buffer)
  597. goto out;
  598. cfg.physAddr = dma_handle;
  599. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  600. if (mpt_config(ioc, &cfg) != 0)
  601. goto out;
  602. if (!buffer->NumPhysDisks)
  603. goto out;
  604. /*
  605. * Adding entry for hidden components
  606. */
  607. for (i = 0; i < buffer->NumPhysDisks; i++) {
  608. if (mpt_raid_phys_disk_pg0(ioc,
  609. buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
  610. continue;
  611. mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
  612. phys_disk.PhysDiskID);
  613. mutex_lock(&ioc->sas_device_info_mutex);
  614. list_for_each_entry(sas_info, &ioc->sas_device_info_list,
  615. list) {
  616. if (!sas_info->is_logical_volume &&
  617. (sas_info->fw.channel == phys_disk.PhysDiskBus &&
  618. sas_info->fw.id == phys_disk.PhysDiskID)) {
  619. sas_info->is_hidden_raid_component = 1;
  620. sas_info->volume_id = starget->id;
  621. }
  622. }
  623. mutex_unlock(&ioc->sas_device_info_mutex);
  624. }
  625. /*
  626. * Delete all matching devices out of the list
  627. */
  628. mutex_lock(&ioc->sas_device_info_mutex);
  629. list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
  630. list) {
  631. if (sas_info->is_logical_volume && sas_info->fw.id ==
  632. starget->id) {
  633. list_del(&sas_info->list);
  634. kfree(sas_info);
  635. }
  636. }
  637. sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
  638. if (sas_info) {
  639. sas_info->fw.id = starget->id;
  640. sas_info->os.id = starget->id;
  641. sas_info->os.channel = starget->channel;
  642. sas_info->is_logical_volume = 1;
  643. INIT_LIST_HEAD(&sas_info->list);
  644. list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
  645. }
  646. mutex_unlock(&ioc->sas_device_info_mutex);
  647. out:
  648. if (buffer)
  649. pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
  650. dma_handle);
  651. }
  652. /**
  653. * mptsas_add_device_component_starget - add an end device by its scsi target
  654. * @ioc: Pointer to MPT_ADAPTER structure
  655. * @starget: scsi target of the attached end device
  656. *
  657. **/
  658. static void
  659. mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
  660. struct scsi_target *starget)
  661. {
  662. VirtTarget *vtarget;
  663. struct sas_rphy *rphy;
  664. struct mptsas_phyinfo *phy_info = NULL;
  665. struct mptsas_enclosure enclosure_info;
  666. rphy = dev_to_rphy(starget->dev.parent);
  667. vtarget = starget->hostdata;
  668. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  669. rphy->identify.sas_address);
  670. if (!phy_info)
  671. return;
  672. memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
  673. mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
  674. (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
  675. MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
  676. phy_info->attached.handle_enclosure);
  677. mptsas_add_device_component(ioc, phy_info->attached.channel,
  678. phy_info->attached.id, phy_info->attached.sas_address,
  679. phy_info->attached.device_info,
  680. phy_info->attached.slot, enclosure_info.enclosure_logical_id);
  681. }
  682. /**
  683. * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
  684. * @ioc: Pointer to MPT_ADAPTER structure
  685. * @channel: os mapped channel of the device
  686. * @id: os mapped id of the device
  687. *
  688. **/
  689. static void
  690. mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
  691. {
  692. struct mptsas_device_info *sas_info, *next;
  693. /*
  694. * Set is_cached flag
  695. */
  696. list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
  697. list) {
  698. if (sas_info->os.channel == channel && sas_info->os.id == id)
  699. sas_info->is_cached = 1;
  700. }
  701. }
  702. /**
  703. * mptsas_del_device_components - Cleaning the list
  704. * @ioc: Pointer to MPT_ADAPTER structure
  705. *
  706. **/
  707. static void
  708. mptsas_del_device_components(MPT_ADAPTER *ioc)
  709. {
  710. struct mptsas_device_info *sas_info, *next;
  711. mutex_lock(&ioc->sas_device_info_mutex);
  712. list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
  713. list) {
  714. list_del(&sas_info->list);
  715. kfree(sas_info);
  716. }
  717. mutex_unlock(&ioc->sas_device_info_mutex);
  718. }
  719. /*
  720. * mptsas_setup_wide_ports
  721. *
  722. * Updates for new and existing narrow/wide port configuration
  723. * in the sas_topology
  724. */
  725. static void
  726. mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
  727. {
  728. struct mptsas_portinfo_details * port_details;
  729. struct mptsas_phyinfo *phy_info, *phy_info_cmp;
  730. u64 sas_address;
  731. int i, j;
  732. mutex_lock(&ioc->sas_topology_mutex);
  733. phy_info = port_info->phy_info;
  734. for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
  735. if (phy_info->attached.handle)
  736. continue;
  737. port_details = phy_info->port_details;
  738. if (!port_details)
  739. continue;
  740. if (port_details->num_phys < 2)
  741. continue;
  742. /*
  743. * Removing a phy from a port, letting the last
  744. * phy be removed by firmware events.
  745. */
  746. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  747. "%s: [%p]: deleting phy = %d\n",
  748. ioc->name, __func__, port_details, i));
  749. port_details->num_phys--;
  750. port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
  751. memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
  752. if (phy_info->phy) {
  753. devtprintk(ioc, dev_printk(KERN_DEBUG,
  754. &phy_info->phy->dev, MYIOC_s_FMT
  755. "delete phy %d, phy-obj (0x%p)\n", ioc->name,
  756. phy_info->phy_id, phy_info->phy));
  757. sas_port_delete_phy(port_details->port, phy_info->phy);
  758. }
  759. phy_info->port_details = NULL;
  760. }
  761. /*
  762. * Populate and refresh the tree
  763. */
  764. phy_info = port_info->phy_info;
  765. for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
  766. sas_address = phy_info->attached.sas_address;
  767. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "phy_id=%d sas_address=0x%018llX\n",
  768. ioc->name, i, (unsigned long long)sas_address));
  769. if (!sas_address)
  770. continue;
  771. port_details = phy_info->port_details;
  772. /*
  773. * Forming a port
  774. */
  775. if (!port_details) {
  776. port_details = kzalloc(sizeof(struct
  777. mptsas_portinfo_details), GFP_KERNEL);
  778. if (!port_details)
  779. goto out;
  780. port_details->num_phys = 1;
  781. port_details->port_info = port_info;
  782. if (phy_info->phy_id < 64 )
  783. port_details->phy_bitmask |=
  784. (1 << phy_info->phy_id);
  785. phy_info->sas_port_add_phy=1;
  786. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tForming port\n\t\t"
  787. "phy_id=%d sas_address=0x%018llX\n",
  788. ioc->name, i, (unsigned long long)sas_address));
  789. phy_info->port_details = port_details;
  790. }
  791. if (i == port_info->num_phys - 1)
  792. continue;
  793. phy_info_cmp = &port_info->phy_info[i + 1];
  794. for (j = i + 1 ; j < port_info->num_phys ; j++,
  795. phy_info_cmp++) {
  796. if (!phy_info_cmp->attached.sas_address)
  797. continue;
  798. if (sas_address != phy_info_cmp->attached.sas_address)
  799. continue;
  800. if (phy_info_cmp->port_details == port_details )
  801. continue;
  802. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  803. "\t\tphy_id=%d sas_address=0x%018llX\n",
  804. ioc->name, j, (unsigned long long)
  805. phy_info_cmp->attached.sas_address));
  806. if (phy_info_cmp->port_details) {
  807. port_details->rphy =
  808. mptsas_get_rphy(phy_info_cmp);
  809. port_details->port =
  810. mptsas_get_port(phy_info_cmp);
  811. port_details->starget =
  812. mptsas_get_starget(phy_info_cmp);
  813. port_details->num_phys =
  814. phy_info_cmp->port_details->num_phys;
  815. if (!phy_info_cmp->port_details->num_phys)
  816. kfree(phy_info_cmp->port_details);
  817. } else
  818. phy_info_cmp->sas_port_add_phy=1;
  819. /*
  820. * Adding a phy to a port
  821. */
  822. phy_info_cmp->port_details = port_details;
  823. if (phy_info_cmp->phy_id < 64 )
  824. port_details->phy_bitmask |=
  825. (1 << phy_info_cmp->phy_id);
  826. port_details->num_phys++;
  827. }
  828. }
  829. out:
  830. for (i = 0; i < port_info->num_phys; i++) {
  831. port_details = port_info->phy_info[i].port_details;
  832. if (!port_details)
  833. continue;
  834. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  835. "%s: [%p]: phy_id=%02d num_phys=%02d "
  836. "bitmask=0x%016llX\n", ioc->name, __func__,
  837. port_details, i, port_details->num_phys,
  838. (unsigned long long)port_details->phy_bitmask));
  839. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
  840. ioc->name, port_details->port, port_details->rphy));
  841. }
  842. dsaswideprintk(ioc, printk("\n"));
  843. mutex_unlock(&ioc->sas_topology_mutex);
  844. }
  845. /**
  846. * mptsas_find_vtarget - find the VirtTarget for a fw mapped channel/id
  847. *
  848. * @ioc: Pointer to MPT_ADAPTER structure
  849. * @channel: fw mapped channel
  850. * @id: fw mapped id
  851. *
  852. **/
  853. static VirtTarget *
  854. mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
  855. {
  856. struct scsi_device *sdev;
  857. VirtDevice *vdevice;
  858. VirtTarget *vtarget = NULL;
  859. shost_for_each_device(sdev, ioc->sh) {
  860. vdevice = sdev->hostdata;
  861. if ((vdevice == NULL) ||
  862. (vdevice->vtarget == NULL))
  863. continue;
  864. if ((vdevice->vtarget->tflags &
  865. MPT_TARGET_FLAGS_RAID_COMPONENT ||
  866. vdevice->vtarget->raidVolume))
  867. continue;
  868. if (vdevice->vtarget->id == id &&
  869. vdevice->vtarget->channel == channel)
  870. vtarget = vdevice->vtarget;
  871. }
  872. return vtarget;
  873. }
  874. static void
  875. mptsas_queue_device_delete(MPT_ADAPTER *ioc,
  876. MpiEventDataSasDeviceStatusChange_t *sas_event_data)
  877. {
  878. struct fw_event_work *fw_event;
  879. int sz;
  880. sz = offsetof(struct fw_event_work, event_data) +
  881. sizeof(MpiEventDataSasDeviceStatusChange_t);
  882. fw_event = kzalloc(sz, GFP_ATOMIC);
  883. if (!fw_event) {
  884. printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
  885. ioc->name, __func__, __LINE__);
  886. return;
  887. }
  888. memcpy(fw_event->event_data, sas_event_data,
  889. sizeof(MpiEventDataSasDeviceStatusChange_t));
  890. fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
  891. fw_event->ioc = ioc;
  892. mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
  893. }
  894. static void
  895. mptsas_queue_rescan(MPT_ADAPTER *ioc)
  896. {
  897. struct fw_event_work *fw_event;
  898. int sz;
  899. sz = offsetof(struct fw_event_work, event_data);
  900. fw_event = kzalloc(sz, GFP_ATOMIC);
  901. if (!fw_event) {
  902. printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
  903. ioc->name, __func__, __LINE__);
  904. return;
  905. }
  906. fw_event->event = -1;
  907. fw_event->ioc = ioc;
  908. mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
  909. }
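/*
 * Note: fw_event->event == -1 is an internal marker (not a real MPI event
 * code); mptsas_firmware_event_work() treats it as a request to rescan
 * the SAS topology after a host reset.
 */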
  910. /**
  911. * mptsas_target_reset
  912. *
  913. * Issues TARGET_RESET to end device using handshaking method
  914. *
  915. * @ioc: Pointer to MPT_ADAPTER structure
  916. * @channel: fw mapped channel
  917. * @id: fw mapped id
  918. *
  919. * Returns (1) success
  920. * (0) failure
  921. *
  922. **/
  923. static int
  924. mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
  925. {
  926. MPT_FRAME_HDR *mf;
  927. SCSITaskMgmt_t *pScsiTm;
  928. if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
  929. return 0;
  930. mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
  931. if (mf == NULL) {
  932. dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
  933. "%s, no msg frames @%d!!\n", ioc->name,
  934. __func__, __LINE__));
  935. goto out_fail;
  936. }
  937. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
  938. ioc->name, mf));
  939. /* Format the Request
  940. */
  941. pScsiTm = (SCSITaskMgmt_t *) mf;
  942. memset (pScsiTm, 0, sizeof(SCSITaskMgmt_t));
  943. pScsiTm->TargetID = id;
  944. pScsiTm->Bus = channel;
  945. pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
  946. pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  947. pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
  948. DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
  949. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  950. "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
  951. ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
  952. mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
  953. return 1;
  954. out_fail:
  955. mpt_clear_taskmgmt_in_progress_flag(ioc);
  956. return 0;
  957. }
  958. static void
  959. mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
  960. {
  961. scsi_device_set_state(sdev, SDEV_BLOCK);
  962. }
  963. static void
  964. mptsas_block_io_starget(struct scsi_target *starget)
  965. {
  966. if (starget)
  967. starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
  968. }
  969. /**
  970. * mptsas_target_reset_queue
  971. *
  972. * Receive a request for TARGET_RESET after a firmware
  973. * NOT_RESPONDING_EVENT; the request is put on a linked list and
  974. * issued later if a task management command is already in progress.
  975. *
  976. * @ioc: Pointer to MPT_ADAPTER structure
  977. * @sas_event_data: SAS device status change event data
  978. *
  979. **/
  980. static void
  981. mptsas_target_reset_queue(MPT_ADAPTER *ioc,
  982. EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
  983. {
  984. MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
  985. VirtTarget *vtarget = NULL;
  986. struct mptsas_target_reset_event *target_reset_list;
  987. u8 id, channel;
  988. id = sas_event_data->TargetID;
  989. channel = sas_event_data->Bus;
  990. vtarget = mptsas_find_vtarget(ioc, channel, id);
  991. if (vtarget) {
  992. mptsas_block_io_starget(vtarget->starget);
  993. vtarget->deleted = 1; /* block IO */
  994. }
  995. target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
  996. GFP_ATOMIC);
  997. if (!target_reset_list) {
  998. dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
  999. "%s, failed to allocate mem @%d..!!\n",
  1000. ioc->name, __func__, __LINE__));
  1001. return;
  1002. }
  1003. memcpy(&target_reset_list->sas_event_data, sas_event_data,
  1004. sizeof(*sas_event_data));
  1005. list_add_tail(&target_reset_list->list, &hd->target_reset_list);
  1006. target_reset_list->time_count = jiffies;
  1007. if (mptsas_target_reset(ioc, channel, id)) {
  1008. target_reset_list->target_reset_issued = 1;
  1009. }
  1010. }
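/*
 * Target resets are serialized: mptsas_target_reset() bails out when
 * another task management command is already in progress, so queued
 * entries on hd->target_reset_list are issued later from
 * mptsas_taskmgmt_complete()/mptsas_schedule_target_reset().
 */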
  1011. /**
  1012. * mptsas_schedule_target_reset - send pending target reset
  1013. * @iocp: per adapter object
  1014. *
  1015. * This function will delete scheduled target reset from the list and
  1016. * try to send next target reset. This will be called from completion
  1017. * context of any Task management command.
  1018. */
  1019. void
  1020. mptsas_schedule_target_reset(void *iocp)
  1021. {
  1022. MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
  1023. MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
  1024. struct list_head *head = &hd->target_reset_list;
  1025. struct mptsas_target_reset_event *target_reset_list;
  1026. u8 id, channel;
  1027. /*
  1028. * issue target reset to next device in the queue
  1029. */
  1030. head = &hd->target_reset_list;
  1031. if (list_empty(head))
  1032. return;
  1033. target_reset_list = list_entry(head->next,
  1034. struct mptsas_target_reset_event, list);
  1035. id = target_reset_list->sas_event_data.TargetID;
  1036. channel = target_reset_list->sas_event_data.Bus;
  1037. target_reset_list->time_count = jiffies;
  1038. if (mptsas_target_reset(ioc, channel, id))
  1039. target_reset_list->target_reset_issued = 1;
  1040. return;
  1041. }
  1042. /**
  1043. * mptsas_taskmgmt_complete - complete SAS task management function
  1044. * @ioc: Pointer to MPT_ADAPTER structure
  1045. *
  1046. * Completion for TARGET_RESET after NOT_RESPONDING_EVENT: enable the work
  1047. * queue to finish removing the device from the upper layers, then send the
  1048. * next TARGET_RESET in the queue.
  1049. **/
  1050. static int
  1051. mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
  1052. {
  1053. MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
  1054. struct list_head *head = &hd->target_reset_list;
  1055. u8 id, channel;
  1056. struct mptsas_target_reset_event *target_reset_list;
  1057. SCSITaskMgmtReply_t *pScsiTmReply;
  1058. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
  1059. "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
  1060. pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
  1061. if (pScsiTmReply) {
  1062. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1063. "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
  1064. "\ttask_type = 0x%02X, iocstatus = 0x%04X "
  1065. "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
  1066. "term_cmnds = %d\n", ioc->name,
  1067. pScsiTmReply->Bus, pScsiTmReply->TargetID,
  1068. pScsiTmReply->TaskType,
  1069. le16_to_cpu(pScsiTmReply->IOCStatus),
  1070. le32_to_cpu(pScsiTmReply->IOCLogInfo),
  1071. pScsiTmReply->ResponseCode,
  1072. le32_to_cpu(pScsiTmReply->TerminationCount)));
  1073. if (pScsiTmReply->ResponseCode)
  1074. mptscsih_taskmgmt_response_code(ioc,
  1075. pScsiTmReply->ResponseCode);
  1076. }
  1077. if (pScsiTmReply && (pScsiTmReply->TaskType ==
  1078. MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
  1079. MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
  1080. ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
  1081. ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
  1082. memcpy(ioc->taskmgmt_cmds.reply, mr,
  1083. min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
  1084. if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
  1085. ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
  1086. complete(&ioc->taskmgmt_cmds.done);
  1087. return 1;
  1088. }
  1089. return 0;
  1090. }
  1091. mpt_clear_taskmgmt_in_progress_flag(ioc);
  1092. if (list_empty(head))
  1093. return 1;
  1094. target_reset_list = list_entry(head->next,
  1095. struct mptsas_target_reset_event, list);
  1096. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1097. "TaskMgmt: completed (%d seconds)\n",
  1098. ioc->name, jiffies_to_msecs(jiffies -
  1099. target_reset_list->time_count)/1000));
  1100. id = pScsiTmReply->TargetID;
  1101. channel = pScsiTmReply->Bus;
  1102. target_reset_list->time_count = jiffies;
  1103. /*
  1104. * retry target reset
  1105. */
  1106. if (!target_reset_list->target_reset_issued) {
  1107. if (mptsas_target_reset(ioc, channel, id))
  1108. target_reset_list->target_reset_issued = 1;
  1109. return 1;
  1110. }
  1111. /*
  1112. * enable work queue to remove device from upper layers
  1113. */
  1114. list_del(&target_reset_list->list);
  1115. if (!ioc->fw_events_off)
  1116. mptsas_queue_device_delete(ioc,
  1117. &target_reset_list->sas_event_data);
  1118. ioc->schedule_target_reset(ioc);
  1119. return 1;
  1120. }
  1121. /**
  1122. * mptsas_ioc_reset - SAS-specific IOC reset callback
  1123. *
  1124. * @ioc: Pointer to MPT_ADAPTER structure
  1125. * @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
  1126. *
  1127. **/
  1128. static int
  1129. mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
  1130. {
  1131. MPT_SCSI_HOST *hd;
  1132. int rc;
  1133. rc = mptscsih_ioc_reset(ioc, reset_phase);
  1134. if ((ioc->bus_type != SAS) || (!rc))
  1135. return rc;
  1136. hd = shost_priv(ioc->sh);
  1137. if (!hd->ioc)
  1138. goto out;
  1139. switch (reset_phase) {
  1140. case MPT_IOC_SETUP_RESET:
  1141. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1142. "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
  1143. mptsas_fw_event_off(ioc);
  1144. break;
  1145. case MPT_IOC_PRE_RESET:
  1146. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1147. "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
  1148. break;
  1149. case MPT_IOC_POST_RESET:
  1150. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1151. "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
  1152. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
  1153. ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
  1154. complete(&ioc->sas_mgmt.done);
  1155. }
  1156. mptsas_cleanup_fw_event_q(ioc);
  1157. mptsas_queue_rescan(ioc);
  1158. break;
  1159. default:
  1160. break;
  1161. }
  1162. out:
  1163. return rc;
  1164. }
  1165. /**
  1166. * enum device_state - device readiness states used when adding a device
  1167. * @DEVICE_RETRY: need to retry the TUR
  1168. * @DEVICE_ERROR: TUR returned an error, don't add the device
  1169. * @DEVICE_READY: device can be added
  1170. *
  1171. */
  1172. enum device_state{
  1173. DEVICE_RETRY,
  1174. DEVICE_ERROR,
  1175. DEVICE_READY,
  1176. };
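/*
 * Config page readers below follow the usual two-step MPT pattern:
 * a PAGE_HEADER request to learn the page length, then a DMA buffer
 * allocation and a READ_CURRENT request to fetch the page contents.
 */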
  1177. static int
  1178. mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
  1179. u32 form, u32 form_specific)
  1180. {
  1181. ConfigExtendedPageHeader_t hdr;
  1182. CONFIGPARMS cfg;
  1183. SasEnclosurePage0_t *buffer;
  1184. dma_addr_t dma_handle;
  1185. int error;
  1186. __le64 le_identifier;
  1187. memset(&hdr, 0, sizeof(hdr));
  1188. hdr.PageVersion = MPI_SASENCLOSURE0_PAGEVERSION;
  1189. hdr.PageNumber = 0;
  1190. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  1191. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_ENCLOSURE;
  1192. cfg.cfghdr.ehdr = &hdr;
  1193. cfg.physAddr = -1;
  1194. cfg.pageAddr = form + form_specific;
  1195. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  1196. cfg.dir = 0; /* read */
  1197. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  1198. error = mpt_config(ioc, &cfg);
  1199. if (error)
  1200. goto out;
  1201. if (!hdr.ExtPageLength) {
  1202. error = -ENXIO;
  1203. goto out;
  1204. }
  1205. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  1206. &dma_handle);
  1207. if (!buffer) {
  1208. error = -ENOMEM;
  1209. goto out;
  1210. }
  1211. cfg.physAddr = dma_handle;
  1212. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  1213. error = mpt_config(ioc, &cfg);
  1214. if (error)
  1215. goto out_free_consistent;
  1216. /* save config data */
  1217. memcpy(&le_identifier, &buffer->EnclosureLogicalID, sizeof(__le64));
  1218. enclosure->enclosure_logical_id = le64_to_cpu(le_identifier);
  1219. enclosure->enclosure_handle = le16_to_cpu(buffer->EnclosureHandle);
  1220. enclosure->flags = le16_to_cpu(buffer->Flags);
  1221. enclosure->num_slot = le16_to_cpu(buffer->NumSlots);
  1222. enclosure->start_slot = le16_to_cpu(buffer->StartSlot);
  1223. enclosure->start_id = buffer->StartTargetID;
  1224. enclosure->start_channel = buffer->StartBus;
  1225. enclosure->sep_id = buffer->SEPTargetID;
  1226. enclosure->sep_channel = buffer->SEPBus;
  1227. out_free_consistent:
  1228. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  1229. buffer, dma_handle);
  1230. out:
  1231. return error;
  1232. }
  1233. /**
  1234. * mptsas_add_end_device - report a new end device to sas transport layer
  1235. * @ioc: Pointer to MPT_ADAPTER structure
  1236. * @phy_info: describes attached device
  1237. *
  1238. * return (0) success, non-zero on failure
  1239. *
  1240. **/
  1241. static int
  1242. mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
  1243. {
  1244. struct sas_rphy *rphy;
  1245. struct sas_port *port;
  1246. struct sas_identify identify;
  1247. char *ds = NULL;
  1248. u8 fw_id;
  1249. if (!phy_info) {
  1250. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1251. "%s: exit at line=%d\n", ioc->name,
  1252. __func__, __LINE__));
  1253. return 1;
  1254. }
  1255. fw_id = phy_info->attached.id;
  1256. if (mptsas_get_rphy(phy_info)) {
  1257. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1258. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1259. __func__, fw_id, __LINE__));
  1260. return 2;
  1261. }
  1262. port = mptsas_get_port(phy_info);
  1263. if (!port) {
  1264. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1265. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1266. __func__, fw_id, __LINE__));
  1267. return 3;
  1268. }
  1269. if (phy_info->attached.device_info &
  1270. MPI_SAS_DEVICE_INFO_SSP_TARGET)
  1271. ds = "ssp";
  1272. if (phy_info->attached.device_info &
  1273. MPI_SAS_DEVICE_INFO_STP_TARGET)
  1274. ds = "stp";
  1275. if (phy_info->attached.device_info &
  1276. MPI_SAS_DEVICE_INFO_SATA_DEVICE)
  1277. ds = "sata";
  1278. printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
  1279. " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
  1280. phy_info->attached.channel, phy_info->attached.id,
  1281. phy_info->attached.phy_id, (unsigned long long)
  1282. phy_info->attached.sas_address);
  1283. mptsas_parse_device_info(&identify, &phy_info->attached);
  1284. rphy = sas_end_device_alloc(port);
  1285. if (!rphy) {
  1286. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1287. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1288. __func__, fw_id, __LINE__));
  1289. return 5; /* non-fatal: an rphy can be added later */
  1290. }
  1291. rphy->identify = identify;
  1292. if (sas_rphy_add(rphy)) {
  1293. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1294. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1295. __func__, fw_id, __LINE__));
  1296. sas_rphy_free(rphy);
  1297. return 6;
  1298. }
  1299. mptsas_set_rphy(ioc, phy_info, rphy);
  1300. return 0;
  1301. }
  1302. /**
  1303. * mptsas_del_end_device - report a deleted end device to sas transport layer
  1304. * @ioc: Pointer to MPT_ADAPTER structure
  1305. * @phy_info: describes attached device
  1306. *
  1307. **/
  1308. static void
  1309. mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
  1310. {
  1311. struct sas_rphy *rphy;
  1312. struct sas_port *port;
  1313. struct mptsas_portinfo *port_info;
  1314. struct mptsas_phyinfo *phy_info_parent;
  1315. int i;
  1316. char *ds = NULL;
  1317. u8 fw_id;
  1318. u64 sas_address;
  1319. if (!phy_info)
  1320. return;
  1321. fw_id = phy_info->attached.id;
  1322. sas_address = phy_info->attached.sas_address;
  1323. if (!phy_info->port_details) {
  1324. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1325. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1326. __func__, fw_id, __LINE__));
  1327. return;
  1328. }
  1329. rphy = mptsas_get_rphy(phy_info);
  1330. if (!rphy) {
  1331. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1332. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1333. __func__, fw_id, __LINE__));
  1334. return;
  1335. }
  1336. if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
  1337. || phy_info->attached.device_info
  1338. & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
  1339. || phy_info->attached.device_info
  1340. & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
  1341. ds = "initiator";
  1342. if (phy_info->attached.device_info &
  1343. MPI_SAS_DEVICE_INFO_SSP_TARGET)
  1344. ds = "ssp";
  1345. if (phy_info->attached.device_info &
  1346. MPI_SAS_DEVICE_INFO_STP_TARGET)
  1347. ds = "stp";
  1348. if (phy_info->attached.device_info &
  1349. MPI_SAS_DEVICE_INFO_SATA_DEVICE)
  1350. ds = "sata";
  1351. dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
  1352. "removing %s device: fw_channel %d, fw_id %d, phy %d,"
  1353. "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
  1354. phy_info->attached.id, phy_info->attached.phy_id,
  1355. (unsigned long long) sas_address);
  1356. port = mptsas_get_port(phy_info);
  1357. if (!port) {
  1358. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  1359. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  1360. __func__, fw_id, __LINE__));
  1361. return;
  1362. }
  1363. port_info = phy_info->portinfo;
  1364. phy_info_parent = port_info->phy_info;
  1365. for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
  1366. if (!phy_info_parent->phy)
  1367. continue;
  1368. if (phy_info_parent->attached.sas_address !=
  1369. sas_address)
  1370. continue;
  1371. dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
  1372. MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
  1373. ioc->name, phy_info_parent->phy_id,
  1374. phy_info_parent->phy);
  1375. sas_port_delete_phy(port, phy_info_parent->phy);
  1376. }
  1377. dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
  1378. "delete port %d, sas_addr (0x%llx)\n", ioc->name,
  1379. port->port_identifier, (unsigned long long)sas_address);
  1380. sas_port_delete(port);
  1381. mptsas_set_port(ioc, phy_info, NULL);
  1382. mptsas_port_delete(ioc, phy_info->port_details);
  1383. }
  1384. struct mptsas_phyinfo *
  1385. mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
  1386. struct mptsas_devinfo *sas_device)
  1387. {
  1388. struct mptsas_phyinfo *phy_info;
  1389. struct mptsas_portinfo *port_info;
  1390. int i;
  1391. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  1392. sas_device->sas_address);
  1393. if (!phy_info)
  1394. goto out;
  1395. port_info = phy_info->portinfo;
  1396. if (!port_info)
  1397. goto out;
  1398. mutex_lock(&ioc->sas_topology_mutex);
  1399. for (i = 0; i < port_info->num_phys; i++) {
  1400. if (port_info->phy_info[i].attached.sas_address !=
  1401. sas_device->sas_address)
  1402. continue;
  1403. port_info->phy_info[i].attached.channel = sas_device->channel;
  1404. port_info->phy_info[i].attached.id = sas_device->id;
  1405. port_info->phy_info[i].attached.sas_address =
  1406. sas_device->sas_address;
  1407. port_info->phy_info[i].attached.handle = sas_device->handle;
  1408. port_info->phy_info[i].attached.handle_parent =
  1409. sas_device->handle_parent;
  1410. port_info->phy_info[i].attached.handle_enclosure =
  1411. sas_device->handle_enclosure;
  1412. }
  1413. mutex_unlock(&ioc->sas_topology_mutex);
  1414. out:
  1415. return phy_info;
  1416. }
  1417. /**
  1418. * mptsas_firmware_event_work - work thread for processing fw events
  1419. * @work: work queue payload containing info describing the event
  1420. * Context: user
  1421. *
  1422. */
  1423. static void
  1424. mptsas_firmware_event_work(struct work_struct *work)
  1425. {
  1426. struct fw_event_work *fw_event =
  1427. container_of(work, struct fw_event_work, work.work);
  1428. MPT_ADAPTER *ioc = fw_event->ioc;
  1429. /* special rescan topology handling */
  1430. if (fw_event->event == -1) {
  1431. if (ioc->in_rescan) {
  1432. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  1433. "%s: rescan ignored as it is in progress\n",
  1434. ioc->name, __func__));
  1435. return;
  1436. }
  1437. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
  1438. "reset\n", ioc->name, __func__));
  1439. ioc->in_rescan = 1;
  1440. mptsas_not_responding_devices(ioc);
  1441. mptsas_scan_sas_topology(ioc);
  1442. ioc->in_rescan = 0;
  1443. mptsas_free_fw_event(ioc, fw_event);
  1444. mptsas_fw_event_on(ioc);
  1445. return;
  1446. }
  1447. /* events handling turned off during host reset */
  1448. if (ioc->fw_events_off) {
  1449. mptsas_free_fw_event(ioc, fw_event);
  1450. return;
  1451. }
  1452. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
  1453. "event = (0x%02x)\n", ioc->name, __func__, fw_event,
  1454. (fw_event->event & 0xFF)));
  1455. switch (fw_event->event) {
  1456. case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
  1457. mptsas_send_sas_event(fw_event);
  1458. break;
  1459. case MPI_EVENT_INTEGRATED_RAID:
  1460. mptsas_send_raid_event(fw_event);
  1461. break;
  1462. case MPI_EVENT_IR2:
  1463. mptsas_send_ir2_event(fw_event);
  1464. break;
  1465. case MPI_EVENT_PERSISTENT_TABLE_FULL:
  1466. mptbase_sas_persist_operation(ioc,
  1467. MPI_SAS_OP_CLEAR_NOT_PRESENT);
  1468. mptsas_free_fw_event(ioc, fw_event);
  1469. break;
  1470. case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
  1471. mptsas_broadcast_primative_work(fw_event);
  1472. break;
  1473. case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
  1474. mptsas_send_expander_event(fw_event);
  1475. break;
  1476. case MPI_EVENT_SAS_PHY_LINK_STATUS:
  1477. mptsas_send_link_status_event(fw_event);
  1478. break;
  1479. case MPI_EVENT_QUEUE_FULL:
  1480. mptsas_handle_queue_full_event(fw_event);
  1481. break;
  1482. }
  1483. }
  1484. static int
  1485. mptsas_slave_configure(struct scsi_device *sdev)
  1486. {
  1487. struct Scsi_Host *host = sdev->host;
  1488. MPT_SCSI_HOST *hd = shost_priv(host);
  1489. MPT_ADAPTER *ioc = hd->ioc;
  1490. VirtDevice *vdevice = sdev->hostdata;
  1491. if (vdevice->vtarget->deleted) {
  1492. sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
  1493. vdevice->vtarget->deleted = 0;
  1494. }
  1495. /*
1496. * RAID volumes are placed beyond the last expected port.
1497. * Skip sending SAS mode pages in that case.
  1498. */
  1499. if (sdev->channel == MPTSAS_RAID_CHANNEL) {
  1500. mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
  1501. goto out;
  1502. }
  1503. sas_read_port_mode_page(sdev);
  1504. mptsas_add_device_component_starget(ioc, scsi_target(sdev));
  1505. out:
  1506. return mptscsih_slave_configure(sdev);
  1507. }
  1508. static int
  1509. mptsas_target_alloc(struct scsi_target *starget)
  1510. {
  1511. struct Scsi_Host *host = dev_to_shost(&starget->dev);
  1512. MPT_SCSI_HOST *hd = shost_priv(host);
  1513. VirtTarget *vtarget;
  1514. u8 id, channel;
  1515. struct sas_rphy *rphy;
  1516. struct mptsas_portinfo *p;
  1517. int i;
  1518. MPT_ADAPTER *ioc = hd->ioc;
  1519. vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
  1520. if (!vtarget)
  1521. return -ENOMEM;
  1522. vtarget->starget = starget;
  1523. vtarget->ioc_id = ioc->id;
  1524. vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
  1525. id = starget->id;
  1526. channel = 0;
  1527. /*
1528. * RAID volumes are placed beyond the last expected port.
  1529. */
  1530. if (starget->channel == MPTSAS_RAID_CHANNEL) {
  1531. if (!ioc->raid_data.pIocPg2) {
  1532. kfree(vtarget);
  1533. return -ENXIO;
  1534. }
  1535. for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
  1536. if (id == ioc->raid_data.pIocPg2->
  1537. RaidVolume[i].VolumeID) {
  1538. channel = ioc->raid_data.pIocPg2->
  1539. RaidVolume[i].VolumeBus;
  1540. }
  1541. }
  1542. vtarget->raidVolume = 1;
  1543. goto out;
  1544. }
  1545. rphy = dev_to_rphy(starget->dev.parent);
  1546. mutex_lock(&ioc->sas_topology_mutex);
  1547. list_for_each_entry(p, &ioc->sas_topology, list) {
  1548. for (i = 0; i < p->num_phys; i++) {
  1549. if (p->phy_info[i].attached.sas_address !=
  1550. rphy->identify.sas_address)
  1551. continue;
  1552. id = p->phy_info[i].attached.id;
  1553. channel = p->phy_info[i].attached.channel;
  1554. mptsas_set_starget(&p->phy_info[i], starget);
  1555. /*
  1556. * Exposing hidden raid components
  1557. */
  1558. if (mptscsih_is_phys_disk(ioc, channel, id)) {
  1559. id = mptscsih_raid_id_to_num(ioc,
  1560. channel, id);
  1561. vtarget->tflags |=
  1562. MPT_TARGET_FLAGS_RAID_COMPONENT;
  1563. p->phy_info[i].attached.phys_disk_num = id;
  1564. }
  1565. mutex_unlock(&ioc->sas_topology_mutex);
  1566. goto out;
  1567. }
  1568. }
  1569. mutex_unlock(&ioc->sas_topology_mutex);
  1570. kfree(vtarget);
  1571. return -ENXIO;
  1572. out:
  1573. vtarget->id = id;
  1574. vtarget->channel = channel;
  1575. starget->hostdata = vtarget;
  1576. return 0;
  1577. }
  1578. static void
  1579. mptsas_target_destroy(struct scsi_target *starget)
  1580. {
  1581. struct Scsi_Host *host = dev_to_shost(&starget->dev);
  1582. MPT_SCSI_HOST *hd = shost_priv(host);
  1583. struct sas_rphy *rphy;
  1584. struct mptsas_portinfo *p;
  1585. int i;
  1586. MPT_ADAPTER *ioc = hd->ioc;
  1587. VirtTarget *vtarget;
  1588. if (!starget->hostdata)
  1589. return;
  1590. vtarget = starget->hostdata;
  1591. mptsas_del_device_component_by_os(ioc, starget->channel,
  1592. starget->id);
  1593. if (starget->channel == MPTSAS_RAID_CHANNEL)
  1594. goto out;
  1595. rphy = dev_to_rphy(starget->dev.parent);
  1596. list_for_each_entry(p, &ioc->sas_topology, list) {
  1597. for (i = 0; i < p->num_phys; i++) {
  1598. if (p->phy_info[i].attached.sas_address !=
  1599. rphy->identify.sas_address)
  1600. continue;
  1601. starget_printk(KERN_INFO, starget, MYIOC_s_FMT
  1602. "delete device: fw_channel %d, fw_id %d, phy %d, "
  1603. "sas_addr 0x%llx\n", ioc->name,
  1604. p->phy_info[i].attached.channel,
  1605. p->phy_info[i].attached.id,
  1606. p->phy_info[i].attached.phy_id, (unsigned long long)
  1607. p->phy_info[i].attached.sas_address);
  1608. mptsas_set_starget(&p->phy_info[i], NULL);
  1609. }
  1610. }
  1611. out:
  1612. vtarget->starget = NULL;
  1613. kfree(starget->hostdata);
  1614. starget->hostdata = NULL;
  1615. }
  1616. static int
  1617. mptsas_slave_alloc(struct scsi_device *sdev)
  1618. {
  1619. struct Scsi_Host *host = sdev->host;
  1620. MPT_SCSI_HOST *hd = shost_priv(host);
  1621. struct sas_rphy *rphy;
  1622. struct mptsas_portinfo *p;
  1623. VirtDevice *vdevice;
  1624. struct scsi_target *starget;
  1625. int i;
  1626. MPT_ADAPTER *ioc = hd->ioc;
  1627. vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
  1628. if (!vdevice) {
  1629. printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
  1630. ioc->name, sizeof(VirtDevice));
  1631. return -ENOMEM;
  1632. }
  1633. starget = scsi_target(sdev);
  1634. vdevice->vtarget = starget->hostdata;
  1635. if (sdev->channel == MPTSAS_RAID_CHANNEL)
  1636. goto out;
  1637. rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
  1638. mutex_lock(&ioc->sas_topology_mutex);
  1639. list_for_each_entry(p, &ioc->sas_topology, list) {
  1640. for (i = 0; i < p->num_phys; i++) {
  1641. if (p->phy_info[i].attached.sas_address !=
  1642. rphy->identify.sas_address)
  1643. continue;
  1644. vdevice->lun = sdev->lun;
  1645. /*
  1646. * Exposing hidden raid components
  1647. */
  1648. if (mptscsih_is_phys_disk(ioc,
  1649. p->phy_info[i].attached.channel,
  1650. p->phy_info[i].attached.id))
  1651. sdev->no_uld_attach = 1;
  1652. mutex_unlock(&ioc->sas_topology_mutex);
  1653. goto out;
  1654. }
  1655. }
  1656. mutex_unlock(&ioc->sas_topology_mutex);
  1657. kfree(vdevice);
  1658. return -ENXIO;
  1659. out:
  1660. vdevice->vtarget->num_luns++;
  1661. sdev->hostdata = vdevice;
  1662. return 0;
  1663. }
  1664. static int
  1665. mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
  1666. {
  1667. MPT_SCSI_HOST *hd;
  1668. MPT_ADAPTER *ioc;
  1669. VirtDevice *vdevice = SCpnt->device->hostdata;
  1670. if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
  1671. SCpnt->result = DID_NO_CONNECT << 16;
  1672. done(SCpnt);
  1673. return 0;
  1674. }
  1675. hd = shost_priv(SCpnt->device->host);
  1676. ioc = hd->ioc;
  1677. if (ioc->sas_discovery_quiesce_io)
  1678. return SCSI_MLQUEUE_HOST_BUSY;
  1679. if (ioc->debug_level & MPT_DEBUG_SCSI)
  1680. scsi_print_command(SCpnt);
1681. return mptscsih_qcmd(SCpnt, done);
  1682. }
  1683. static DEF_SCSI_QCMD(mptsas_qcmd)
  1684. /**
1685. * mptsas_eh_timed_out - resets the scsi_cmnd timeout
1686. * if the device in question is currently in the
1687. * device removal delay.
  1688. * @sc: scsi command that the midlayer is about to time out
  1689. *
  1690. **/
  1691. static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
  1692. {
  1693. MPT_SCSI_HOST *hd;
  1694. MPT_ADAPTER *ioc;
  1695. VirtDevice *vdevice;
  1696. enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
  1697. hd = shost_priv(sc->device->host);
  1698. if (hd == NULL) {
  1699. printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
  1700. __func__, sc);
  1701. goto done;
  1702. }
  1703. ioc = hd->ioc;
  1704. if (ioc->bus_type != SAS) {
  1705. printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
  1706. __func__, sc);
  1707. goto done;
  1708. }
  1709. vdevice = sc->device->hostdata;
  1710. if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
  1711. || vdevice->vtarget->deleted)) {
  1712. dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
  1713. "or in device removal delay (sc=%p)\n",
  1714. ioc->name, __func__, sc));
  1715. rc = BLK_EH_RESET_TIMER;
  1716. goto done;
  1717. }
  1718. done:
  1719. return rc;
  1720. }
  1721. static struct scsi_host_template mptsas_driver_template = {
  1722. .module = THIS_MODULE,
  1723. .proc_name = "mptsas",
  1724. .proc_info = mptscsih_proc_info,
  1725. .name = "MPT SAS Host",
  1726. .info = mptscsih_info,
  1727. .queuecommand = mptsas_qcmd,
  1728. .target_alloc = mptsas_target_alloc,
  1729. .slave_alloc = mptsas_slave_alloc,
  1730. .slave_configure = mptsas_slave_configure,
  1731. .target_destroy = mptsas_target_destroy,
  1732. .slave_destroy = mptscsih_slave_destroy,
  1733. .change_queue_depth = mptscsih_change_queue_depth,
  1734. .eh_abort_handler = mptscsih_abort,
  1735. .eh_device_reset_handler = mptscsih_dev_reset,
  1736. .eh_host_reset_handler = mptscsih_host_reset,
  1737. .bios_param = mptscsih_bios_param,
  1738. .can_queue = MPT_SAS_CAN_QUEUE,
  1739. .this_id = -1,
  1740. .sg_tablesize = MPT_SCSI_SG_DEPTH,
  1741. .max_sectors = 8192,
  1742. .cmd_per_lun = 7,
  1743. .use_clustering = ENABLE_CLUSTERING,
  1744. .shost_attrs = mptscsih_host_attrs,
  1745. };
  1746. static int mptsas_get_linkerrors(struct sas_phy *phy)
  1747. {
  1748. MPT_ADAPTER *ioc = phy_to_ioc(phy);
  1749. ConfigExtendedPageHeader_t hdr;
  1750. CONFIGPARMS cfg;
  1751. SasPhyPage1_t *buffer;
  1752. dma_addr_t dma_handle;
  1753. int error;
  1754. /* FIXME: only have link errors on local phys */
  1755. if (!scsi_is_sas_phy_local(phy))
  1756. return -EINVAL;
  1757. hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
  1758. hdr.ExtPageLength = 0;
1759. hdr.PageNumber = 1; /* page number 1 */
  1760. hdr.Reserved1 = 0;
  1761. hdr.Reserved2 = 0;
  1762. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  1763. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
  1764. cfg.cfghdr.ehdr = &hdr;
  1765. cfg.physAddr = -1;
  1766. cfg.pageAddr = phy->identify.phy_identifier;
  1767. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  1768. cfg.dir = 0; /* read */
  1769. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  1770. error = mpt_config(ioc, &cfg);
  1771. if (error)
  1772. return error;
  1773. if (!hdr.ExtPageLength)
  1774. return -ENXIO;
  1775. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  1776. &dma_handle);
  1777. if (!buffer)
  1778. return -ENOMEM;
  1779. cfg.physAddr = dma_handle;
  1780. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  1781. error = mpt_config(ioc, &cfg);
  1782. if (error)
  1783. goto out_free_consistent;
  1784. mptsas_print_phy_pg1(ioc, buffer);
  1785. phy->invalid_dword_count = le32_to_cpu(buffer->InvalidDwordCount);
  1786. phy->running_disparity_error_count =
  1787. le32_to_cpu(buffer->RunningDisparityErrorCount);
  1788. phy->loss_of_dword_sync_count =
  1789. le32_to_cpu(buffer->LossDwordSynchCount);
  1790. phy->phy_reset_problem_count =
  1791. le32_to_cpu(buffer->PhyResetProblemCount);
  1792. out_free_consistent:
  1793. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  1794. buffer, dma_handle);
  1795. return error;
  1796. }
  1797. static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
  1798. MPT_FRAME_HDR *reply)
  1799. {
  1800. ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
  1801. if (reply != NULL) {
  1802. ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
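/* MsgLength is in 32-bit words; never copy more than the reply frame size. */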
  1803. memcpy(ioc->sas_mgmt.reply, reply,
  1804. min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
  1805. }
  1806. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
  1807. ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
  1808. complete(&ioc->sas_mgmt.done);
  1809. return 1;
  1810. }
  1811. return 0;
  1812. }
  1813. static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
  1814. {
  1815. MPT_ADAPTER *ioc = phy_to_ioc(phy);
  1816. SasIoUnitControlRequest_t *req;
  1817. SasIoUnitControlReply_t *reply;
  1818. MPT_FRAME_HDR *mf;
  1819. MPIHeader_t *hdr;
  1820. unsigned long timeleft;
  1821. int error = -ERESTARTSYS;
  1822. /* FIXME: fusion doesn't allow non-local phy reset */
  1823. if (!scsi_is_sas_phy_local(phy))
  1824. return -EINVAL;
  1825. /* not implemented for expanders */
  1826. if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
  1827. return -ENXIO;
  1828. if (mutex_lock_interruptible(&ioc->sas_mgmt.mutex))
  1829. goto out;
  1830. mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
  1831. if (!mf) {
  1832. error = -ENOMEM;
  1833. goto out_unlock;
  1834. }
  1835. hdr = (MPIHeader_t *) mf;
  1836. req = (SasIoUnitControlRequest_t *)mf;
  1837. memset(req, 0, sizeof(SasIoUnitControlRequest_t));
  1838. req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
  1839. req->MsgContext = hdr->MsgContext;
  1840. req->Operation = hard_reset ?
  1841. MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
  1842. req->PhyNum = phy->identify.phy_identifier;
  1843. INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
  1844. mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
  1845. timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
  1846. 10 * HZ);
  1847. if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
  1848. error = -ETIME;
  1849. mpt_free_msg_frame(ioc, mf);
  1850. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
  1851. goto out_unlock;
  1852. if (!timeleft)
  1853. mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
  1854. goto out_unlock;
  1855. }
  1856. /* a reply frame is expected */
  1857. if ((ioc->sas_mgmt.status &
  1858. MPT_MGMT_STATUS_RF_VALID) == 0) {
  1859. error = -ENXIO;
  1860. goto out_unlock;
  1861. }
  1862. /* process the completed Reply Message Frame */
  1863. reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
  1864. if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
  1865. printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
  1866. ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
  1867. error = -ENXIO;
  1868. goto out_unlock;
  1869. }
  1870. error = 0;
  1871. out_unlock:
  1872. CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
  1873. mutex_unlock(&ioc->sas_mgmt.mutex);
  1874. out:
  1875. return error;
  1876. }
  1877. static int
  1878. mptsas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
  1879. {
  1880. MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
  1881. int i, error;
  1882. struct mptsas_portinfo *p;
  1883. struct mptsas_enclosure enclosure_info;
  1884. u64 enclosure_handle;
  1885. mutex_lock(&ioc->sas_topology_mutex);
  1886. list_for_each_entry(p, &ioc->sas_topology, list) {
  1887. for (i = 0; i < p->num_phys; i++) {
  1888. if (p->phy_info[i].attached.sas_address ==
  1889. rphy->identify.sas_address) {
  1890. enclosure_handle = p->phy_info[i].
  1891. attached.handle_enclosure;
  1892. goto found_info;
  1893. }
  1894. }
  1895. }
  1896. mutex_unlock(&ioc->sas_topology_mutex);
  1897. return -ENXIO;
  1898. found_info:
  1899. mutex_unlock(&ioc->sas_topology_mutex);
  1900. memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
  1901. error = mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
  1902. (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
  1903. MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), enclosure_handle);
  1904. if (!error)
  1905. *identifier = enclosure_info.enclosure_logical_id;
  1906. return error;
  1907. }
  1908. static int
  1909. mptsas_get_bay_identifier(struct sas_rphy *rphy)
  1910. {
  1911. MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
  1912. struct mptsas_portinfo *p;
  1913. int i, rc;
  1914. mutex_lock(&ioc->sas_topology_mutex);
  1915. list_for_each_entry(p, &ioc->sas_topology, list) {
  1916. for (i = 0; i < p->num_phys; i++) {
  1917. if (p->phy_info[i].attached.sas_address ==
  1918. rphy->identify.sas_address) {
  1919. rc = p->phy_info[i].attached.slot;
  1920. goto out;
  1921. }
  1922. }
  1923. }
  1924. rc = -ENXIO;
  1925. out:
  1926. mutex_unlock(&ioc->sas_topology_mutex);
  1927. return rc;
  1928. }
  1929. static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
  1930. struct request *req)
  1931. {
  1932. MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
  1933. MPT_FRAME_HDR *mf;
  1934. SmpPassthroughRequest_t *smpreq;
  1935. struct request *rsp = req->next_rq;
  1936. int ret;
  1937. int flagsLength;
  1938. unsigned long timeleft;
  1939. char *psge;
  1940. dma_addr_t dma_addr_in = 0;
  1941. dma_addr_t dma_addr_out = 0;
  1942. u64 sas_address = 0;
  1943. if (!rsp) {
  1944. printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
  1945. ioc->name, __func__);
  1946. return -EINVAL;
  1947. }
  1948. /* do we need to support multiple segments? */
  1949. if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
  1950. printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
  1951. ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
  1952. rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
  1953. return -EINVAL;
  1954. }
  1955. ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
  1956. if (ret)
  1957. goto out;
  1958. mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
  1959. if (!mf) {
  1960. ret = -ENOMEM;
  1961. goto out_unlock;
  1962. }
  1963. smpreq = (SmpPassthroughRequest_t *)mf;
  1964. memset(smpreq, 0, sizeof(*smpreq));
  1965. smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
  1966. smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
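/* With no rphy the SMP request is addressed to the host adapter itself,
 * so fall back to the HBA's own SAS address (taken from its first phy).
 */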
  1967. if (rphy)
  1968. sas_address = rphy->identify.sas_address;
  1969. else {
  1970. struct mptsas_portinfo *port_info;
  1971. mutex_lock(&ioc->sas_topology_mutex);
  1972. port_info = ioc->hba_port_info;
  1973. if (port_info && port_info->phy_info)
  1974. sas_address =
  1975. port_info->phy_info[0].phy->identify.sas_address;
  1976. mutex_unlock(&ioc->sas_topology_mutex);
  1977. }
  1978. *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
  1979. psge = (char *)
  1980. (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
  1981. /* request */
  1982. flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
  1983. MPI_SGE_FLAGS_END_OF_BUFFER |
  1984. MPI_SGE_FLAGS_DIRECTION)
  1985. << MPI_SGE_FLAGS_SHIFT;
  1986. flagsLength |= (blk_rq_bytes(req) - 4);
  1987. dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
  1988. blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
  1989. if (!dma_addr_out)
  1990. goto put_mf;
  1991. ioc->add_sge(psge, flagsLength, dma_addr_out);
  1992. psge += ioc->SGE_size;
  1993. /* response */
  1994. flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
  1995. MPI_SGE_FLAGS_SYSTEM_ADDRESS |
  1996. MPI_SGE_FLAGS_IOC_TO_HOST |
  1997. MPI_SGE_FLAGS_END_OF_BUFFER;
  1998. flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
  1999. flagsLength |= blk_rq_bytes(rsp) + 4;
  2000. dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
  2001. blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
  2002. if (!dma_addr_in)
  2003. goto unmap;
  2004. ioc->add_sge(psge, flagsLength, dma_addr_in);
  2005. INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
  2006. mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
  2007. timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
  2008. if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
  2009. ret = -ETIME;
  2010. mpt_free_msg_frame(ioc, mf);
  2011. mf = NULL;
  2012. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
  2013. goto unmap;
  2014. if (!timeleft)
  2015. mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
  2016. goto unmap;
  2017. }
  2018. mf = NULL;
  2019. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
  2020. SmpPassthroughReply_t *smprep;
  2021. smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
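/* Hand the SMP reply frame back to the caller through the request's
 * sense buffer and adjust the residual byte counts accordingly.
 */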
  2022. memcpy(req->sense, smprep, sizeof(*smprep));
  2023. req->sense_len = sizeof(*smprep);
  2024. req->resid_len = 0;
  2025. rsp->resid_len -= smprep->ResponseDataLength;
  2026. } else {
  2027. printk(MYIOC_s_ERR_FMT
  2028. "%s: smp passthru reply failed to be returned\n",
  2029. ioc->name, __func__);
  2030. ret = -ENXIO;
  2031. }
  2032. unmap:
  2033. if (dma_addr_out)
  2034. pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
  2035. PCI_DMA_BIDIRECTIONAL);
  2036. if (dma_addr_in)
  2037. pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
  2038. PCI_DMA_BIDIRECTIONAL);
  2039. put_mf:
  2040. if (mf)
  2041. mpt_free_msg_frame(ioc, mf);
  2042. out_unlock:
  2043. CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
  2044. mutex_unlock(&ioc->sas_mgmt.mutex);
  2045. out:
  2046. return ret;
  2047. }
  2048. static struct sas_function_template mptsas_transport_functions = {
  2049. .get_linkerrors = mptsas_get_linkerrors,
  2050. .get_enclosure_identifier = mptsas_get_enclosure_identifier,
  2051. .get_bay_identifier = mptsas_get_bay_identifier,
  2052. .phy_reset = mptsas_phy_reset,
  2053. .smp_handler = mptsas_smp_handler,
  2054. };
  2055. static struct scsi_transport_template *mptsas_transport_template;
  2056. static int
  2057. mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
  2058. {
  2059. ConfigExtendedPageHeader_t hdr;
  2060. CONFIGPARMS cfg;
  2061. SasIOUnitPage0_t *buffer;
  2062. dma_addr_t dma_handle;
  2063. int error, i;
  2064. hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
  2065. hdr.ExtPageLength = 0;
  2066. hdr.PageNumber = 0;
  2067. hdr.Reserved1 = 0;
  2068. hdr.Reserved2 = 0;
  2069. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2070. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
  2071. cfg.cfghdr.ehdr = &hdr;
  2072. cfg.physAddr = -1;
  2073. cfg.pageAddr = 0;
  2074. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2075. cfg.dir = 0; /* read */
  2076. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2077. error = mpt_config(ioc, &cfg);
  2078. if (error)
  2079. goto out;
  2080. if (!hdr.ExtPageLength) {
  2081. error = -ENXIO;
  2082. goto out;
  2083. }
  2084. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2085. &dma_handle);
  2086. if (!buffer) {
  2087. error = -ENOMEM;
  2088. goto out;
  2089. }
  2090. cfg.physAddr = dma_handle;
  2091. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2092. error = mpt_config(ioc, &cfg);
  2093. if (error)
  2094. goto out_free_consistent;
  2095. port_info->num_phys = buffer->NumPhys;
  2096. port_info->phy_info = kcalloc(port_info->num_phys,
  2097. sizeof(struct mptsas_phyinfo), GFP_KERNEL);
  2098. if (!port_info->phy_info) {
  2099. error = -ENOMEM;
  2100. goto out_free_consistent;
  2101. }
  2102. ioc->nvdata_version_persistent =
  2103. le16_to_cpu(buffer->NvdataVersionPersistent);
  2104. ioc->nvdata_version_default =
  2105. le16_to_cpu(buffer->NvdataVersionDefault);
  2106. for (i = 0; i < port_info->num_phys; i++) {
  2107. mptsas_print_phy_data(ioc, &buffer->PhyData[i]);
  2108. port_info->phy_info[i].phy_id = i;
  2109. port_info->phy_info[i].port_id =
  2110. buffer->PhyData[i].Port;
  2111. port_info->phy_info[i].negotiated_link_rate =
  2112. buffer->PhyData[i].NegotiatedLinkRate;
  2113. port_info->phy_info[i].portinfo = port_info;
  2114. port_info->phy_info[i].handle =
  2115. le16_to_cpu(buffer->PhyData[i].ControllerDevHandle);
  2116. }
  2117. out_free_consistent:
  2118. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2119. buffer, dma_handle);
  2120. out:
  2121. return error;
  2122. }
  2123. static int
  2124. mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
  2125. {
  2126. ConfigExtendedPageHeader_t hdr;
  2127. CONFIGPARMS cfg;
  2128. SasIOUnitPage1_t *buffer;
  2129. dma_addr_t dma_handle;
  2130. int error;
  2131. u8 device_missing_delay;
  2132. memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
  2133. memset(&cfg, 0, sizeof(CONFIGPARMS));
  2134. cfg.cfghdr.ehdr = &hdr;
  2135. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2136. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2137. cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2138. cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
  2139. cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
  2140. cfg.cfghdr.ehdr->PageNumber = 1;
  2141. error = mpt_config(ioc, &cfg);
  2142. if (error)
  2143. goto out;
  2144. if (!hdr.ExtPageLength) {
  2145. error = -ENXIO;
  2146. goto out;
  2147. }
  2148. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2149. &dma_handle);
  2150. if (!buffer) {
  2151. error = -ENOMEM;
  2152. goto out;
  2153. }
  2154. cfg.physAddr = dma_handle;
  2155. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2156. error = mpt_config(ioc, &cfg);
  2157. if (error)
  2158. goto out_free_consistent;
  2159. ioc->io_missing_delay =
  2160. le16_to_cpu(buffer->IODeviceMissingDelay);
  2161. device_missing_delay = buffer->ReportDeviceMissingDelay;
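/* The report-missing delay is encoded either in seconds or in units of
 * 16 seconds; the UNIT_16 flag selects the scaling applied below.
 */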
  2162. ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
  2163. (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
  2164. device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  2165. out_free_consistent:
  2166. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2167. buffer, dma_handle);
  2168. out:
  2169. return error;
  2170. }
  2171. static int
  2172. mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
  2173. u32 form, u32 form_specific)
  2174. {
  2175. ConfigExtendedPageHeader_t hdr;
  2176. CONFIGPARMS cfg;
  2177. SasPhyPage0_t *buffer;
  2178. dma_addr_t dma_handle;
  2179. int error;
  2180. hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
  2181. hdr.ExtPageLength = 0;
  2182. hdr.PageNumber = 0;
  2183. hdr.Reserved1 = 0;
  2184. hdr.Reserved2 = 0;
  2185. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2186. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
  2187. cfg.cfghdr.ehdr = &hdr;
  2188. cfg.dir = 0; /* read */
  2189. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2190. /* Get Phy Pg 0 for each Phy. */
  2191. cfg.physAddr = -1;
  2192. cfg.pageAddr = form + form_specific;
  2193. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2194. error = mpt_config(ioc, &cfg);
  2195. if (error)
  2196. goto out;
  2197. if (!hdr.ExtPageLength) {
  2198. error = -ENXIO;
  2199. goto out;
  2200. }
  2201. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2202. &dma_handle);
  2203. if (!buffer) {
  2204. error = -ENOMEM;
  2205. goto out;
  2206. }
  2207. cfg.physAddr = dma_handle;
  2208. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2209. error = mpt_config(ioc, &cfg);
  2210. if (error)
  2211. goto out_free_consistent;
  2212. mptsas_print_phy_pg0(ioc, buffer);
  2213. phy_info->hw_link_rate = buffer->HwLinkRate;
  2214. phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
  2215. phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
  2216. phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
  2217. out_free_consistent:
  2218. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2219. buffer, dma_handle);
  2220. out:
  2221. return error;
  2222. }
  2223. static int
  2224. mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
  2225. u32 form, u32 form_specific)
  2226. {
  2227. ConfigExtendedPageHeader_t hdr;
  2228. CONFIGPARMS cfg;
  2229. SasDevicePage0_t *buffer;
  2230. dma_addr_t dma_handle;
  2231. __le64 sas_address;
2232. int error = 0;
  2233. hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
  2234. hdr.ExtPageLength = 0;
  2235. hdr.PageNumber = 0;
  2236. hdr.Reserved1 = 0;
  2237. hdr.Reserved2 = 0;
  2238. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2239. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
  2240. cfg.cfghdr.ehdr = &hdr;
  2241. cfg.pageAddr = form + form_specific;
  2242. cfg.physAddr = -1;
  2243. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2244. cfg.dir = 0; /* read */
  2245. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2246. memset(device_info, 0, sizeof(struct mptsas_devinfo));
  2247. error = mpt_config(ioc, &cfg);
  2248. if (error)
  2249. goto out;
  2250. if (!hdr.ExtPageLength) {
  2251. error = -ENXIO;
  2252. goto out;
  2253. }
  2254. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2255. &dma_handle);
  2256. if (!buffer) {
  2257. error = -ENOMEM;
  2258. goto out;
  2259. }
  2260. cfg.physAddr = dma_handle;
  2261. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2262. error = mpt_config(ioc, &cfg);
  2263. if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
  2264. error = -ENODEV;
  2265. goto out_free_consistent;
  2266. }
  2267. if (error)
  2268. goto out_free_consistent;
  2269. mptsas_print_device_pg0(ioc, buffer);
  2270. memset(device_info, 0, sizeof(struct mptsas_devinfo));
  2271. device_info->handle = le16_to_cpu(buffer->DevHandle);
  2272. device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
  2273. device_info->handle_enclosure =
  2274. le16_to_cpu(buffer->EnclosureHandle);
  2275. device_info->slot = le16_to_cpu(buffer->Slot);
  2276. device_info->phy_id = buffer->PhyNum;
  2277. device_info->port_id = buffer->PhysicalPort;
  2278. device_info->id = buffer->TargetID;
  2279. device_info->phys_disk_num = ~0;
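/* ~0 marks the device as not (known to be) a hidden RAID physical disk. */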
  2280. device_info->channel = buffer->Bus;
  2281. memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
  2282. device_info->sas_address = le64_to_cpu(sas_address);
  2283. device_info->device_info =
  2284. le32_to_cpu(buffer->DeviceInfo);
  2285. device_info->flags = le16_to_cpu(buffer->Flags);
  2286. out_free_consistent:
  2287. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2288. buffer, dma_handle);
  2289. out:
  2290. return error;
  2291. }
  2292. static int
  2293. mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
  2294. u32 form, u32 form_specific)
  2295. {
  2296. ConfigExtendedPageHeader_t hdr;
  2297. CONFIGPARMS cfg;
  2298. SasExpanderPage0_t *buffer;
  2299. dma_addr_t dma_handle;
  2300. int i, error;
  2301. __le64 sas_address;
  2302. memset(port_info, 0, sizeof(struct mptsas_portinfo));
  2303. hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
  2304. hdr.ExtPageLength = 0;
  2305. hdr.PageNumber = 0;
  2306. hdr.Reserved1 = 0;
  2307. hdr.Reserved2 = 0;
  2308. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2309. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
  2310. cfg.cfghdr.ehdr = &hdr;
  2311. cfg.physAddr = -1;
  2312. cfg.pageAddr = form + form_specific;
  2313. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2314. cfg.dir = 0; /* read */
  2315. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2316. memset(port_info, 0, sizeof(struct mptsas_portinfo));
  2317. error = mpt_config(ioc, &cfg);
  2318. if (error)
  2319. goto out;
  2320. if (!hdr.ExtPageLength) {
  2321. error = -ENXIO;
  2322. goto out;
  2323. }
  2324. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2325. &dma_handle);
  2326. if (!buffer) {
  2327. error = -ENOMEM;
  2328. goto out;
  2329. }
  2330. cfg.physAddr = dma_handle;
  2331. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2332. error = mpt_config(ioc, &cfg);
  2333. if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
  2334. error = -ENODEV;
  2335. goto out_free_consistent;
  2336. }
  2337. if (error)
  2338. goto out_free_consistent;
  2339. /* save config data */
  2340. port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
  2341. port_info->phy_info = kcalloc(port_info->num_phys,
  2342. sizeof(struct mptsas_phyinfo), GFP_KERNEL);
  2343. if (!port_info->phy_info) {
  2344. error = -ENOMEM;
  2345. goto out_free_consistent;
  2346. }
  2347. memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
  2348. for (i = 0; i < port_info->num_phys; i++) {
  2349. port_info->phy_info[i].portinfo = port_info;
  2350. port_info->phy_info[i].handle =
  2351. le16_to_cpu(buffer->DevHandle);
  2352. port_info->phy_info[i].identify.sas_address =
  2353. le64_to_cpu(sas_address);
  2354. port_info->phy_info[i].identify.handle_parent =
  2355. le16_to_cpu(buffer->ParentDevHandle);
  2356. }
  2357. out_free_consistent:
  2358. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2359. buffer, dma_handle);
  2360. out:
  2361. return error;
  2362. }
  2363. static int
  2364. mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
  2365. u32 form, u32 form_specific)
  2366. {
  2367. ConfigExtendedPageHeader_t hdr;
  2368. CONFIGPARMS cfg;
  2369. SasExpanderPage1_t *buffer;
  2370. dma_addr_t dma_handle;
2371. int error = 0;
  2372. hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
  2373. hdr.ExtPageLength = 0;
  2374. hdr.PageNumber = 1;
  2375. hdr.Reserved1 = 0;
  2376. hdr.Reserved2 = 0;
  2377. hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
  2378. hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
  2379. cfg.cfghdr.ehdr = &hdr;
  2380. cfg.physAddr = -1;
  2381. cfg.pageAddr = form + form_specific;
  2382. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  2383. cfg.dir = 0; /* read */
  2384. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  2385. error = mpt_config(ioc, &cfg);
  2386. if (error)
  2387. goto out;
  2388. if (!hdr.ExtPageLength) {
  2389. error = -ENXIO;
  2390. goto out;
  2391. }
  2392. buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2393. &dma_handle);
  2394. if (!buffer) {
  2395. error = -ENOMEM;
  2396. goto out;
  2397. }
  2398. cfg.physAddr = dma_handle;
  2399. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  2400. error = mpt_config(ioc, &cfg);
  2401. if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
  2402. error = -ENODEV;
  2403. goto out_free_consistent;
  2404. }
  2405. if (error)
  2406. goto out_free_consistent;
  2407. mptsas_print_expander_pg1(ioc, buffer);
  2408. /* save config data */
  2409. phy_info->phy_id = buffer->PhyIdentifier;
  2410. phy_info->port_id = buffer->PhysicalPort;
  2411. phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
  2412. phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
  2413. phy_info->hw_link_rate = buffer->HwLinkRate;
  2414. phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
  2415. phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
  2416. out_free_consistent:
  2417. pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
  2418. buffer, dma_handle);
  2419. out:
  2420. return error;
  2421. }
  2422. struct rep_manu_request{
  2423. u8 smp_frame_type;
  2424. u8 function;
  2425. u8 reserved;
  2426. u8 request_length;
  2427. };
  2428. struct rep_manu_reply{
  2429. u8 smp_frame_type; /* 0x41 */
  2430. u8 function; /* 0x01 */
  2431. u8 function_result;
  2432. u8 response_length;
  2433. u16 expander_change_count;
  2434. u8 reserved0[2];
  2435. u8 sas_format:1;
  2436. u8 reserved1:7;
  2437. u8 reserved2[3];
  2438. u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
  2439. u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
  2440. u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
  2441. u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
  2442. u16 component_id;
  2443. u8 component_revision_id;
  2444. u8 reserved3;
  2445. u8 vendor_specific[8];
  2446. };
  2447. /**
2448. * mptsas_exp_repmanufacture_info - fill in expander manufacture info
  2449. * @ioc: per adapter object
  2450. * @sas_address: expander sas address
  2451. * @edev: the sas_expander_device object
  2452. *
2453. * Fills in the sas_expander_device object when the SMP port is created.
  2454. *
  2455. * Returns 0 for success, non-zero for failure.
  2456. */
  2457. static int
  2458. mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
  2459. u64 sas_address, struct sas_expander_device *edev)
  2460. {
  2461. MPT_FRAME_HDR *mf;
  2462. SmpPassthroughRequest_t *smpreq;
  2463. SmpPassthroughReply_t *smprep;
  2464. struct rep_manu_reply *manufacture_reply;
  2465. struct rep_manu_request *manufacture_request;
  2466. int ret;
  2467. int flagsLength;
  2468. unsigned long timeleft;
  2469. char *psge;
  2470. unsigned long flags;
  2471. void *data_out = NULL;
  2472. dma_addr_t data_out_dma = 0;
  2473. u32 sz;
  2474. spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
  2475. if (ioc->ioc_reset_in_progress) {
  2476. spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
  2477. printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
2478. ioc->name, __func__);
  2479. return -EFAULT;
  2480. }
  2481. spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
  2482. ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
  2483. if (ret)
  2484. goto out;
  2485. mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
  2486. if (!mf) {
  2487. ret = -ENOMEM;
  2488. goto out_unlock;
  2489. }
  2490. smpreq = (SmpPassthroughRequest_t *)mf;
  2491. memset(smpreq, 0, sizeof(*smpreq));
  2492. sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
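/* One DMA-coherent buffer holds the SMP request immediately followed by
 * the reply; the response SGE below points past the request portion.
 */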
  2493. data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
  2494. if (!data_out) {
  2495. printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
  2496. __FILE__, __LINE__, __func__);
  2497. ret = -ENOMEM;
  2498. goto put_mf;
  2499. }
  2500. manufacture_request = data_out;
  2501. manufacture_request->smp_frame_type = 0x40;
  2502. manufacture_request->function = 1;
  2503. manufacture_request->reserved = 0;
  2504. manufacture_request->request_length = 0;
  2505. smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
  2506. smpreq->PhysicalPort = 0xFF;
  2507. *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
2508. smpreq->RequestDataLength = cpu_to_le16(sizeof(struct rep_manu_request));
  2509. psge = (char *)
  2510. (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
  2511. flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
  2512. MPI_SGE_FLAGS_SYSTEM_ADDRESS |
  2513. MPI_SGE_FLAGS_HOST_TO_IOC |
  2514. MPI_SGE_FLAGS_END_OF_BUFFER;
  2515. flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
  2516. flagsLength |= sizeof(struct rep_manu_request);
  2517. ioc->add_sge(psge, flagsLength, data_out_dma);
  2518. psge += ioc->SGE_size;
  2519. flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
  2520. MPI_SGE_FLAGS_SYSTEM_ADDRESS |
  2521. MPI_SGE_FLAGS_IOC_TO_HOST |
  2522. MPI_SGE_FLAGS_END_OF_BUFFER;
  2523. flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
  2524. flagsLength |= sizeof(struct rep_manu_reply);
  2525. ioc->add_sge(psge, flagsLength, data_out_dma +
  2526. sizeof(struct rep_manu_request));
  2527. INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
  2528. mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
  2529. timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
  2530. if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
  2531. ret = -ETIME;
  2532. mpt_free_msg_frame(ioc, mf);
  2533. mf = NULL;
  2534. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
  2535. goto out_free;
  2536. if (!timeleft)
  2537. mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
  2538. goto out_free;
  2539. }
  2540. mf = NULL;
  2541. if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
  2542. u8 *tmp;
  2543. smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
  2544. if (le16_to_cpu(smprep->ResponseDataLength) !=
  2545. sizeof(struct rep_manu_reply))
  2546. goto out_free;
  2547. manufacture_reply = data_out + sizeof(struct rep_manu_request);
  2548. strncpy(edev->vendor_id, manufacture_reply->vendor_id,
  2549. SAS_EXPANDER_VENDOR_ID_LEN);
  2550. strncpy(edev->product_id, manufacture_reply->product_id,
  2551. SAS_EXPANDER_PRODUCT_ID_LEN);
  2552. strncpy(edev->product_rev, manufacture_reply->product_rev,
  2553. SAS_EXPANDER_PRODUCT_REV_LEN);
  2554. edev->level = manufacture_reply->sas_format;
  2555. if (manufacture_reply->sas_format) {
  2556. strncpy(edev->component_vendor_id,
  2557. manufacture_reply->component_vendor_id,
  2558. SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
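/* component_id is a big-endian u16 in the SMP reply; assemble the
 * host-order value from its two bytes.
 */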
  2559. tmp = (u8 *)&manufacture_reply->component_id;
  2560. edev->component_id = tmp[0] << 8 | tmp[1];
  2561. edev->component_revision_id =
  2562. manufacture_reply->component_revision_id;
  2563. }
  2564. } else {
  2565. printk(MYIOC_s_ERR_FMT
  2566. "%s: smp passthru reply failed to be returned\n",
  2567. ioc->name, __func__);
  2568. ret = -ENXIO;
  2569. }
  2570. out_free:
  2571. if (data_out_dma)
  2572. pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
  2573. put_mf:
  2574. if (mf)
  2575. mpt_free_msg_frame(ioc, mf);
  2576. out_unlock:
  2577. CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
  2578. mutex_unlock(&ioc->sas_mgmt.mutex);
  2579. out:
  2580. return ret;
  2581. }
  2582. static void
  2583. mptsas_parse_device_info(struct sas_identify *identify,
  2584. struct mptsas_devinfo *device_info)
  2585. {
  2586. u16 protocols;
  2587. identify->sas_address = device_info->sas_address;
  2588. identify->phy_identifier = device_info->phy_id;
  2589. /*
  2590. * Fill in Phy Initiator Port Protocol.
2591. * Bits 6:3; more than one bit can be set, so each bit is checked in turn.
  2592. */
  2593. protocols = device_info->device_info & 0x78;
  2594. identify->initiator_port_protocols = 0;
  2595. if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
  2596. identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
  2597. if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
  2598. identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
  2599. if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
  2600. identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
  2601. if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
  2602. identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
  2603. /*
  2604. * Fill in Phy Target Port Protocol.
2605. * Bits 10:7; more than one bit can be set, so each bit is checked in turn.
  2606. */
  2607. protocols = device_info->device_info & 0x780;
  2608. identify->target_port_protocols = 0;
  2609. if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
  2610. identify->target_port_protocols |= SAS_PROTOCOL_SSP;
  2611. if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
  2612. identify->target_port_protocols |= SAS_PROTOCOL_STP;
  2613. if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
  2614. identify->target_port_protocols |= SAS_PROTOCOL_SMP;
  2615. if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
  2616. identify->target_port_protocols |= SAS_PROTOCOL_SATA;
  2617. /*
  2618. * Fill in Attached device type.
  2619. */
  2620. switch (device_info->device_info &
  2621. MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
  2622. case MPI_SAS_DEVICE_INFO_NO_DEVICE:
  2623. identify->device_type = SAS_PHY_UNUSED;
  2624. break;
  2625. case MPI_SAS_DEVICE_INFO_END_DEVICE:
  2626. identify->device_type = SAS_END_DEVICE;
  2627. break;
  2628. case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
  2629. identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
  2630. break;
  2631. case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
  2632. identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
  2633. break;
  2634. }
  2635. }
  2636. static int mptsas_probe_one_phy(struct device *dev,
  2637. struct mptsas_phyinfo *phy_info, int index, int local)
  2638. {
  2639. MPT_ADAPTER *ioc;
  2640. struct sas_phy *phy;
  2641. struct sas_port *port;
  2642. int error = 0;
  2643. VirtTarget *vtarget;
  2644. if (!dev) {
  2645. error = -ENODEV;
  2646. goto out;
  2647. }
  2648. if (!phy_info->phy) {
  2649. phy = sas_phy_alloc(dev, index);
  2650. if (!phy) {
  2651. error = -ENOMEM;
  2652. goto out;
  2653. }
  2654. } else
  2655. phy = phy_info->phy;
  2656. mptsas_parse_device_info(&phy->identify, &phy_info->identify);
  2657. /*
  2658. * Set Negotiated link rate.
  2659. */
  2660. switch (phy_info->negotiated_link_rate) {
  2661. case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
  2662. phy->negotiated_linkrate = SAS_PHY_DISABLED;
  2663. break;
  2664. case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
  2665. phy->negotiated_linkrate = SAS_LINK_RATE_FAILED;
  2666. break;
  2667. case MPI_SAS_IOUNIT0_RATE_1_5:
  2668. phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
  2669. break;
  2670. case MPI_SAS_IOUNIT0_RATE_3_0:
  2671. phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
  2672. break;
  2673. case MPI_SAS_IOUNIT0_RATE_6_0:
  2674. phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
  2675. break;
  2676. case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
  2677. case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
  2678. default:
  2679. phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
  2680. break;
  2681. }
  2682. /*
  2683. * Set Max hardware link rate.
  2684. */
  2685. switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
  2686. case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
  2687. phy->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
  2688. break;
  2689. case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
  2690. phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
  2691. break;
  2692. default:
  2693. break;
  2694. }
  2695. /*
  2696. * Set Max programmed link rate.
  2697. */
  2698. switch (phy_info->programmed_link_rate &
  2699. MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
  2700. case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
  2701. phy->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
  2702. break;
  2703. case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
  2704. phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
  2705. break;
  2706. default:
  2707. break;
  2708. }
  2709. /*
  2710. * Set Min hardware link rate.
  2711. */
  2712. switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
  2713. case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
  2714. phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
  2715. break;
  2716. case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
  2717. phy->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
  2718. break;
  2719. default:
  2720. break;
  2721. }
  2722. /*
  2723. * Set Min programmed link rate.
  2724. */
  2725. switch (phy_info->programmed_link_rate &
  2726. MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
  2727. case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
  2728. phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
  2729. break;
  2730. case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
  2731. phy->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
  2732. break;
  2733. default:
  2734. break;
  2735. }
  2736. if (!phy_info->phy) {
  2737. error = sas_phy_add(phy);
  2738. if (error) {
  2739. sas_phy_free(phy);
  2740. goto out;
  2741. }
  2742. phy_info->phy = phy;
  2743. }
  2744. if (!phy_info->attached.handle ||
  2745. !phy_info->port_details)
  2746. goto out;
  2747. port = mptsas_get_port(phy_info);
  2748. ioc = phy_to_ioc(phy_info->phy);
  2749. if (phy_info->sas_port_add_phy) {
  2750. if (!port) {
  2751. port = sas_port_alloc_num(dev);
  2752. if (!port) {
  2753. error = -ENOMEM;
  2754. goto out;
  2755. }
  2756. error = sas_port_add(port);
  2757. if (error) {
  2758. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  2759. "%s: exit at line=%d\n", ioc->name,
  2760. __func__, __LINE__));
  2761. goto out;
  2762. }
  2763. mptsas_set_port(ioc, phy_info, port);
  2764. devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
  2765. MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
  2766. ioc->name, port->port_identifier,
  2767. (unsigned long long)phy_info->
  2768. attached.sas_address));
  2769. }
  2770. dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  2771. "sas_port_add_phy: phy_id=%d\n",
  2772. ioc->name, phy_info->phy_id));
  2773. sas_port_add_phy(port, phy_info->phy);
  2774. phy_info->sas_port_add_phy = 0;
  2775. devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
  2776. MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
  2777. phy_info->phy_id, phy_info->phy));
  2778. }
  2779. if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
  2780. struct sas_rphy *rphy;
  2781. struct device *parent;
  2782. struct sas_identify identify;
  2783. parent = dev->parent->parent;
  2784. /*
  2785. * Let the hotplug_work thread handle processing
  2786. * the adding/removing of devices that occur
  2787. * after start of day.
  2788. */
  2789. if (mptsas_is_end_device(&phy_info->attached) &&
  2790. phy_info->attached.handle_parent) {
  2791. goto out;
  2792. }
  2793. mptsas_parse_device_info(&identify, &phy_info->attached);
  2794. if (scsi_is_host_device(parent)) {
  2795. struct mptsas_portinfo *port_info;
  2796. int i;
  2797. port_info = ioc->hba_port_info;
  2798. for (i = 0; i < port_info->num_phys; i++)
  2799. if (port_info->phy_info[i].identify.sas_address ==
  2800. identify.sas_address) {
  2801. sas_port_mark_backlink(port);
  2802. goto out;
  2803. }
  2804. } else if (scsi_is_sas_rphy(parent)) {
  2805. struct sas_rphy *parent_rphy = dev_to_rphy(parent);
  2806. if (identify.sas_address ==
  2807. parent_rphy->identify.sas_address) {
  2808. sas_port_mark_backlink(port);
  2809. goto out;
  2810. }
  2811. }
  2812. switch (identify.device_type) {
  2813. case SAS_END_DEVICE:
  2814. rphy = sas_end_device_alloc(port);
  2815. break;
  2816. case SAS_EDGE_EXPANDER_DEVICE:
  2817. case SAS_FANOUT_EXPANDER_DEVICE:
  2818. rphy = sas_expander_alloc(port, identify.device_type);
  2819. break;
  2820. default:
  2821. rphy = NULL;
  2822. break;
  2823. }
  2824. if (!rphy) {
  2825. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  2826. "%s: exit at line=%d\n", ioc->name,
  2827. __func__, __LINE__));
  2828. goto out;
  2829. }
  2830. rphy->identify = identify;
  2831. error = sas_rphy_add(rphy);
  2832. if (error) {
  2833. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  2834. "%s: exit at line=%d\n", ioc->name,
  2835. __func__, __LINE__));
  2836. sas_rphy_free(rphy);
  2837. goto out;
  2838. }
  2839. mptsas_set_rphy(ioc, phy_info, rphy);
  2840. if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
  2841. identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
  2842. mptsas_exp_repmanufacture_info(ioc,
  2843. identify.sas_address,
  2844. rphy_to_expander_device(rphy));
  2845. }
2846. /* If the device exists, verify it wasn't previously flagged
2847. as a missing device. If so, clear it. */
  2848. vtarget = mptsas_find_vtarget(ioc,
  2849. phy_info->attached.channel,
  2850. phy_info->attached.id);
  2851. if (vtarget && vtarget->inDMD) {
  2852. printk(KERN_INFO "Device returned, unsetting inDMD\n");
  2853. vtarget->inDMD = 0;
  2854. }
  2855. out:
  2856. return error;
  2857. }
  2858. static int
  2859. mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
  2860. {
  2861. struct mptsas_portinfo *port_info, *hba;
  2862. int error = -ENOMEM, i;
  2863. hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
  2864. if (! hba)
  2865. goto out;
  2866. error = mptsas_sas_io_unit_pg0(ioc, hba);
  2867. if (error)
  2868. goto out_free_port_info;
  2869. mptsas_sas_io_unit_pg1(ioc);
  2870. mutex_lock(&ioc->sas_topology_mutex);
  2871. port_info = ioc->hba_port_info;
  2872. if (!port_info) {
  2873. ioc->hba_port_info = port_info = hba;
  2874. ioc->hba_port_num_phy = port_info->num_phys;
  2875. list_add_tail(&port_info->list, &ioc->sas_topology);
  2876. } else {
  2877. for (i = 0; i < hba->num_phys; i++) {
  2878. port_info->phy_info[i].negotiated_link_rate =
  2879. hba->phy_info[i].negotiated_link_rate;
  2880. port_info->phy_info[i].handle =
  2881. hba->phy_info[i].handle;
  2882. port_info->phy_info[i].port_id =
  2883. hba->phy_info[i].port_id;
  2884. }
  2885. kfree(hba->phy_info);
  2886. kfree(hba);
  2887. hba = NULL;
  2888. }
  2889. mutex_unlock(&ioc->sas_topology_mutex);
  2890. #if defined(CPQ_CIM)
  2891. ioc->num_ports = port_info->num_phys;
  2892. #endif
  2893. for (i = 0; i < port_info->num_phys; i++) {
  2894. mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
  2895. (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
  2896. MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
  2897. port_info->phy_info[i].identify.handle =
  2898. port_info->phy_info[i].handle;
  2899. mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
  2900. (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
  2901. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  2902. port_info->phy_info[i].identify.handle);
  2903. if (!ioc->hba_port_sas_addr)
  2904. ioc->hba_port_sas_addr =
  2905. port_info->phy_info[i].identify.sas_address;
  2906. port_info->phy_info[i].identify.phy_id =
  2907. port_info->phy_info[i].phy_id = i;
  2908. if (port_info->phy_info[i].attached.handle)
  2909. mptsas_sas_device_pg0(ioc,
  2910. &port_info->phy_info[i].attached,
  2911. (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
  2912. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  2913. port_info->phy_info[i].attached.handle);
  2914. }
  2915. mptsas_setup_wide_ports(ioc, port_info);
  2916. for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
  2917. mptsas_probe_one_phy(&ioc->sh->shost_gendev,
  2918. &port_info->phy_info[i], ioc->sas_index, 1);
  2919. return 0;
  2920. out_free_port_info:
  2921. kfree(hba);
  2922. out:
  2923. return error;
  2924. }
  2925. static void
  2926. mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
  2927. {
  2928. struct mptsas_portinfo *parent;
  2929. struct device *parent_dev;
  2930. struct sas_rphy *rphy;
  2931. int i;
  2932. u64 sas_address; /* expander sas address */
  2933. u32 handle;
  2934. handle = port_info->phy_info[0].handle;
  2935. sas_address = port_info->phy_info[0].identify.sas_address;
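/* The HANDLE_PHY_NUM page-address form encodes the phy number in the
 * upper 16 bits and the expander device handle in the lower 16, hence
 * the (i << 16) + handle page address below.
 */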
  2936. for (i = 0; i < port_info->num_phys; i++) {
  2937. mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
  2938. (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
  2939. MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
  2940. mptsas_sas_device_pg0(ioc,
  2941. &port_info->phy_info[i].identify,
  2942. (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
  2943. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  2944. port_info->phy_info[i].identify.handle);
  2945. port_info->phy_info[i].identify.phy_id =
  2946. port_info->phy_info[i].phy_id;
  2947. if (port_info->phy_info[i].attached.handle) {
  2948. mptsas_sas_device_pg0(ioc,
  2949. &port_info->phy_info[i].attached,
  2950. (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
  2951. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  2952. port_info->phy_info[i].attached.handle);
  2953. port_info->phy_info[i].attached.phy_id =
  2954. port_info->phy_info[i].phy_id;
  2955. }
  2956. }
  2957. mutex_lock(&ioc->sas_topology_mutex);
  2958. parent = mptsas_find_portinfo_by_handle(ioc,
  2959. port_info->phy_info[0].identify.handle_parent);
  2960. if (!parent) {
  2961. mutex_unlock(&ioc->sas_topology_mutex);
  2962. return;
  2963. }
  2964. for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
  2965. i++) {
  2966. if (parent->phy_info[i].attached.sas_address == sas_address) {
  2967. rphy = mptsas_get_rphy(&parent->phy_info[i]);
  2968. parent_dev = &rphy->dev;
  2969. }
  2970. }
  2971. mutex_unlock(&ioc->sas_topology_mutex);
  2972. mptsas_setup_wide_ports(ioc, port_info);
  2973. for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
  2974. mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
  2975. ioc->sas_index, 0);
  2976. }
  2977. static void
  2978. mptsas_expander_event_add(MPT_ADAPTER *ioc,
  2979. MpiEventDataSasExpanderStatusChange_t *expander_data)
  2980. {
  2981. struct mptsas_portinfo *port_info;
  2982. int i;
  2983. __le64 sas_address;
  2984. port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
  2985. if (!port_info)
  2986. BUG();
  2987. port_info->num_phys = (expander_data->NumPhys) ?
  2988. expander_data->NumPhys : 1;
  2989. port_info->phy_info = kcalloc(port_info->num_phys,
  2990. sizeof(struct mptsas_phyinfo), GFP_KERNEL);
  2991. if (!port_info->phy_info)
  2992. BUG();
  2993. memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
  2994. for (i = 0; i < port_info->num_phys; i++) {
  2995. port_info->phy_info[i].portinfo = port_info;
  2996. port_info->phy_info[i].handle =
  2997. le16_to_cpu(expander_data->DevHandle);
  2998. port_info->phy_info[i].identify.sas_address =
  2999. le64_to_cpu(sas_address);
  3000. port_info->phy_info[i].identify.handle_parent =
  3001. le16_to_cpu(expander_data->ParentDevHandle);
  3002. }
  3003. mutex_lock(&ioc->sas_topology_mutex);
  3004. list_add_tail(&port_info->list, &ioc->sas_topology);
  3005. mutex_unlock(&ioc->sas_topology_mutex);
  3006. printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
  3007. "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
  3008. (unsigned long long)sas_address);
  3009. mptsas_expander_refresh(ioc, port_info);
  3010. }
  3011. /**
  3012. * mptsas_delete_expander_siblings - remove siblings attached to expander
  3013. * @ioc: Pointer to MPT_ADAPTER structure
  3014. * @parent: the parent port_info object
  3015. * @expander: the expander port_info object
  3016. **/
  3017. static void
  3018. mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
  3019. *parent, struct mptsas_portinfo *expander)
  3020. {
  3021. struct mptsas_phyinfo *phy_info;
  3022. struct mptsas_portinfo *port_info;
  3023. struct sas_rphy *rphy;
  3024. int i;
  3025. phy_info = expander->phy_info;
  3026. for (i = 0; i < expander->num_phys; i++, phy_info++) {
  3027. rphy = mptsas_get_rphy(phy_info);
  3028. if (!rphy)
  3029. continue;
  3030. if (rphy->identify.device_type == SAS_END_DEVICE)
  3031. mptsas_del_end_device(ioc, phy_info);
  3032. }
  3033. phy_info = expander->phy_info;
  3034. for (i = 0; i < expander->num_phys; i++, phy_info++) {
  3035. rphy = mptsas_get_rphy(phy_info);
  3036. if (!rphy)
  3037. continue;
  3038. if (rphy->identify.device_type ==
  3039. MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
  3040. rphy->identify.device_type ==
  3041. MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
  3042. port_info = mptsas_find_portinfo_by_sas_address(ioc,
  3043. rphy->identify.sas_address);
  3044. if (!port_info)
  3045. continue;
  3046. if (port_info == parent) /* backlink rphy */
  3047. continue;
3048. /*
3049. Delete this expander even if its expander device page still
3050. exists, because the parent expander has already been deleted
3051. */
  3052. mptsas_expander_delete(ioc, port_info, 1);
  3053. }
  3054. }
  3055. }
  3056. /**
  3057. * mptsas_expander_delete - remove this expander
  3058. * @ioc: Pointer to MPT_ADAPTER structure
  3059. * @port_info: expander port_info struct
  3060. * @force: Flag to forcefully delete the expander
  3061. *
  3062. **/
  3063. static void mptsas_expander_delete(MPT_ADAPTER *ioc,
  3064. struct mptsas_portinfo *port_info, u8 force)
  3065. {
  3066. struct mptsas_portinfo *parent;
  3067. int i;
  3068. u64 expander_sas_address;
  3069. struct mptsas_phyinfo *phy_info;
  3070. struct mptsas_portinfo buffer;
  3071. struct mptsas_portinfo_details *port_details;
  3072. struct sas_port *port;
  3073. if (!port_info)
  3074. return;
  3075. /* see if expander is still there before deleting */
  3076. mptsas_sas_expander_pg0(ioc, &buffer,
  3077. (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
  3078. MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
  3079. port_info->phy_info[0].identify.handle);
  3080. if (buffer.num_phys) {
  3081. kfree(buffer.phy_info);
  3082. if (!force)
  3083. return;
  3084. }
  3085. /*
  3086. * Obtain the port_info instance to the parent port
  3087. */
  3088. port_details = NULL;
  3089. expander_sas_address =
  3090. port_info->phy_info[0].identify.sas_address;
  3091. parent = mptsas_find_portinfo_by_handle(ioc,
  3092. port_info->phy_info[0].identify.handle_parent);
  3093. mptsas_delete_expander_siblings(ioc, parent, port_info);
  3094. if (!parent)
  3095. goto out;
  3096. /*
  3097. * Delete rphys in the parent that point
  3098. * to this expander.
  3099. */
  3100. phy_info = parent->phy_info;
  3101. port = NULL;
  3102. for (i = 0; i < parent->num_phys; i++, phy_info++) {
  3103. if (!phy_info->phy)
  3104. continue;
  3105. if (phy_info->attached.sas_address !=
  3106. expander_sas_address)
  3107. continue;
  3108. if (!port) {
  3109. port = mptsas_get_port(phy_info);
  3110. port_details = phy_info->port_details;
  3111. }
  3112. dev_printk(KERN_DEBUG, &phy_info->phy->dev,
  3113. MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
  3114. phy_info->phy_id, phy_info->phy);
  3115. sas_port_delete_phy(port, phy_info->phy);
  3116. }
  3117. if (port) {
  3118. dev_printk(KERN_DEBUG, &port->dev,
  3119. MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
  3120. ioc->name, port->port_identifier,
  3121. (unsigned long long)expander_sas_address);
  3122. sas_port_delete(port);
  3123. mptsas_port_delete(ioc, port_details);
  3124. }
  3125. out:
  3126. printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
  3127. "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
  3128. (unsigned long long)expander_sas_address);
  3129. /*
  3130. * free link
  3131. */
  3132. list_del(&port_info->list);
  3133. kfree(port_info->phy_info);
  3134. kfree(port_info);
  3135. }
3136. /**
3137. * mptsas_send_expander_event - handle expander status change events
3138. * @fw_event: firmware event work object; fw_event->event_data holds
3139. * the MpiEventDataSasExpanderStatusChange_t payload
3140. *
3141. *
3142. * This function handles adding, removing, and refreshing
3143. * device handles within the expander objects.
3144. */
  3145. static void
  3146. mptsas_send_expander_event(struct fw_event_work *fw_event)
  3147. {
  3148. MPT_ADAPTER *ioc;
  3149. MpiEventDataSasExpanderStatusChange_t *expander_data;
  3150. struct mptsas_portinfo *port_info;
  3151. __le64 sas_address;
  3152. int i;
  3153. ioc = fw_event->ioc;
  3154. expander_data = (MpiEventDataSasExpanderStatusChange_t *)
  3155. fw_event->event_data;
  3156. memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
  3157. sas_address = le64_to_cpu(sas_address);
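/*
 * If the expander is already known, only its device handles are
 * refreshed (handles can change across resets); otherwise a new
 * port_info object is allocated and added to the topology.
 */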
  3158. port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
  3159. if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
  3160. if (port_info) {
  3161. for (i = 0; i < port_info->num_phys; i++) {
  3162. port_info->phy_info[i].portinfo = port_info;
  3163. port_info->phy_info[i].handle =
  3164. le16_to_cpu(expander_data->DevHandle);
  3165. port_info->phy_info[i].identify.sas_address =
  3166. le64_to_cpu(sas_address);
  3167. port_info->phy_info[i].identify.handle_parent =
  3168. le16_to_cpu(expander_data->ParentDevHandle);
  3169. }
  3170. mptsas_expander_refresh(ioc, port_info);
  3171. } else if (!port_info && expander_data->NumPhys)
  3172. mptsas_expander_event_add(ioc, expander_data);
  3173. } else if (expander_data->ReasonCode ==
  3174. MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
  3175. mptsas_expander_delete(ioc, port_info, 0);
  3176. mptsas_free_fw_event(ioc, fw_event);
  3177. }
3178. /**
3179. * mptsas_expander_add - read an expander's config page and add it to the topology
3180. * @ioc: Pointer to MPT_ADAPTER structure
3181. * @handle: device handle of the expander to add
3182. *
3183. */
  3184. struct mptsas_portinfo *
  3185. mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
  3186. {
  3187. struct mptsas_portinfo buffer, *port_info;
  3188. int i;
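/*
 * The third argument selects the page addressing form (here, "by
 * device handle") and the fourth supplies the handle itself;
 * mptsas_sas_expander_pg0() combines them into the config page
 * address when it reads SAS Expander Page 0.
 */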
  3189. if ((mptsas_sas_expander_pg0(ioc, &buffer,
  3190. (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
  3191. MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
  3192. return NULL;
  3193. port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
  3194. if (!port_info) {
  3195. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3196. "%s: exit at line=%d\n", ioc->name,
  3197. __func__, __LINE__));
  3198. return NULL;
  3199. }
  3200. port_info->num_phys = buffer.num_phys;
  3201. port_info->phy_info = buffer.phy_info;
  3202. for (i = 0; i < port_info->num_phys; i++)
  3203. port_info->phy_info[i].portinfo = port_info;
  3204. mutex_lock(&ioc->sas_topology_mutex);
  3205. list_add_tail(&port_info->list, &ioc->sas_topology);
  3206. mutex_unlock(&ioc->sas_topology_mutex);
  3207. printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
  3208. "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
  3209. (unsigned long long)buffer.phy_info[0].identify.sas_address);
  3210. mptsas_expander_refresh(ioc, port_info);
  3211. return port_info;
  3212. }
  3213. static void
  3214. mptsas_send_link_status_event(struct fw_event_work *fw_event)
  3215. {
  3216. MPT_ADAPTER *ioc;
  3217. MpiEventDataSasPhyLinkStatus_t *link_data;
  3218. struct mptsas_portinfo *port_info;
  3219. struct mptsas_phyinfo *phy_info = NULL;
  3220. __le64 sas_address;
  3221. u8 phy_num;
  3222. u8 link_rate;
  3223. ioc = fw_event->ioc;
  3224. link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
  3225. memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
  3226. sas_address = le64_to_cpu(sas_address);
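/* the current link rate is carried in the upper nibble of LinkRates */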
  3227. link_rate = link_data->LinkRates >> 4;
  3228. phy_num = link_data->PhyNum;
  3229. port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
  3230. if (port_info) {
  3231. phy_info = &port_info->phy_info[phy_num];
  3232. if (phy_info)
  3233. phy_info->negotiated_link_rate = link_rate;
  3234. }
  3235. if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
  3236. link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
  3237. link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
  3238. if (!port_info) {
  3239. if (ioc->old_sas_discovery_protocal) {
  3240. port_info = mptsas_expander_add(ioc,
  3241. le16_to_cpu(link_data->DevHandle));
  3242. if (port_info)
  3243. goto out;
  3244. }
  3245. goto out;
  3246. }
  3247. if (port_info == ioc->hba_port_info)
  3248. mptsas_probe_hba_phys(ioc);
  3249. else
  3250. mptsas_expander_refresh(ioc, port_info);
  3251. } else if (phy_info && phy_info->phy) {
  3252. if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
  3253. phy_info->phy->negotiated_linkrate =
  3254. SAS_PHY_DISABLED;
  3255. else if (link_rate ==
  3256. MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
  3257. phy_info->phy->negotiated_linkrate =
  3258. SAS_LINK_RATE_FAILED;
  3259. else {
  3260. phy_info->phy->negotiated_linkrate =
  3261. SAS_LINK_RATE_UNKNOWN;
  3262. if (ioc->device_missing_delay &&
  3263. mptsas_is_end_device(&phy_info->attached)) {
  3264. struct scsi_device *sdev;
  3265. VirtDevice *vdevice;
  3266. u8 channel, id;
  3267. id = phy_info->attached.id;
  3268. channel = phy_info->attached.channel;
  3269. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  3270. "Link down for fw_id %d:fw_channel %d\n",
  3271. ioc->name, phy_info->attached.id,
  3272. phy_info->attached.channel));
  3273. shost_for_each_device(sdev, ioc->sh) {
  3274. vdevice = sdev->hostdata;
  3275. if ((vdevice == NULL) ||
  3276. (vdevice->vtarget == NULL))
  3277. continue;
  3278. if ((vdevice->vtarget->tflags &
  3279. MPT_TARGET_FLAGS_RAID_COMPONENT ||
  3280. vdevice->vtarget->raidVolume))
  3281. continue;
  3282. if (vdevice->vtarget->id == id &&
  3283. vdevice->vtarget->channel ==
  3284. channel)
  3285. devtprintk(ioc,
  3286. printk(MYIOC_s_DEBUG_FMT
  3287. "SDEV OUTSTANDING CMDS"
  3288. "%d\n", ioc->name,
  3289. sdev->device_busy));
  3290. }
  3291. }
  3292. }
  3293. }
  3294. out:
  3295. mptsas_free_fw_event(ioc, fw_event);
  3296. }
  3297. static void
  3298. mptsas_not_responding_devices(MPT_ADAPTER *ioc)
  3299. {
  3300. struct mptsas_portinfo buffer, *port_info;
  3301. struct mptsas_device_info *sas_info;
  3302. struct mptsas_devinfo sas_device;
  3303. u32 handle;
  3304. VirtTarget *vtarget = NULL;
  3305. struct mptsas_phyinfo *phy_info;
  3306. u8 found_expander;
  3307. int retval, retry_count;
  3308. unsigned long flags;
  3309. mpt_findImVolumes(ioc);
  3310. spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
  3311. if (ioc->ioc_reset_in_progress) {
  3312. dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  3313. "%s: exiting due to a parallel reset \n", ioc->name,
  3314. __func__));
  3315. spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
  3316. return;
  3317. }
  3318. spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
  3319. /* devices, logical volumes */
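/*
 * For every device we know about, re-read SAS Device Page 0 by
 * bus/target id.  A device that no longer returns a handle is treated
 * as missing and deleted below; logical volumes are checked against
 * IOC Page 2 via mptsas_volume_delete().
 */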
  3320. mutex_lock(&ioc->sas_device_info_mutex);
  3321. redo_device_scan:
  3322. list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
  3323. if (sas_info->is_cached)
  3324. continue;
  3325. if (!sas_info->is_logical_volume) {
  3326. sas_device.handle = 0;
  3327. retry_count = 0;
  3328. retry_page:
  3329. retval = mptsas_sas_device_pg0(ioc, &sas_device,
  3330. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
  3331. << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  3332. (sas_info->fw.channel << 8) +
  3333. sas_info->fw.id);
  3334. if (sas_device.handle)
  3335. continue;
  3336. if (retval == -EBUSY) {
  3337. spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
  3338. if (ioc->ioc_reset_in_progress) {
  3339. dfailprintk(ioc,
  3340. printk(MYIOC_s_DEBUG_FMT
  3341. "%s: exiting due to reset\n",
  3342. ioc->name, __func__));
  3343. spin_unlock_irqrestore
  3344. (&ioc->taskmgmt_lock, flags);
  3345. mutex_unlock(&ioc->
  3346. sas_device_info_mutex);
  3347. return;
  3348. }
  3349. spin_unlock_irqrestore(&ioc->taskmgmt_lock,
  3350. flags);
  3351. }
  3352. if (retval && (retval != -ENODEV)) {
  3353. if (retry_count < 10) {
  3354. retry_count++;
  3355. goto retry_page;
  3356. } else {
  3357. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  3358. "%s: Config page retry exceeded retry "
  3359. "count deleting device 0x%llx\n",
  3360. ioc->name, __func__,
  3361. sas_info->sas_address));
  3362. }
  3363. }
  3364. /* delete device */
  3365. vtarget = mptsas_find_vtarget(ioc,
  3366. sas_info->fw.channel, sas_info->fw.id);
  3367. if (vtarget)
  3368. vtarget->deleted = 1;
  3369. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  3370. sas_info->sas_address);
  3371. if (phy_info) {
  3372. mptsas_del_end_device(ioc, phy_info);
  3373. goto redo_device_scan;
  3374. }
  3375. } else
  3376. mptsas_volume_delete(ioc, sas_info->fw.id);
  3377. }
  3378. mutex_unlock(&ioc->sas_device_info_mutex);
  3379. /* expanders */
  3380. mutex_lock(&ioc->sas_topology_mutex);
  3381. redo_expander_scan:
  3382. list_for_each_entry(port_info, &ioc->sas_topology, list) {
  3383. if (port_info->phy_info &&
  3384. (!(port_info->phy_info[0].identify.device_info &
  3385. MPI_SAS_DEVICE_INFO_SMP_TARGET)))
  3386. continue;
  3387. found_expander = 0;
  3388. handle = 0xFFFF;
  3389. while (!mptsas_sas_expander_pg0(ioc, &buffer,
  3390. (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
  3391. MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
  3392. !found_expander) {
  3393. handle = buffer.phy_info[0].handle;
  3394. if (buffer.phy_info[0].identify.sas_address ==
  3395. port_info->phy_info[0].identify.sas_address) {
  3396. found_expander = 1;
  3397. }
  3398. kfree(buffer.phy_info);
  3399. }
  3400. if (!found_expander) {
  3401. mptsas_expander_delete(ioc, port_info, 0);
  3402. goto redo_expander_scan;
  3403. }
  3404. }
  3405. mutex_unlock(&ioc->sas_topology_mutex);
  3406. }
  3407. /**
3408. * mptsas_probe_expanders - probe for and add expanders
  3409. * @ioc: Pointer to MPT_ADAPTER structure
  3410. *
  3411. **/
  3412. static void
  3413. mptsas_probe_expanders(MPT_ADAPTER *ioc)
  3414. {
  3415. struct mptsas_portinfo buffer, *port_info;
  3416. u32 handle;
  3417. int i;
  3418. handle = 0xFFFF;
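/*
 * Walk the firmware's expander list using the GET_NEXT_HANDLE form,
 * starting from 0xFFFF; each pass returns the expander handle that
 * follows the one supplied.
 */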
  3419. while (!mptsas_sas_expander_pg0(ioc, &buffer,
  3420. (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
  3421. MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
  3422. handle = buffer.phy_info[0].handle;
  3423. port_info = mptsas_find_portinfo_by_sas_address(ioc,
  3424. buffer.phy_info[0].identify.sas_address);
  3425. if (port_info) {
  3426. /* refreshing handles */
  3427. for (i = 0; i < buffer.num_phys; i++) {
  3428. port_info->phy_info[i].handle = handle;
  3429. port_info->phy_info[i].identify.handle_parent =
  3430. buffer.phy_info[0].identify.handle_parent;
  3431. }
  3432. mptsas_expander_refresh(ioc, port_info);
  3433. kfree(buffer.phy_info);
  3434. continue;
  3435. }
  3436. port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
  3437. if (!port_info) {
  3438. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3439. "%s: exit at line=%d\n", ioc->name,
  3440. __func__, __LINE__));
  3441. return;
  3442. }
  3443. port_info->num_phys = buffer.num_phys;
  3444. port_info->phy_info = buffer.phy_info;
  3445. for (i = 0; i < port_info->num_phys; i++)
  3446. port_info->phy_info[i].portinfo = port_info;
  3447. mutex_lock(&ioc->sas_topology_mutex);
  3448. list_add_tail(&port_info->list, &ioc->sas_topology);
  3449. mutex_unlock(&ioc->sas_topology_mutex);
  3450. printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
  3451. "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
  3452. (unsigned long long)buffer.phy_info[0].identify.sas_address);
  3453. mptsas_expander_refresh(ioc, port_info);
  3454. }
  3455. }
  3456. static void
  3457. mptsas_probe_devices(MPT_ADAPTER *ioc)
  3458. {
  3459. u16 handle;
  3460. struct mptsas_devinfo sas_device;
  3461. struct mptsas_phyinfo *phy_info;
  3462. handle = 0xFFFF;
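/*
 * Iterate over every device known to the firmware via the
 * GET_NEXT_HANDLE form of SAS Device Page 0, then attach any
 * SSP/STP/SATA end device that has a firmware bus/target mapping
 * and is not already registered.
 */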
  3463. while (!(mptsas_sas_device_pg0(ioc, &sas_device,
  3464. MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
  3465. handle = sas_device.handle;
  3466. if ((sas_device.device_info &
  3467. (MPI_SAS_DEVICE_INFO_SSP_TARGET |
  3468. MPI_SAS_DEVICE_INFO_STP_TARGET |
  3469. MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
  3470. continue;
  3471. /* If there is no FW B_T mapping for this device then continue
  3472. * */
  3473. if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
  3474. || !(sas_device.flags &
  3475. MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
  3476. continue;
  3477. phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
  3478. if (!phy_info)
  3479. continue;
  3480. if (mptsas_get_rphy(phy_info))
  3481. continue;
  3482. mptsas_add_end_device(ioc, phy_info);
  3483. }
  3484. }
3485. /**
3486. * mptsas_scan_sas_topology - scan for the initial SAS topology
3487. * @ioc: Pointer to MPT_ADAPTER structure
3488. *
3489. * Probes the hba phys, expanders, end devices, and RAID volumes.
3490. **/
  3491. static void
  3492. mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
  3493. {
  3494. struct scsi_device *sdev;
  3495. int i;
  3496. mptsas_probe_hba_phys(ioc);
  3497. mptsas_probe_expanders(ioc);
  3498. mptsas_probe_devices(ioc);
  3499. /*
  3500. Reporting RAID volumes.
  3501. */
  3502. if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
  3503. !ioc->raid_data.pIocPg2->NumActiveVolumes)
  3504. return;
  3505. for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
  3506. sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
  3507. ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
  3508. if (sdev) {
  3509. scsi_device_put(sdev);
  3510. continue;
  3511. }
  3512. printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
  3513. "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
  3514. ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
  3515. scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
  3516. ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
  3517. }
  3518. }
  3519. static void
  3520. mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
  3521. {
  3522. MPT_ADAPTER *ioc;
  3523. EventDataQueueFull_t *qfull_data;
  3524. struct mptsas_device_info *sas_info;
  3525. struct scsi_device *sdev;
  3526. int depth;
  3527. int id = -1;
  3528. int channel = -1;
  3529. int fw_id, fw_channel;
  3530. u16 current_depth;
  3531. ioc = fw_event->ioc;
  3532. qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
  3533. fw_id = qfull_data->TargetID;
  3534. fw_channel = qfull_data->Bus;
  3535. current_depth = le16_to_cpu(qfull_data->CurrentDepth);
  3536. /* if hidden raid component, look for the volume id */
  3537. mutex_lock(&ioc->sas_device_info_mutex);
  3538. if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
  3539. list_for_each_entry(sas_info, &ioc->sas_device_info_list,
  3540. list) {
  3541. if (sas_info->is_cached ||
  3542. sas_info->is_logical_volume)
  3543. continue;
  3544. if (sas_info->is_hidden_raid_component &&
  3545. (sas_info->fw.channel == fw_channel &&
  3546. sas_info->fw.id == fw_id)) {
  3547. id = sas_info->volume_id;
  3548. channel = MPTSAS_RAID_CHANNEL;
  3549. goto out;
  3550. }
  3551. }
  3552. } else {
  3553. list_for_each_entry(sas_info, &ioc->sas_device_info_list,
  3554. list) {
  3555. if (sas_info->is_cached ||
  3556. sas_info->is_hidden_raid_component ||
  3557. sas_info->is_logical_volume)
  3558. continue;
  3559. if (sas_info->fw.channel == fw_channel &&
  3560. sas_info->fw.id == fw_id) {
  3561. id = sas_info->os.id;
  3562. channel = sas_info->os.channel;
  3563. goto out;
  3564. }
  3565. }
  3566. }
  3567. out:
  3568. mutex_unlock(&ioc->sas_device_info_mutex);
  3569. if (id != -1) {
  3570. shost_for_each_device(sdev, ioc->sh) {
  3571. if (sdev->id == id && sdev->channel == channel) {
  3572. if (current_depth > sdev->queue_depth) {
  3573. sdev_printk(KERN_INFO, sdev,
  3574. "strange observation, the queue "
  3575. "depth is (%d) meanwhile fw queue "
  3576. "depth (%d)\n", sdev->queue_depth,
  3577. current_depth);
  3578. continue;
  3579. }
  3580. depth = scsi_track_queue_full(sdev,
  3581. current_depth - 1);
  3582. if (depth > 0)
  3583. sdev_printk(KERN_INFO, sdev,
  3584. "Queue depth reduced to (%d)\n",
  3585. depth);
  3586. else if (depth < 0)
  3587. sdev_printk(KERN_INFO, sdev,
  3588. "Tagged Command Queueing is being "
  3589. "disabled\n");
  3590. else if (depth == 0)
  3591. sdev_printk(KERN_INFO, sdev,
  3592. "Queue depth not changed yet\n");
  3593. }
  3594. }
  3595. }
  3596. mptsas_free_fw_event(ioc, fw_event);
  3597. }
  3598. static struct mptsas_phyinfo *
  3599. mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
  3600. {
  3601. struct mptsas_portinfo *port_info;
  3602. struct mptsas_phyinfo *phy_info = NULL;
  3603. int i;
  3604. mutex_lock(&ioc->sas_topology_mutex);
  3605. list_for_each_entry(port_info, &ioc->sas_topology, list) {
  3606. for (i = 0; i < port_info->num_phys; i++) {
  3607. if (!mptsas_is_end_device(
  3608. &port_info->phy_info[i].attached))
  3609. continue;
  3610. if (port_info->phy_info[i].attached.sas_address
  3611. != sas_address)
  3612. continue;
  3613. phy_info = &port_info->phy_info[i];
  3614. break;
  3615. }
  3616. }
  3617. mutex_unlock(&ioc->sas_topology_mutex);
  3618. return phy_info;
  3619. }
3620. /**
3621. * mptsas_find_phyinfo_by_phys_disk_num - find the phy info for a RAID component
3622. * @ioc: Pointer to MPT_ADAPTER structure
3623. * @phys_disk_num: physical disk number of the RAID component
3624. * @channel: firmware channel of the RAID component
3625. * @id: firmware target id of the RAID component
3626. *
3627. **/
  3628. static struct mptsas_phyinfo *
  3629. mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
  3630. u8 channel, u8 id)
  3631. {
  3632. struct mptsas_phyinfo *phy_info = NULL;
  3633. struct mptsas_portinfo *port_info;
  3634. RaidPhysDiskPage1_t *phys_disk = NULL;
  3635. int num_paths;
  3636. u64 sas_address = 0;
  3637. int i;
  3638. phy_info = NULL;
  3639. if (!ioc->raid_data.pIocPg3)
  3640. return NULL;
  3641. /* dual port support */
  3642. num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
  3643. if (!num_paths)
  3644. goto out;
  3645. phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
  3646. (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
  3647. if (!phys_disk)
  3648. goto out;
  3649. mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
  3650. for (i = 0; i < num_paths; i++) {
  3651. if ((phys_disk->Path[i].Flags & 1) != 0)
  3652. /* entry no longer valid */
  3653. continue;
  3654. if ((id == phys_disk->Path[i].PhysDiskID) &&
  3655. (channel == phys_disk->Path[i].PhysDiskBus)) {
  3656. memcpy(&sas_address, &phys_disk->Path[i].WWID,
  3657. sizeof(u64));
  3658. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  3659. sas_address);
  3660. goto out;
  3661. }
  3662. }
  3663. out:
  3664. kfree(phys_disk);
  3665. if (phy_info)
  3666. return phy_info;
  3667. /*
  3668. * Extra code to handle RAID0 case, where the sas_address is not updated
  3669. * in phys_disk_page_1 when hotswapped
  3670. */
  3671. mutex_lock(&ioc->sas_topology_mutex);
  3672. list_for_each_entry(port_info, &ioc->sas_topology, list) {
  3673. for (i = 0; i < port_info->num_phys && !phy_info; i++) {
  3674. if (!mptsas_is_end_device(
  3675. &port_info->phy_info[i].attached))
  3676. continue;
  3677. if (port_info->phy_info[i].attached.phys_disk_num == ~0)
  3678. continue;
  3679. if ((port_info->phy_info[i].attached.phys_disk_num ==
  3680. phys_disk_num) &&
  3681. (port_info->phy_info[i].attached.id == id) &&
  3682. (port_info->phy_info[i].attached.channel ==
  3683. channel))
  3684. phy_info = &port_info->phy_info[i];
  3685. }
  3686. }
  3687. mutex_unlock(&ioc->sas_topology_mutex);
  3688. return phy_info;
  3689. }
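/*
 * Toggling no_uld_attach before reprobing controls whether upper layer
 * drivers (e.g. sd) bind to the device; this is how hidden RAID
 * components are masked from, and later re-exposed to, the OS.
 */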
  3690. static void
  3691. mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
  3692. {
  3693. int rc;
  3694. sdev->no_uld_attach = data ? 1 : 0;
  3695. rc = scsi_device_reprobe(sdev);
  3696. }
  3697. static void
  3698. mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
  3699. {
  3700. starget_for_each_device(starget, uld_attach ? (void *)1 : NULL,
  3701. mptsas_reprobe_lun);
  3702. }
  3703. static void
  3704. mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
  3705. {
  3706. CONFIGPARMS cfg;
  3707. ConfigPageHeader_t hdr;
  3708. dma_addr_t dma_handle;
  3709. pRaidVolumePage0_t buffer = NULL;
  3710. RaidPhysDiskPage0_t phys_disk;
  3711. int i;
  3712. struct mptsas_phyinfo *phy_info;
  3713. struct mptsas_devinfo sas_device;
  3714. memset(&cfg, 0 , sizeof(CONFIGPARMS));
  3715. memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
  3716. hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
  3717. cfg.pageAddr = (channel << 8) + id;
  3718. cfg.cfghdr.hdr = &hdr;
  3719. cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
  3720. cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
  3721. if (mpt_config(ioc, &cfg) != 0)
  3722. goto out;
  3723. if (!hdr.PageLength)
  3724. goto out;
  3725. buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
  3726. &dma_handle);
  3727. if (!buffer)
  3728. goto out;
  3729. cfg.physAddr = dma_handle;
  3730. cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  3731. if (mpt_config(ioc, &cfg) != 0)
  3732. goto out;
  3733. if (!(buffer->VolumeStatus.Flags &
  3734. MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE))
  3735. goto out;
  3736. if (!buffer->NumPhysDisks)
  3737. goto out;
  3738. for (i = 0; i < buffer->NumPhysDisks; i++) {
  3739. if (mpt_raid_phys_disk_pg0(ioc,
  3740. buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
  3741. continue;
  3742. if (mptsas_sas_device_pg0(ioc, &sas_device,
  3743. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
  3744. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  3745. (phys_disk.PhysDiskBus << 8) +
  3746. phys_disk.PhysDiskID))
  3747. continue;
  3748. /* If there is no FW B_T mapping for this device then continue
  3749. * */
  3750. if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
  3751. || !(sas_device.flags &
  3752. MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
  3753. continue;
  3754. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  3755. sas_device.sas_address);
  3756. mptsas_add_end_device(ioc, phy_info);
  3757. }
  3758. out:
  3759. if (buffer)
  3760. pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
  3761. dma_handle);
  3762. }
  3763. /*
  3764. * Work queue thread to handle SAS hotplug events
  3765. */
  3766. static void
  3767. mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
  3768. struct mptsas_hotplug_event *hot_plug_info)
  3769. {
  3770. struct mptsas_phyinfo *phy_info;
  3771. struct scsi_target * starget;
  3772. struct mptsas_devinfo sas_device;
  3773. VirtTarget *vtarget;
  3774. int i;
  3775. struct mptsas_portinfo *port_info;
  3776. switch (hot_plug_info->event_type) {
  3777. case MPTSAS_ADD_PHYSDISK:
  3778. if (!ioc->raid_data.pIocPg2)
  3779. break;
  3780. for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
  3781. if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
  3782. hot_plug_info->id) {
  3783. printk(MYIOC_s_WARN_FMT "firmware bug: unable "
  3784. "to add hidden disk - target_id matchs "
  3785. "volume_id\n", ioc->name);
  3786. mptsas_free_fw_event(ioc, fw_event);
  3787. return;
  3788. }
  3789. }
  3790. mpt_findImVolumes(ioc);
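/* fall through - the physical disk is added via the regular device-add path */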
  3791. case MPTSAS_ADD_DEVICE:
  3792. memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
  3793. mptsas_sas_device_pg0(ioc, &sas_device,
  3794. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
  3795. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  3796. (hot_plug_info->channel << 8) +
  3797. hot_plug_info->id);
  3798. /* If there is no FW B_T mapping for this device then break
  3799. * */
  3800. if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
  3801. || !(sas_device.flags &
  3802. MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
  3803. break;
  3804. if (!sas_device.handle)
  3805. return;
  3806. phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
  3807. /* Only For SATA Device ADD */
  3808. if (!phy_info && (sas_device.device_info &
  3809. MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
  3810. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  3811. "%s %d SATA HOT PLUG: "
  3812. "parent handle of device %x\n", ioc->name,
  3813. __func__, __LINE__, sas_device.handle_parent));
  3814. port_info = mptsas_find_portinfo_by_handle(ioc,
  3815. sas_device.handle_parent);
  3816. if (port_info == ioc->hba_port_info)
  3817. mptsas_probe_hba_phys(ioc);
  3818. else if (port_info)
  3819. mptsas_expander_refresh(ioc, port_info);
  3820. else {
  3821. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3822. "%s %d port info is NULL\n",
  3823. ioc->name, __func__, __LINE__));
  3824. break;
  3825. }
  3826. phy_info = mptsas_refreshing_device_handles
  3827. (ioc, &sas_device);
  3828. }
  3829. if (!phy_info) {
  3830. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3831. "%s %d phy info is NULL\n",
  3832. ioc->name, __func__, __LINE__));
  3833. break;
  3834. }
  3835. if (mptsas_get_rphy(phy_info))
  3836. break;
  3837. mptsas_add_end_device(ioc, phy_info);
  3838. break;
  3839. case MPTSAS_DEL_DEVICE:
  3840. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  3841. hot_plug_info->sas_address);
  3842. mptsas_del_end_device(ioc, phy_info);
  3843. break;
  3844. case MPTSAS_DEL_PHYSDISK:
  3845. mpt_findImVolumes(ioc);
  3846. phy_info = mptsas_find_phyinfo_by_phys_disk_num(
  3847. ioc, hot_plug_info->phys_disk_num,
  3848. hot_plug_info->channel,
  3849. hot_plug_info->id);
  3850. mptsas_del_end_device(ioc, phy_info);
  3851. break;
  3852. case MPTSAS_ADD_PHYSDISK_REPROBE:
  3853. if (mptsas_sas_device_pg0(ioc, &sas_device,
  3854. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
  3855. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  3856. (hot_plug_info->channel << 8) + hot_plug_info->id)) {
  3857. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3858. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3859. __func__, hot_plug_info->id, __LINE__));
  3860. break;
  3861. }
  3862. /* If there is no FW B_T mapping for this device then break
  3863. * */
  3864. if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
  3865. || !(sas_device.flags &
  3866. MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
  3867. break;
  3868. phy_info = mptsas_find_phyinfo_by_sas_address(
  3869. ioc, sas_device.sas_address);
  3870. if (!phy_info) {
  3871. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3872. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3873. __func__, hot_plug_info->id, __LINE__));
  3874. break;
  3875. }
  3876. starget = mptsas_get_starget(phy_info);
  3877. if (!starget) {
  3878. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3879. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3880. __func__, hot_plug_info->id, __LINE__));
  3881. break;
  3882. }
  3883. vtarget = starget->hostdata;
  3884. if (!vtarget) {
  3885. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3886. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3887. __func__, hot_plug_info->id, __LINE__));
  3888. break;
  3889. }
  3890. mpt_findImVolumes(ioc);
3891. starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
  3892. "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
  3893. ioc->name, hot_plug_info->channel, hot_plug_info->id,
  3894. hot_plug_info->phys_disk_num, (unsigned long long)
  3895. sas_device.sas_address);
  3896. vtarget->id = hot_plug_info->phys_disk_num;
  3897. vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
  3898. phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
  3899. mptsas_reprobe_target(starget, 1);
  3900. break;
  3901. case MPTSAS_DEL_PHYSDISK_REPROBE:
  3902. if (mptsas_sas_device_pg0(ioc, &sas_device,
  3903. (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
  3904. MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
  3905. (hot_plug_info->channel << 8) + hot_plug_info->id)) {
  3906. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3907. "%s: fw_id=%d exit at line=%d\n",
  3908. ioc->name, __func__,
  3909. hot_plug_info->id, __LINE__));
  3910. break;
  3911. }
  3912. /* If there is no FW B_T mapping for this device then break
  3913. * */
  3914. if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
  3915. || !(sas_device.flags &
  3916. MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
  3917. break;
  3918. phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
  3919. sas_device.sas_address);
  3920. if (!phy_info) {
  3921. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3922. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3923. __func__, hot_plug_info->id, __LINE__));
  3924. break;
  3925. }
  3926. starget = mptsas_get_starget(phy_info);
  3927. if (!starget) {
  3928. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3929. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3930. __func__, hot_plug_info->id, __LINE__));
  3931. break;
  3932. }
  3933. vtarget = starget->hostdata;
  3934. if (!vtarget) {
  3935. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3936. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3937. __func__, hot_plug_info->id, __LINE__));
  3938. break;
  3939. }
  3940. if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
  3941. dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
  3942. "%s: fw_id=%d exit at line=%d\n", ioc->name,
  3943. __func__, hot_plug_info->id, __LINE__));
  3944. break;
  3945. }
  3946. mpt_findImVolumes(ioc);
  3947. starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
  3948. " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
  3949. ioc->name, hot_plug_info->channel, hot_plug_info->id,
  3950. hot_plug_info->phys_disk_num, (unsigned long long)
  3951. sas_device.sas_address);
  3952. vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
  3953. vtarget->id = hot_plug_info->id;
  3954. phy_info->attached.phys_disk_num = ~0;
  3955. mptsas_reprobe_target(starget, 0);
  3956. mptsas_add_device_component_by_fw(ioc,
  3957. hot_plug_info->channel, hot_plug_info->id);
  3958. break;
  3959. case MPTSAS_ADD_RAID:
  3960. mpt_findImVolumes(ioc);
  3961. printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
  3962. "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
  3963. hot_plug_info->id);
  3964. scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
  3965. hot_plug_info->id, 0);
  3966. break;
  3967. case MPTSAS_DEL_RAID:
  3968. mpt_findImVolumes(ioc);
  3969. printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
  3970. "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
  3971. hot_plug_info->id);
  3972. scsi_remove_device(hot_plug_info->sdev);
  3973. scsi_device_put(hot_plug_info->sdev);
  3974. break;
  3975. case MPTSAS_ADD_INACTIVE_VOLUME:
  3976. mpt_findImVolumes(ioc);
  3977. mptsas_adding_inactive_raid_components(ioc,
  3978. hot_plug_info->channel, hot_plug_info->id);
  3979. break;
  3980. default:
  3981. break;
  3982. }
  3983. mptsas_free_fw_event(ioc, fw_event);
  3984. }
  3985. static void
  3986. mptsas_send_sas_event(struct fw_event_work *fw_event)
  3987. {
  3988. MPT_ADAPTER *ioc;
  3989. struct mptsas_hotplug_event hot_plug_info;
  3990. EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
  3991. u32 device_info;
  3992. u64 sas_address;
  3993. ioc = fw_event->ioc;
  3994. sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
  3995. fw_event->event_data;
  3996. device_info = le32_to_cpu(sas_event_data->DeviceInfo);
  3997. if ((device_info &
  3998. (MPI_SAS_DEVICE_INFO_SSP_TARGET |
  3999. MPI_SAS_DEVICE_INFO_STP_TARGET |
  4000. MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
  4001. mptsas_free_fw_event(ioc, fw_event);
  4002. return;
  4003. }
  4004. if (sas_event_data->ReasonCode ==
  4005. MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
  4006. mptbase_sas_persist_operation(ioc,
  4007. MPI_SAS_OP_CLEAR_NOT_PRESENT);
  4008. mptsas_free_fw_event(ioc, fw_event);
  4009. return;
  4010. }
  4011. switch (sas_event_data->ReasonCode) {
  4012. case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
  4013. case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
  4014. memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
  4015. hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
  4016. hot_plug_info.channel = sas_event_data->Bus;
  4017. hot_plug_info.id = sas_event_data->TargetID;
  4018. hot_plug_info.phy_id = sas_event_data->PhyNum;
  4019. memcpy(&sas_address, &sas_event_data->SASAddress,
  4020. sizeof(u64));
  4021. hot_plug_info.sas_address = le64_to_cpu(sas_address);
  4022. hot_plug_info.device_info = device_info;
  4023. if (sas_event_data->ReasonCode &
  4024. MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
  4025. hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
  4026. else
  4027. hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
  4028. mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
  4029. break;
  4030. case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
  4031. mptbase_sas_persist_operation(ioc,
  4032. MPI_SAS_OP_CLEAR_NOT_PRESENT);
  4033. mptsas_free_fw_event(ioc, fw_event);
  4034. break;
  4035. case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
  4036. /* TODO */
  4037. case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
  4038. /* TODO */
  4039. default:
  4040. mptsas_free_fw_event(ioc, fw_event);
  4041. break;
  4042. }
  4043. }
  4044. static void
  4045. mptsas_send_raid_event(struct fw_event_work *fw_event)
  4046. {
  4047. MPT_ADAPTER *ioc;
  4048. EVENT_DATA_RAID *raid_event_data;
  4049. struct mptsas_hotplug_event hot_plug_info;
  4050. int status;
  4051. int state;
  4052. struct scsi_device *sdev = NULL;
  4053. VirtDevice *vdevice = NULL;
  4054. RaidPhysDiskPage0_t phys_disk;
  4055. ioc = fw_event->ioc;
  4056. raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
  4057. status = le32_to_cpu(raid_event_data->SettingsStatus);
  4058. state = (status >> 8) & 0xff;
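/*
 * The second byte of SettingsStatus carries the new state of the
 * volume or physical disk; the low bits are checked further down
 * against MPI_RAIDVOL0_STATUS_FLAG_ENABLED for volume status changes.
 */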
  4059. memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
  4060. hot_plug_info.id = raid_event_data->VolumeID;
  4061. hot_plug_info.channel = raid_event_data->VolumeBus;
  4062. hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
  4063. if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
  4064. raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
  4065. raid_event_data->ReasonCode ==
  4066. MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
  4067. sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
  4068. hot_plug_info.id, 0);
  4069. hot_plug_info.sdev = sdev;
  4070. if (sdev)
  4071. vdevice = sdev->hostdata;
  4072. }
  4073. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
  4074. "ReasonCode=%02x\n", ioc->name, __func__,
  4075. raid_event_data->ReasonCode));
  4076. switch (raid_event_data->ReasonCode) {
  4077. case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
  4078. hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
  4079. break;
  4080. case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
  4081. hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
  4082. break;
  4083. case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
  4084. switch (state) {
  4085. case MPI_PD_STATE_ONLINE:
  4086. case MPI_PD_STATE_NOT_COMPATIBLE:
  4087. mpt_raid_phys_disk_pg0(ioc,
  4088. raid_event_data->PhysDiskNum, &phys_disk);
  4089. hot_plug_info.id = phys_disk.PhysDiskID;
  4090. hot_plug_info.channel = phys_disk.PhysDiskBus;
  4091. hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
  4092. break;
  4093. case MPI_PD_STATE_FAILED:
  4094. case MPI_PD_STATE_MISSING:
  4095. case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
  4096. case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
  4097. case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
  4098. hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
  4099. break;
  4100. default:
  4101. break;
  4102. }
  4103. break;
  4104. case MPI_EVENT_RAID_RC_VOLUME_DELETED:
  4105. if (!sdev)
  4106. break;
  4107. vdevice->vtarget->deleted = 1; /* block IO */
  4108. hot_plug_info.event_type = MPTSAS_DEL_RAID;
  4109. break;
  4110. case MPI_EVENT_RAID_RC_VOLUME_CREATED:
  4111. if (sdev) {
  4112. scsi_device_put(sdev);
  4113. break;
  4114. }
  4115. hot_plug_info.event_type = MPTSAS_ADD_RAID;
  4116. break;
  4117. case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
  4118. if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
  4119. if (!sdev)
  4120. break;
  4121. vdevice->vtarget->deleted = 1; /* block IO */
  4122. hot_plug_info.event_type = MPTSAS_DEL_RAID;
  4123. break;
  4124. }
  4125. switch (state) {
  4126. case MPI_RAIDVOL0_STATUS_STATE_FAILED:
  4127. case MPI_RAIDVOL0_STATUS_STATE_MISSING:
  4128. if (!sdev)
  4129. break;
  4130. vdevice->vtarget->deleted = 1; /* block IO */
  4131. hot_plug_info.event_type = MPTSAS_DEL_RAID;
  4132. break;
  4133. case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
  4134. case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
  4135. if (sdev) {
  4136. scsi_device_put(sdev);
  4137. break;
  4138. }
  4139. hot_plug_info.event_type = MPTSAS_ADD_RAID;
  4140. break;
  4141. default:
  4142. break;
  4143. }
  4144. break;
  4145. default:
  4146. break;
  4147. }
  4148. if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
  4149. mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
  4150. else
  4151. mptsas_free_fw_event(ioc, fw_event);
  4152. }
  4153. /**
  4154. * mptsas_issue_tm - send mptsas internal tm request
  4155. * @ioc: Pointer to MPT_ADAPTER structure
  4156. * @type: Task Management type
  4157. * @channel: channel number for task management
  4158. * @id: Logical Target ID for reset (if appropriate)
  4159. * @lun: Logical unit for reset (if appropriate)
  4160. * @task_context: Context for the task to be aborted
4161. * @timeout: timeout, in seconds, for the task management request
4162. * @issue_reset: set to 1 when the caller should issue a host reset
4163. *
4164. * Return: 0 on success and -1 on failure.
  4165. */
  4166. static int
  4167. mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
  4168. int task_context, ulong timeout, u8 *issue_reset)
  4169. {
  4170. MPT_FRAME_HDR *mf;
  4171. SCSITaskMgmt_t *pScsiTm;
  4172. int retval;
  4173. unsigned long timeleft;
  4174. *issue_reset = 0;
  4175. mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
  4176. if (mf == NULL) {
  4177. retval = -1; /* return failure */
  4178. dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
  4179. "msg frames!!\n", ioc->name));
  4180. goto out;
  4181. }
  4182. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
  4183. "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
  4184. "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
  4185. type, timeout, channel, id, (unsigned long long)lun,
  4186. task_context));
  4187. pScsiTm = (SCSITaskMgmt_t *) mf;
  4188. memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
  4189. pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
  4190. pScsiTm->TaskType = type;
  4191. pScsiTm->MsgFlags = 0;
  4192. pScsiTm->TargetID = id;
  4193. pScsiTm->Bus = channel;
  4194. pScsiTm->ChainOffset = 0;
  4195. pScsiTm->Reserved = 0;
  4196. pScsiTm->Reserved1 = 0;
  4197. pScsiTm->TaskMsgContext = task_context;
  4198. int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
  4199. INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
  4200. CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
  4201. retval = 0;
  4202. mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
  4203. /* Now wait for the command to complete */
  4204. timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
  4205. timeout*HZ);
  4206. if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
  4207. retval = -1; /* return failure */
  4208. dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
  4209. "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
  4210. mpt_free_msg_frame(ioc, mf);
  4211. if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
  4212. goto out;
  4213. *issue_reset = 1;
  4214. goto out;
  4215. }
  4216. if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
  4217. retval = -1; /* return failure */
  4218. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4219. "TaskMgmt request: failed with no reply\n", ioc->name));
  4220. goto out;
  4221. }
  4222. out:
  4223. CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
  4224. return retval;
  4225. }
  4226. /**
  4227. * mptsas_broadcast_primative_work - Handle broadcast primitives
4228. * @fw_event: firmware event work object describing the broadcast event
4229. *
4230. * This is handled in workqueue context.
  4231. */
  4232. static void
  4233. mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
  4234. {
  4235. MPT_ADAPTER *ioc = fw_event->ioc;
  4236. MPT_FRAME_HDR *mf;
  4237. VirtDevice *vdevice;
  4238. int ii;
  4239. struct scsi_cmnd *sc;
  4240. SCSITaskMgmtReply_t *pScsiTmReply;
  4241. u8 issue_reset;
  4242. int task_context;
  4243. u8 channel, id;
  4244. int lun;
  4245. u32 termination_count;
  4246. u32 query_count;
  4247. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4248. "%s - enter\n", ioc->name, __func__));
  4249. mutex_lock(&ioc->taskmgmt_cmds.mutex);
  4250. if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
  4251. mutex_unlock(&ioc->taskmgmt_cmds.mutex);
  4252. mptsas_requeue_fw_event(ioc, fw_event, 1000);
  4253. return;
  4254. }
  4255. issue_reset = 0;
  4256. termination_count = 0;
  4257. query_count = 0;
  4258. mpt_findImVolumes(ioc);
  4259. pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
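/*
 * For every outstanding command, issue a QUERY_TASK.  If the query
 * reports the task as still present at the IOC (or queued on it),
 * the command is left alone; otherwise an ABRT_TASK_SET is sent for
 * that device to flush it out.
 */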
  4260. for (ii = 0; ii < ioc->req_depth; ii++) {
  4261. if (ioc->fw_events_off)
  4262. goto out;
  4263. sc = mptscsih_get_scsi_lookup(ioc, ii);
  4264. if (!sc)
  4265. continue;
  4266. mf = MPT_INDEX_2_MFPTR(ioc, ii);
  4267. if (!mf)
  4268. continue;
  4269. task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
  4270. vdevice = sc->device->hostdata;
  4271. if (!vdevice || !vdevice->vtarget)
  4272. continue;
  4273. if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
  4274. continue; /* skip hidden raid components */
  4275. if (vdevice->vtarget->raidVolume)
  4276. continue; /* skip hidden raid components */
  4277. channel = vdevice->vtarget->channel;
  4278. id = vdevice->vtarget->id;
  4279. lun = vdevice->lun;
  4280. if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
  4281. channel, id, (u64)lun, task_context, 30, &issue_reset))
  4282. goto out;
  4283. query_count++;
  4284. termination_count +=
  4285. le32_to_cpu(pScsiTmReply->TerminationCount);
  4286. if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
  4287. (pScsiTmReply->ResponseCode ==
  4288. MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
  4289. pScsiTmReply->ResponseCode ==
  4290. MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
  4291. continue;
  4292. if (mptsas_issue_tm(ioc,
  4293. MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
  4294. channel, id, (u64)lun, 0, 30, &issue_reset))
  4295. goto out;
  4296. termination_count +=
  4297. le32_to_cpu(pScsiTmReply->TerminationCount);
  4298. }
  4299. out:
  4300. dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4301. "%s - exit, query_count = %d termination_count = %d\n",
  4302. ioc->name, __func__, query_count, termination_count));
  4303. ioc->broadcast_aen_busy = 0;
  4304. mpt_clear_taskmgmt_in_progress_flag(ioc);
  4305. mutex_unlock(&ioc->taskmgmt_cmds.mutex);
  4306. if (issue_reset) {
  4307. printk(MYIOC_s_WARN_FMT
  4308. "Issuing Reset from %s!! doorbell=0x%08x\n",
  4309. ioc->name, __func__, mpt_GetIocState(ioc, 0));
  4310. mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
  4311. }
  4312. mptsas_free_fw_event(ioc, fw_event);
  4313. }
4314. /*
4315. * mptsas_send_ir2_event - handle exposing hidden disks when
4316. * an inactive raid volume is added, as well as dual-port
4317. * physical disk add/remove events
4318. *
4319. * @fw_event: firmware event work object carrying the MPI_EVENT_DATA_IR2 payload
4320. *
4321. */
  4322. static void
  4323. mptsas_send_ir2_event(struct fw_event_work *fw_event)
  4324. {
  4325. MPT_ADAPTER *ioc;
  4326. struct mptsas_hotplug_event hot_plug_info;
  4327. MPI_EVENT_DATA_IR2 *ir2_data;
  4328. u8 reasonCode;
  4329. RaidPhysDiskPage0_t phys_disk;
  4330. ioc = fw_event->ioc;
  4331. ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
  4332. reasonCode = ir2_data->ReasonCode;
  4333. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
  4334. "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
  4335. memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
  4336. hot_plug_info.id = ir2_data->TargetID;
  4337. hot_plug_info.channel = ir2_data->Bus;
  4338. switch (reasonCode) {
  4339. case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
  4340. hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
  4341. break;
  4342. case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
  4343. hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
  4344. hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
  4345. break;
  4346. case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
  4347. hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
  4348. mpt_raid_phys_disk_pg0(ioc,
  4349. ir2_data->PhysDiskNum, &phys_disk);
  4350. hot_plug_info.id = phys_disk.PhysDiskID;
  4351. hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
  4352. break;
  4353. default:
  4354. mptsas_free_fw_event(ioc, fw_event);
  4355. return;
  4356. }
  4357. mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
  4358. }
  4359. static int
  4360. mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
  4361. {
  4362. u32 event = le32_to_cpu(reply->Event);
  4363. int sz, event_data_sz;
  4364. struct fw_event_work *fw_event;
  4365. unsigned long delay;
  4366. if (ioc->bus_type != SAS)
  4367. return 0;
  4368. /* events turned off due to host reset or driver unloading */
  4369. if (ioc->fw_events_off)
  4370. return 0;
  4371. delay = msecs_to_jiffies(1);
  4372. switch (event) {
  4373. case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
  4374. {
  4375. EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
  4376. (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
  4377. if (broadcast_event_data->Primitive !=
  4378. MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
  4379. return 0;
  4380. if (ioc->broadcast_aen_busy)
  4381. return 0;
  4382. ioc->broadcast_aen_busy = 1;
  4383. break;
  4384. }
  4385. case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
  4386. {
  4387. EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
  4388. (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
  4389. u16 ioc_stat;
  4390. ioc_stat = le16_to_cpu(reply->IOCStatus);
  4391. if (sas_event_data->ReasonCode ==
  4392. MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
  4393. mptsas_target_reset_queue(ioc, sas_event_data);
  4394. return 0;
  4395. }
  4396. if (sas_event_data->ReasonCode ==
  4397. MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
  4398. ioc->device_missing_delay &&
  4399. (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
  4400. VirtTarget *vtarget = NULL;
  4401. u8 id, channel;
  4402. id = sas_event_data->TargetID;
  4403. channel = sas_event_data->Bus;
  4404. vtarget = mptsas_find_vtarget(ioc, channel, id);
  4405. if (vtarget) {
  4406. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4407. "LogInfo (0x%x) available for "
  4408. "INTERNAL_DEVICE_RESET"
  4409. "fw_id %d fw_channel %d\n", ioc->name,
  4410. le32_to_cpu(reply->IOCLogInfo),
  4411. id, channel));
  4412. if (vtarget->raidVolume) {
  4413. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4414. "Skipping Raid Volume for inDMD\n",
  4415. ioc->name));
  4416. } else {
  4417. devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
  4418. "Setting device flag inDMD\n",
  4419. ioc->name));
  4420. vtarget->inDMD = 1;
  4421. }
  4422. }
  4423. }
  4424. break;
  4425. }
  4426. case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
  4427. {
  4428. MpiEventDataSasExpanderStatusChange_t *expander_data =
  4429. (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
  4430. if (ioc->old_sas_discovery_protocal)
  4431. return 0;
  4432. if (expander_data->ReasonCode ==
  4433. MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
  4434. ioc->device_missing_delay)
  4435. delay = HZ * ioc->device_missing_delay;
  4436. break;
  4437. }
  4438. case MPI_EVENT_SAS_DISCOVERY:
  4439. {
  4440. u32 discovery_status;
  4441. EventDataSasDiscovery_t *discovery_data =
  4442. (EventDataSasDiscovery_t *)reply->Data;
  4443. discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
  4444. ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
  4445. if (ioc->old_sas_discovery_protocal && !discovery_status)
  4446. mptsas_queue_rescan(ioc);
  4447. return 0;
  4448. }
  4449. case MPI_EVENT_INTEGRATED_RAID:
  4450. case MPI_EVENT_PERSISTENT_TABLE_FULL:
  4451. case MPI_EVENT_IR2:
  4452. case MPI_EVENT_SAS_PHY_LINK_STATUS:
  4453. case MPI_EVENT_QUEUE_FULL:
  4454. break;
  4455. default:
  4456. return 0;
  4457. }
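/*
 * Copy the event data into a fw_event_work object and hand it to the
 * firmware-event workqueue; the (possibly extended) delay gives a
 * missing expander time to come back before it is removed.
 */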
  4458. event_data_sz = ((reply->MsgLength * 4) -
  4459. offsetof(EventNotificationReply_t, Data));
  4460. sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
  4461. fw_event = kzalloc(sz, GFP_ATOMIC);
  4462. if (!fw_event) {
  4463. printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
  4464. __func__, __LINE__);
  4465. return 0;
  4466. }
  4467. memcpy(fw_event->event_data, reply->Data, event_data_sz);
  4468. fw_event->event = event;
  4469. fw_event->ioc = ioc;
  4470. mptsas_add_fw_event(ioc, fw_event, delay);
  4471. return 0;
  4472. }
  4473. /* Delete a volume when no longer listed in ioc pg2
  4474. */
  4475. static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
  4476. {
  4477. struct scsi_device *sdev;
  4478. int i;
  4479. sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
  4480. if (!sdev)
  4481. return;
  4482. if (!ioc->raid_data.pIocPg2)
  4483. goto out;
  4484. if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
  4485. goto out;
  4486. for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
  4487. if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
  4488. goto release_sdev;
  4489. out:
  4490. printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
  4491. "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
  4492. scsi_remove_device(sdev);
  4493. release_sdev:
  4494. scsi_device_put(sdev);
  4495. }
static int
mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host	*sh;
	MPT_SCSI_HOST		*hd;
	MPT_ADAPTER		*ioc;
	unsigned long		flags;
	int			ii;
	int			numSGE = 0;
	int			scale;
	int			ioc_cap;
	int			error = 0;
	int			r;

	r = mpt_attach(pdev, id);
	if (r)
		return r;

	ioc = pci_get_drvdata(pdev);
	mptsas_fw_event_off(ioc);
	ioc->DoneCtx = mptsasDoneCtx;
	ioc->TaskCtx = mptsasTaskCtx;
	ioc->InternalCtx = mptsasInternalCtx;
	ioc->schedule_target_reset = &mptsas_schedule_target_reset;

	/* Added sanity check on readiness of the MPT adapter.
	 */
	if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
		printk(MYIOC_s_WARN_FMT
		    "Skipping because it's not operational!\n",
		    ioc->name);
		error = -ENODEV;
		goto out_mptsas_probe;
	}

	if (!ioc->active) {
		printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
		    ioc->name);
		error = -ENODEV;
		goto out_mptsas_probe;
	}

	/* Sanity check - ensure at least 1 port is INITIATOR capable
	 */
	ioc_cap = 0;
	for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
		if (ioc->pfacts[ii].ProtocolFlags &
		    MPI_PORTFACTS_PROTOCOL_INITIATOR)
			ioc_cap++;
	}

	if (!ioc_cap) {
		printk(MYIOC_s_WARN_FMT
		    "Skipping ioc=%p because SCSI Initiator mode "
		    "is NOT enabled!\n", ioc->name, ioc);
		return 0;
	}

	sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
	if (!sh) {
		printk(MYIOC_s_WARN_FMT
		    "Unable to register controller with SCSI subsystem\n",
		    ioc->name);
		error = -1;
		goto out_mptsas_probe;
	}

	spin_lock_irqsave(&ioc->FreeQlock, flags);

	/* Attach the SCSI Host to the IOC structure
	 */
	ioc->sh = sh;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->irq = 0;

	/* set 16 byte cdb's */
	sh->max_cmd_len = 16;
	sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
	sh->max_id = -1;
	sh->max_lun = max_lun;
	sh->transportt = mptsas_transport_template;

	/* Required entry.
	 */
	sh->unique_id = ioc->id;

	INIT_LIST_HEAD(&ioc->sas_topology);
	mutex_init(&ioc->sas_topology_mutex);
	mutex_init(&ioc->sas_discovery_mutex);
	mutex_init(&ioc->sas_mgmt.mutex);
	init_completion(&ioc->sas_mgmt.done);

	/* Verify that we won't exceed the maximum
	 * number of chain buffers
	 * We can optimize:  ZZ = req_sz/sizeof(SGE)
	 * For 32bit SGE's:
	 *  numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
	 *               + (req_sz - 64)/sizeof(SGE)
	 * A slightly different algorithm is required for
	 * 64bit SGEs.
	 */
	scale = ioc->req_sz / ioc->SGE_size;
	if (ioc->sg_addr_size == sizeof(u64)) {
		numSGE = (scale - 1) *
		    (ioc->facts.MaxChainDepth - 1) + scale +
		    (ioc->req_sz - 60) / ioc->SGE_size;
	} else {
		numSGE = 1 + (scale - 1) *
		    (ioc->facts.MaxChainDepth - 1) + scale +
		    (ioc->req_sz - 64) / ioc->SGE_size;
	}

	if (numSGE < sh->sg_tablesize) {
		/* Reset this value */
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Resetting sg_tablesize to %d from %d\n",
		    ioc->name, numSGE, sh->sg_tablesize));
		sh->sg_tablesize = numSGE;
	}

	hd = shost_priv(sh);
	hd->ioc = ioc;

	/* SCSI needs scsi_cmnd lookup table!
	 * (with size equal to req_depth*PtrSz!)
	 */
	ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
	if (!ioc->ScsiLookup) {
		error = -ENOMEM;
		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
		goto out_mptsas_probe;
	}
	spin_lock_init(&ioc->scsi_lookup_lock);

	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
	    ioc->name, ioc->ScsiLookup));

	ioc->sas_data.ptClear = mpt_pt_clear;

	hd->last_queue_full = 0;
	INIT_LIST_HEAD(&hd->target_reset_list);
	INIT_LIST_HEAD(&ioc->sas_device_info_list);
	mutex_init(&ioc->sas_device_info_mutex);

	spin_unlock_irqrestore(&ioc->FreeQlock, flags);

	if (ioc->sas_data.ptClear == 1) {
		mptbase_sas_persist_operation(
		    ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
	}

	error = scsi_add_host(sh, &ioc->pcidev->dev);
	if (error) {
		dprintk(ioc, printk(MYIOC_s_ERR_FMT
		    "scsi_add_host failed\n", ioc->name));
		goto out_mptsas_probe;
	}

	/* older firmware doesn't support expander events */
	if ((ioc->facts.HeaderVersion >> 8) < 0xE)
		ioc->old_sas_discovery_protocal = 1;
	mptsas_scan_sas_topology(ioc);
	mptsas_fw_event_on(ioc);
	return 0;

 out_mptsas_probe:

	mptscsih_remove(pdev);
	return error;
}
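
/**
 *	mptsas_shutdown - PCI shutdown handler
 *	@pdev: pointer to the pci_dev being shut down
 *
 *	Disables firmware event handling and flushes the firmware
 *	event queue before the system goes down.
 */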
void
mptsas_shutdown(struct pci_dev *pdev)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);

	mptsas_fw_event_off(ioc);
	mptsas_cleanup_fw_event_q(ioc);
}
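
/**
 *	mptsas_remove - PCI remove (hot-unplug/unload) handler
 *	@pdev: pointer to the pci_dev being removed
 *
 *	Tears down the SAS transport objects and cached port/phy
 *	information for this IOC, then detaches the SCSI host.
 */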
static void __devexit mptsas_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
	struct mptsas_portinfo *p, *n;
	int i;

	if (!ioc->sh) {
		printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
		mpt_detach(pdev);
		return;
	}

	mptsas_shutdown(pdev);

	mptsas_del_device_components(ioc);

	ioc->sas_discovery_ignore_events = 1;
	sas_remove_host(ioc->sh);

	mutex_lock(&ioc->sas_topology_mutex);
	list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
		list_del(&p->list);
		for (i = 0; i < p->num_phys; i++)
			mptsas_port_delete(ioc, p->phy_info[i].port_details);

		kfree(p->phy_info);
		kfree(p);
	}
	mutex_unlock(&ioc->sas_topology_mutex);
	ioc->hba_port_info = NULL;
	mptscsih_remove(pdev);
}
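
/*
 * PCI device IDs of the LSI SAS controllers (1064/1068/1064E/1068E/1078)
 * handled by this driver.
 */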
static struct pci_device_id mptsas_pci_table[] = {
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064E,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068E,
		PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
		PCI_ANY_ID, PCI_ANY_ID },
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);

static struct pci_driver mptsas_driver = {
	.name		= "mptsas",
	.id_table	= mptsas_pci_table,
	.probe		= mptsas_probe,
	.remove		= __devexit_p(mptsas_remove),
	.shutdown	= mptsas_shutdown,
#ifdef CONFIG_PM
	.suspend	= mptscsih_suspend,
	.resume		= mptscsih_resume,
#endif
};
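
/**
 *	mptsas_init - module load entry point
 *
 *	Attaches the SAS transport class, registers this driver's
 *	callback contexts with the MPT base driver, and registers
 *	the PCI driver.
 *
 *	Returns 0 on success, otherwise a negative error code.
 */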
static int __init
mptsas_init(void)
{
	int error;

	show_mptmod_ver(my_NAME, my_VERSION);

	mptsas_transport_template =
	    sas_attach_transport(&mptsas_transport_functions);
	if (!mptsas_transport_template)
		return -ENODEV;
	mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;

	mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
	    "mptscsih_io_done");
	mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
	    "mptscsih_taskmgmt_complete");
	mptsasInternalCtx =
		mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
		    "mptscsih_scandv_complete");
	mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
	    "mptsas_mgmt_done");
	mptsasDeviceResetCtx =
		mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
		    "mptsas_taskmgmt_complete");

	mpt_event_register(mptsasDoneCtx, mptsas_event_process);
	mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);

	error = pci_register_driver(&mptsas_driver);
	if (error)
		sas_release_transport(mptsas_transport_template);

	return error;
}
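
/**
 *	mptsas_exit - module unload entry point
 *
 *	Unregisters the PCI driver and releases the SAS transport class,
 *	then deregisters all callback contexts from the MPT base driver.
 */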
static void __exit
mptsas_exit(void)
{
	pci_unregister_driver(&mptsas_driver);
	sas_release_transport(mptsas_transport_template);

	mpt_reset_deregister(mptsasDoneCtx);
	mpt_event_deregister(mptsasDoneCtx);

	mpt_deregister(mptsasMgmtCtx);
	mpt_deregister(mptsasInternalCtx);
	mpt_deregister(mptsasTaskCtx);
	mpt_deregister(mptsasDoneCtx);
	mpt_deregister(mptsasDeviceResetCtx);
}

module_init(mptsas_init);
module_exit(mptsas_exit);