
/drivers/ieee1394/sbp2.c

  1. /*
  2. * sbp2.c - SBP-2 protocol driver for IEEE-1394
  3. *
  4. * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
  5. * jamesg@filanet.com (JSG)
  6. *
  7. * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software Foundation,
  21. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  22. */
  23. /*
  24. * Brief Description:
  25. *
  26. * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
  27. * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
  28. * driver. It also registers as a SCSI lower-level driver in order to accept
  29. * SCSI commands for transport using SBP-2.
  30. *
  31. * You may access any attached SBP-2 storage devices as if they were SCSI
  32. * devices (e.g. mount /dev/sda1, fdisk, mkfs, etc.).
  33. *
  34. * Current Issues:
  35. *
  36. * - Error Handling: SCSI aborts and bus reset requests are handled somewhat
  37. * but the code needs additional debugging.
  38. */
  39. #include <linux/config.h>
  40. #include <linux/kernel.h>
  41. #include <linux/list.h>
  42. #include <linux/string.h>
  43. #include <linux/slab.h>
  44. #include <linux/interrupt.h>
  45. #include <linux/fs.h>
  46. #include <linux/poll.h>
  47. #include <linux/module.h>
  48. #include <linux/moduleparam.h>
  49. #include <linux/types.h>
  50. #include <linux/delay.h>
  51. #include <linux/sched.h>
  52. #include <linux/blkdev.h>
  53. #include <linux/smp_lock.h>
  54. #include <linux/init.h>
  55. #include <linux/pci.h>
  56. #include <asm/current.h>
  57. #include <asm/uaccess.h>
  58. #include <asm/io.h>
  59. #include <asm/byteorder.h>
  60. #include <asm/atomic.h>
  61. #include <asm/system.h>
  62. #include <asm/scatterlist.h>
  63. #include <scsi/scsi.h>
  64. #include <scsi/scsi_cmnd.h>
  65. #include <scsi/scsi_dbg.h>
  66. #include <scsi/scsi_device.h>
  67. #include <scsi/scsi_host.h>
  68. #include "csr1212.h"
  69. #include "ieee1394.h"
  70. #include "ieee1394_types.h"
  71. #include "ieee1394_core.h"
  72. #include "nodemgr.h"
  73. #include "hosts.h"
  74. #include "highlevel.h"
  75. #include "ieee1394_transactions.h"
  76. #include "sbp2.h"
  77. static char version[] __devinitdata =
  78. "$Rev: 1219 $ Ben Collins <bcollins@debian.org>";
  79. /*
  80. * Module load parameter definitions
  81. */
  82. /*
  83. * Change max_speed on module load if you have a bad IEEE-1394
  84. * controller that has trouble running 2KB packets at 400mb.
  85. *
  86. * NOTE: On certain OHCI parts I have seen short packets on async transmit
  87. * (probably due to PCI latency/throughput issues with the part). You can
  88. * bump down the speed if you are running into problems.
  89. */
  90. static int max_speed = IEEE1394_SPEED_MAX;
  91. module_param(max_speed, int, 0644);
  92. MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)");
  93. /*
  94. * Set serialize_io to 1 if you'd like only one scsi command sent
  95. * down to us at a time (debugging). This might be necessary for very
  96. * badly behaved sbp2 devices.
  97. */
  98. static int serialize_io = 0;
  99. module_param(serialize_io, int, 0444);
  100. MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)");
  101. /*
  102. * Bump up max_sectors if you'd like to support very large sized
  103. * transfers. Please note that some older sbp2 bridge chips are broken for
104. * transfers greater than or equal to 128KB. Default is a value of 255
105. * sectors, or just under 128KB (at 512 byte sector size). Note that
  106. * the Oxsemi sbp2 chipsets have no problems supporting very large
  107. * transfer sizes.
  108. */
  109. static int max_sectors = SBP2_MAX_SECTORS;
  110. module_param(max_sectors, int, 0444);
  111. MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
  112. /*
  113. * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
  114. * do an exclusive login, as it's generally unsafe to have two hosts
  115. * talking to a single sbp2 device at the same time (filesystem coherency,
  116. * etc.). If you're running an sbp2 device that supports multiple logins,
  117. * and you're either running read-only filesystems or some sort of special
  118. * filesystem supporting multiple hosts (one such filesystem is OpenGFS,
  119. * see opengfs.sourceforge.net for more info), then set exclusive_login
  120. * to zero. Note: The Oxsemi OXFW911 sbp2 chipset supports up to four
  121. * concurrent logins.
  122. */
  123. static int exclusive_login = 1;
  124. module_param(exclusive_login, int, 0644);
  125. MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
  126. /*
  127. * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
  128. * if your sbp2 device is not properly handling the SCSI inquiry command.
  129. * This hack makes the inquiry look more like a typical MS Windows
  130. * inquiry.
  131. *
  132. * If force_inquiry_hack=1 is required for your device to work,
  133. * please submit the logged sbp2_firmware_revision value of this device to
  134. * the linux1394-devel mailing list.
  135. */
  136. static int force_inquiry_hack = 0;
  137. module_param(force_inquiry_hack, int, 0444);
  138. MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
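/*
 * Example (values are illustrative only): the parameters above are set at
 * module load time, e.g.
 *
 *   modprobe sbp2 max_speed=2 serialize_io=1 exclusive_login=0
 *
 * Parameters registered with permission 0644 (max_speed, exclusive_login)
 * are additionally exposed read/write through sysfs under /sys/module/sbp2/
 * (the exact path depends on the kernel version).
 */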
  139. /*
  140. * Export information about protocols/devices supported by this driver.
  141. */
  142. static struct ieee1394_device_id sbp2_id_table[] = {
  143. {
144. .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
  145. IEEE1394_MATCH_VERSION,
  146. .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
  147. .version = SBP2_SW_VERSION_ENTRY & 0xffffff
  148. },
  149. { }
  150. };
  151. MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
  152. /*
153. * Debug levels, configured via kernel config, or enabled here.
  154. */
  155. /* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
  156. /* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
  157. /* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
  158. /* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
  159. /* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
  160. #ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
  161. #define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
  162. static u32 global_outstanding_command_orbs = 0;
  163. #define outstanding_orb_incr global_outstanding_command_orbs++
  164. #define outstanding_orb_decr global_outstanding_command_orbs--
  165. #else
  166. #define SBP2_ORB_DEBUG(fmt, args...)
  167. #define outstanding_orb_incr
  168. #define outstanding_orb_decr
  169. #endif
  170. #ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
  171. #define SBP2_DMA_ALLOC(fmt, args...) \
  172. HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
  173. ++global_outstanding_dmas, ## args)
  174. #define SBP2_DMA_FREE(fmt, args...) \
  175. HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
  176. --global_outstanding_dmas, ## args)
  177. static u32 global_outstanding_dmas = 0;
  178. #else
  179. #define SBP2_DMA_ALLOC(fmt, args...)
  180. #define SBP2_DMA_FREE(fmt, args...)
  181. #endif
  182. #if CONFIG_IEEE1394_SBP2_DEBUG >= 2
  183. #define SBP2_DEBUG(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
  184. #define SBP2_INFO(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
  185. #define SBP2_NOTICE(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
  186. #define SBP2_WARN(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
  187. #elif CONFIG_IEEE1394_SBP2_DEBUG == 1
  188. #define SBP2_DEBUG(fmt, args...) HPSB_DEBUG("sbp2: "fmt, ## args)
  189. #define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
  190. #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
  191. #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
  192. #else
  193. #define SBP2_DEBUG(fmt, args...)
  194. #define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
  195. #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
  196. #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
  197. #endif
  198. #define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
  199. /*
  200. * Globals
  201. */
  202. static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
  203. u32 status);
  204. static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
  205. u32 scsi_status, struct scsi_cmnd *SCpnt,
  206. void (*done)(struct scsi_cmnd *));
  207. static struct scsi_host_template scsi_driver_template;
  208. static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
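/*
 * Note on sbp2_speedto_max_payload above: it is indexed by IEEE-1394 speed
 * code (S100 = 0 ... S3200 = 5) and stores the max_payload code used in our
 * packets, where the payload size in bytes is 2^(code + 2).  For example,
 * code 0x9 at S400 corresponds to 2^(9 + 2) = 2048-byte payloads (see
 * sbp2_max_speed_and_size() below).
 */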
  209. static void sbp2_host_reset(struct hpsb_host *host);
  210. static int sbp2_probe(struct device *dev);
  211. static int sbp2_remove(struct device *dev);
  212. static int sbp2_update(struct unit_directory *ud);
  213. static struct hpsb_highlevel sbp2_highlevel = {
  214. .name = SBP2_DEVICE_NAME,
  215. .host_reset = sbp2_host_reset,
  216. };
  217. static struct hpsb_address_ops sbp2_ops = {
  218. .write = sbp2_handle_status_write
  219. };
  220. #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
  221. static struct hpsb_address_ops sbp2_physdma_ops = {
  222. .read = sbp2_handle_physdma_read,
  223. .write = sbp2_handle_physdma_write,
  224. };
  225. #endif
  226. static struct hpsb_protocol_driver sbp2_driver = {
  227. .name = "SBP2 Driver",
  228. .id_table = sbp2_id_table,
  229. .update = sbp2_update,
  230. .driver = {
  231. .name = SBP2_DEVICE_NAME,
  232. .bus = &ieee1394_bus_type,
  233. .probe = sbp2_probe,
  234. .remove = sbp2_remove,
  235. },
  236. };
237. /* List of device firmware revisions that require a forced 36 byte inquiry. */
  238. static u32 sbp2_broken_inquiry_list[] = {
  239. 0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
  240. /* DViCO Momobay CX-1 */
  241. 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
  242. /* QPS Fire DVDBurner */
  243. };
  244. #define NUM_BROKEN_INQUIRY_DEVS \
  245. (sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))
  246. /**************************************
  247. * General utility functions
  248. **************************************/
  249. #ifndef __BIG_ENDIAN
  250. /*
  251. * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
  252. */
  253. static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
  254. {
  255. u32 *temp = buffer;
  256. for (length = (length >> 2); length--; )
  257. temp[length] = be32_to_cpu(temp[length]);
  258. return;
  259. }
  260. /*
  261. * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
  262. */
  263. static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
  264. {
  265. u32 *temp = buffer;
  266. for (length = (length >> 2); length--; )
  267. temp[length] = cpu_to_be32(temp[length]);
  268. return;
  269. }
  270. #else /* BIG_ENDIAN */
  271. /* Why waste the cpu cycles? */
  272. #define sbp2util_be32_to_cpu_buffer(x,y)
  273. #define sbp2util_cpu_to_be32_buffer(x,y)
  274. #endif
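/*
 * Usage sketch (mirrors the login path further down): ORBs are built in CPU
 * byte order and converted in place to big-endian before the target fetches
 * them over the bus, e.g.
 *
 *   sbp2util_cpu_to_be32_buffer(scsi_id->login_orb,
 *                               sizeof(struct sbp2_login_orb));
 *
 * On big-endian machines both helpers compile away to nothing.
 */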
  275. #ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
  276. /*
  277. * Debug packet dump routine. Length is in bytes.
  278. */
  279. static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr)
  280. {
  281. int i;
  282. unsigned char *dump = buffer;
  283. if (!dump || !length || !dump_name)
  284. return;
  285. if (dump_phys_addr)
  286. printk("[%s, 0x%x]", dump_name, dump_phys_addr);
  287. else
  288. printk("[%s]", dump_name);
  289. for (i = 0; i < length; i++) {
  290. if (i > 0x3f) {
  291. printk("\n ...");
  292. break;
  293. }
  294. if ((i & 0x3) == 0)
  295. printk(" ");
  296. if ((i & 0xf) == 0)
  297. printk("\n ");
  298. printk("%02x ", (int) dump[i]);
  299. }
  300. printk("\n");
  301. return;
  302. }
  303. #else
  304. #define sbp2util_packet_dump(w,x,y,z)
  305. #endif
  306. /*
307. * Helper that emulates a down_timeout(): it polls the completion flag every
308. * 100 ms until the flag is set or the timeout expires.
  308. */
  309. static int sbp2util_down_timeout(atomic_t *done, int timeout)
  310. {
  311. int i;
  312. for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
  313. if (msleep_interruptible(100)) /* 100ms */
  314. return(1);
  315. }
  316. return ((i > 0) ? 0:1);
  317. }
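/*
 * Typical call, as used by the login code below: wait up to 20 seconds for
 * the status write that completes a login; 0 means the flag was set in time,
 * 1 means timeout or an interrupting signal.
 *
 *   if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ))
 *       return -EIO;
 */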
318. /* Frees an allocated packet */
  319. static void sbp2_free_packet(struct hpsb_packet *packet)
  320. {
  321. hpsb_free_tlabel(packet);
  322. hpsb_free_packet(packet);
  323. }
  324. /* This is much like hpsb_node_write(), except it ignores the response
  325. * subaction and returns immediately. Can be used from interrupts.
  326. */
  327. static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
  328. quadlet_t *buffer, size_t length)
  329. {
  330. struct hpsb_packet *packet;
  331. packet = hpsb_make_writepacket(ne->host, ne->nodeid,
  332. addr, buffer, length);
  333. if (!packet)
  334. return -ENOMEM;
  335. hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet,
  336. packet);
  337. hpsb_node_fill_packet(ne, packet);
  338. if (hpsb_send_packet(packet) < 0) {
  339. sbp2_free_packet(packet);
  340. return -EIO;
  341. }
  342. return 0;
  343. }
  344. /*
  345. * This function is called to create a pool of command orbs used for
  346. * command processing. It is called when a new sbp2 device is detected.
  347. */
  348. static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
  349. {
  350. struct sbp2scsi_host_info *hi = scsi_id->hi;
  351. int i;
  352. unsigned long flags, orbs;
  353. struct sbp2_command_info *command;
  354. orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
  355. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  356. for (i = 0; i < orbs; i++) {
  357. command = (struct sbp2_command_info *)
  358. kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
  359. if (!command) {
  360. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  361. return(-ENOMEM);
  362. }
  363. memset(command, '\0', sizeof(struct sbp2_command_info));
  364. command->command_orb_dma =
  365. pci_map_single (hi->host->pdev, &command->command_orb,
  366. sizeof(struct sbp2_command_orb),
  367. PCI_DMA_BIDIRECTIONAL);
  368. SBP2_DMA_ALLOC("single command orb DMA");
  369. command->sge_dma =
  370. pci_map_single (hi->host->pdev, &command->scatter_gather_element,
  371. sizeof(command->scatter_gather_element),
  372. PCI_DMA_BIDIRECTIONAL);
  373. SBP2_DMA_ALLOC("scatter_gather_element");
  374. INIT_LIST_HEAD(&command->list);
  375. list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
  376. }
  377. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  378. return 0;
  379. }
  380. /*
  381. * This function is called to delete a pool of command orbs.
  382. */
  383. static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
  384. {
  385. struct hpsb_host *host = scsi_id->hi->host;
  386. struct list_head *lh, *next;
  387. struct sbp2_command_info *command;
  388. unsigned long flags;
  389. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  390. if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
  391. list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
  392. command = list_entry(lh, struct sbp2_command_info, list);
393. /* Release our generic DMA mappings */
  394. pci_unmap_single(host->pdev, command->command_orb_dma,
  395. sizeof(struct sbp2_command_orb),
  396. PCI_DMA_BIDIRECTIONAL);
  397. SBP2_DMA_FREE("single command orb DMA");
  398. pci_unmap_single(host->pdev, command->sge_dma,
  399. sizeof(command->scatter_gather_element),
  400. PCI_DMA_BIDIRECTIONAL);
  401. SBP2_DMA_FREE("scatter_gather_element");
  402. kfree(command);
  403. }
  404. }
  405. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  406. return;
  407. }
  408. /*
  409. * This function finds the sbp2_command for a given outstanding command
410. * orb. Only looks at the inuse list.
  411. */
  412. static struct sbp2_command_info *sbp2util_find_command_for_orb(
  413. struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
  414. {
  415. struct sbp2_command_info *command;
  416. unsigned long flags;
  417. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  418. if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
  419. list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
  420. if (command->command_orb_dma == orb) {
  421. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  422. return (command);
  423. }
  424. }
  425. }
  426. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  427. SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
  428. return(NULL);
  429. }
  430. /*
  431. * This function finds the sbp2_command for a given outstanding SCpnt.
  432. * Only looks at the inuse list.
  433. */
  434. static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
  435. {
  436. struct sbp2_command_info *command;
  437. unsigned long flags;
  438. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  439. if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
  440. list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
  441. if (command->Current_SCpnt == SCpnt) {
  442. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  443. return (command);
  444. }
  445. }
  446. }
  447. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  448. return(NULL);
  449. }
  450. /*
  451. * This function allocates a command orb used to send a scsi command.
  452. */
  453. static struct sbp2_command_info *sbp2util_allocate_command_orb(
  454. struct scsi_id_instance_data *scsi_id,
  455. struct scsi_cmnd *Current_SCpnt,
  456. void (*Current_done)(struct scsi_cmnd *))
  457. {
  458. struct list_head *lh;
  459. struct sbp2_command_info *command = NULL;
  460. unsigned long flags;
  461. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  462. if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
  463. lh = scsi_id->sbp2_command_orb_completed.next;
  464. list_del(lh);
  465. command = list_entry(lh, struct sbp2_command_info, list);
  466. command->Current_done = Current_done;
  467. command->Current_SCpnt = Current_SCpnt;
  468. list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
  469. } else {
  470. SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
  471. }
  472. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  473. return (command);
  474. }
475. /* Free our DMA mappings */
  476. static void sbp2util_free_command_dma(struct sbp2_command_info *command)
  477. {
  478. struct scsi_id_instance_data *scsi_id =
  479. (struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
  480. struct hpsb_host *host;
  481. if (!scsi_id) {
  482. printk(KERN_ERR "%s: scsi_id == NULL\n", __FUNCTION__);
  483. return;
  484. }
  485. host = scsi_id->ud->ne->host;
  486. if (command->cmd_dma) {
  487. if (command->dma_type == CMD_DMA_SINGLE) {
  488. pci_unmap_single(host->pdev, command->cmd_dma,
  489. command->dma_size, command->dma_dir);
  490. SBP2_DMA_FREE("single bulk");
  491. } else if (command->dma_type == CMD_DMA_PAGE) {
  492. pci_unmap_page(host->pdev, command->cmd_dma,
  493. command->dma_size, command->dma_dir);
  494. SBP2_DMA_FREE("single page");
  495. } /* XXX: Check for CMD_DMA_NONE bug */
  496. command->dma_type = CMD_DMA_NONE;
  497. command->cmd_dma = 0;
  498. }
  499. if (command->sge_buffer) {
  500. pci_unmap_sg(host->pdev, command->sge_buffer,
  501. command->dma_size, command->dma_dir);
  502. SBP2_DMA_FREE("scatter list");
  503. command->sge_buffer = NULL;
  504. }
  505. }
  506. /*
  507. * This function moves a command to the completed orb list.
  508. */
  509. static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command)
  510. {
  511. unsigned long flags;
  512. spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
  513. list_del(&command->list);
  514. sbp2util_free_command_dma(command);
  515. list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
  516. spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
  517. }
  518. /*********************************************
  519. * IEEE-1394 core driver stack related section
  520. *********************************************/
  521. static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
  522. static int sbp2_probe(struct device *dev)
  523. {
  524. struct unit_directory *ud;
  525. struct scsi_id_instance_data *scsi_id;
  526. SBP2_DEBUG("sbp2_probe");
  527. ud = container_of(dev, struct unit_directory, device);
  528. /* Don't probe UD's that have the LUN flag. We'll probe the LUN(s)
  529. * instead. */
  530. if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
  531. return -ENODEV;
  532. scsi_id = sbp2_alloc_device(ud);
  533. if (!scsi_id)
  534. return -ENOMEM;
  535. sbp2_parse_unit_directory(scsi_id, ud);
  536. return sbp2_start_device(scsi_id);
  537. }
  538. static int sbp2_remove(struct device *dev)
  539. {
  540. struct unit_directory *ud;
  541. struct scsi_id_instance_data *scsi_id;
  542. SBP2_DEBUG("sbp2_remove");
  543. ud = container_of(dev, struct unit_directory, device);
  544. scsi_id = ud->device.driver_data;
  545. sbp2_logout_device(scsi_id);
  546. sbp2_remove_device(scsi_id);
  547. return 0;
  548. }
  549. static int sbp2_update(struct unit_directory *ud)
  550. {
  551. struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
  552. SBP2_DEBUG("sbp2_update");
  553. if (sbp2_reconnect_device(scsi_id)) {
  554. /*
  555. * Ok, reconnect has failed. Perhaps we didn't
  556. * reconnect fast enough. Try doing a regular login, but
  557. * first do a logout just in case of any weirdness.
  558. */
  559. sbp2_logout_device(scsi_id);
  560. if (sbp2_login_device(scsi_id)) {
  561. /* Login failed too, just fail, and the backend
  562. * will call our sbp2_remove for us */
  563. SBP2_ERR("Failed to reconnect to sbp2 device!");
  564. return -EBUSY;
  565. }
  566. }
  567. /* Set max retries to something large on the device. */
  568. sbp2_set_busy_timeout(scsi_id);
  569. /* Do a SBP-2 fetch agent reset. */
  570. sbp2_agent_reset(scsi_id, 1);
  571. /* Get the max speed and packet size that we can use. */
  572. sbp2_max_speed_and_size(scsi_id);
  573. /* Complete any pending commands with busy (so they get
  574. * retried) and remove them from our queue
  575. */
  576. sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
  577. /* Make sure we unblock requests (since this is likely after a bus
  578. * reset). */
  579. scsi_unblock_requests(scsi_id->scsi_host);
  580. return 0;
  581. }
582. /* This function is called by sbp2_probe for each new device. We now
  583. * allocate one scsi host for each scsi_id (unit directory). */
  584. static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
  585. {
  586. struct sbp2scsi_host_info *hi;
  587. struct Scsi_Host *scsi_host = NULL;
  588. struct scsi_id_instance_data *scsi_id = NULL;
  589. SBP2_DEBUG("sbp2_alloc_device");
  590. scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL);
  591. if (!scsi_id) {
  592. SBP2_ERR("failed to create scsi_id");
  593. goto failed_alloc;
  594. }
  595. memset(scsi_id, 0, sizeof(*scsi_id));
  596. scsi_id->ne = ud->ne;
  597. scsi_id->ud = ud;
  598. scsi_id->speed_code = IEEE1394_SPEED_100;
  599. scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
  600. atomic_set(&scsi_id->sbp2_login_complete, 0);
  601. INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
  602. INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
  603. INIT_LIST_HEAD(&scsi_id->scsi_list);
  604. spin_lock_init(&scsi_id->sbp2_command_orb_lock);
  605. scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED;
  606. ud->device.driver_data = scsi_id;
  607. hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
  608. if (!hi) {
  609. hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi));
  610. if (!hi) {
  611. SBP2_ERR("failed to allocate hostinfo");
  612. goto failed_alloc;
  613. }
  614. SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
  615. hi->host = ud->ne->host;
  616. INIT_LIST_HEAD(&hi->scsi_ids);
  617. /* Register our sbp2 status address space... */
  618. hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
  619. SBP2_STATUS_FIFO_ADDRESS,
  620. SBP2_STATUS_FIFO_ADDRESS +
  621. SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
  622. #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
  623. /* Handle data movement if physical dma is not
624. * enabled/supported on the host controller */
  625. hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_physdma_ops,
  626. 0x0ULL, 0xfffffffcULL);
  627. #endif
  628. }
  629. scsi_id->hi = hi;
  630. list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
  631. /* Register our host with the SCSI stack. */
  632. scsi_host = scsi_host_alloc(&scsi_driver_template, 0);
  633. if (!scsi_host) {
  634. SBP2_ERR("failed to register scsi host");
  635. goto failed_alloc;
  636. }
  637. scsi_host->hostdata[0] = (unsigned long)scsi_id;
  638. if (!scsi_add_host(scsi_host, &ud->device)) {
  639. scsi_id->scsi_host = scsi_host;
  640. return scsi_id;
  641. }
  642. SBP2_ERR("failed to add scsi host");
  643. scsi_host_put(scsi_host);
  644. failed_alloc:
  645. sbp2_remove_device(scsi_id);
  646. return NULL;
  647. }
  648. static void sbp2_host_reset(struct hpsb_host *host)
  649. {
  650. struct sbp2scsi_host_info *hi;
  651. struct scsi_id_instance_data *scsi_id;
  652. hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
  653. if (hi) {
  654. list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
  655. scsi_block_requests(scsi_id->scsi_host);
  656. }
  657. }
  658. /*
  659. * This function is where we first pull the node unique ids, and then
  660. * allocate memory and register a SBP-2 device.
  661. */
  662. static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
  663. {
  664. struct sbp2scsi_host_info *hi = scsi_id->hi;
  665. struct scsi_device *sdev;
  666. SBP2_DEBUG("sbp2_start_device");
  667. /* Login FIFO DMA */
  668. scsi_id->login_response =
  669. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response),
  670. &scsi_id->login_response_dma);
  671. if (!scsi_id->login_response)
  672. goto alloc_fail;
  673. SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
  674. /* Query logins ORB DMA */
  675. scsi_id->query_logins_orb =
  676. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb),
  677. &scsi_id->query_logins_orb_dma);
  678. if (!scsi_id->query_logins_orb)
  679. goto alloc_fail;
  680. SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
  681. /* Query logins response DMA */
  682. scsi_id->query_logins_response =
  683. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response),
  684. &scsi_id->query_logins_response_dma);
  685. if (!scsi_id->query_logins_response)
  686. goto alloc_fail;
  687. SBP2_DMA_ALLOC("consistent DMA region for query logins response");
  688. /* Reconnect ORB DMA */
  689. scsi_id->reconnect_orb =
  690. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb),
  691. &scsi_id->reconnect_orb_dma);
  692. if (!scsi_id->reconnect_orb)
  693. goto alloc_fail;
  694. SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
  695. /* Logout ORB DMA */
  696. scsi_id->logout_orb =
  697. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb),
  698. &scsi_id->logout_orb_dma);
  699. if (!scsi_id->logout_orb)
  700. goto alloc_fail;
  701. SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
  702. /* Login ORB DMA */
  703. scsi_id->login_orb =
  704. pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb),
  705. &scsi_id->login_orb_dma);
  706. if (!scsi_id->login_orb) {
  707. alloc_fail:
  708. if (scsi_id->query_logins_response) {
  709. pci_free_consistent(hi->host->pdev,
  710. sizeof(struct sbp2_query_logins_response),
  711. scsi_id->query_logins_response,
  712. scsi_id->query_logins_response_dma);
  713. SBP2_DMA_FREE("query logins response DMA");
  714. }
  715. if (scsi_id->query_logins_orb) {
  716. pci_free_consistent(hi->host->pdev,
  717. sizeof(struct sbp2_query_logins_orb),
  718. scsi_id->query_logins_orb,
  719. scsi_id->query_logins_orb_dma);
  720. SBP2_DMA_FREE("query logins ORB DMA");
  721. }
  722. if (scsi_id->logout_orb) {
  723. pci_free_consistent(hi->host->pdev,
  724. sizeof(struct sbp2_logout_orb),
  725. scsi_id->logout_orb,
  726. scsi_id->logout_orb_dma);
  727. SBP2_DMA_FREE("logout ORB DMA");
  728. }
  729. if (scsi_id->reconnect_orb) {
  730. pci_free_consistent(hi->host->pdev,
  731. sizeof(struct sbp2_reconnect_orb),
  732. scsi_id->reconnect_orb,
  733. scsi_id->reconnect_orb_dma);
  734. SBP2_DMA_FREE("reconnect ORB DMA");
  735. }
  736. if (scsi_id->login_response) {
  737. pci_free_consistent(hi->host->pdev,
  738. sizeof(struct sbp2_login_response),
  739. scsi_id->login_response,
  740. scsi_id->login_response_dma);
  741. SBP2_DMA_FREE("login FIFO DMA");
  742. }
  743. list_del(&scsi_id->scsi_list);
  744. kfree(scsi_id);
  745. SBP2_ERR ("Could not allocate memory for scsi_id");
  746. return -ENOMEM;
  747. }
  748. SBP2_DMA_ALLOC("consistent DMA region for login ORB");
  749. SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
  750. /*
  751. * Create our command orb pool
  752. */
  753. if (sbp2util_create_command_orb_pool(scsi_id)) {
  754. SBP2_ERR("sbp2util_create_command_orb_pool failed!");
  755. sbp2_remove_device(scsi_id);
  756. return -ENOMEM;
  757. }
  758. /* Schedule a timeout here. The reason is that we may be so close
  759. * to a bus reset, that the device is not available for logins.
  760. * This can happen when the bus reset is caused by the host
  761. * connected to the sbp2 device being removed. That host would
  762. * have a certain amount of time to relogin before the sbp2 device
  763. * allows someone else to login instead. One second makes sense. */
  764. msleep_interruptible(1000);
  765. if (signal_pending(current)) {
  766. SBP2_WARN("aborting sbp2_start_device due to event");
  767. sbp2_remove_device(scsi_id);
  768. return -EINTR;
  769. }
  770. /*
  771. * Login to the sbp-2 device
  772. */
  773. if (sbp2_login_device(scsi_id)) {
  774. /* Login failed, just remove the device. */
  775. sbp2_remove_device(scsi_id);
  776. return -EBUSY;
  777. }
  778. /*
  779. * Set max retries to something large on the device
  780. */
  781. sbp2_set_busy_timeout(scsi_id);
  782. /*
  783. * Do a SBP-2 fetch agent reset
  784. */
  785. sbp2_agent_reset(scsi_id, 1);
  786. /*
  787. * Get the max speed and packet size that we can use
  788. */
  789. sbp2_max_speed_and_size(scsi_id);
  790. /* Add this device to the scsi layer now */
  791. sdev = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
  792. if (IS_ERR(sdev)) {
  793. SBP2_ERR("scsi_add_device failed");
  794. return PTR_ERR(sdev);
  795. }
  796. return 0;
  797. }
  798. /*
  799. * This function removes an sbp2 device from the sbp2scsi_host_info struct.
  800. */
  801. static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
  802. {
  803. struct sbp2scsi_host_info *hi;
  804. SBP2_DEBUG("sbp2_remove_device");
  805. if (!scsi_id)
  806. return;
  807. hi = scsi_id->hi;
808. /* This will remove our scsi device as well */
  809. if (scsi_id->scsi_host) {
  810. scsi_remove_host(scsi_id->scsi_host);
  811. scsi_host_put(scsi_id->scsi_host);
  812. }
  813. sbp2util_remove_command_orb_pool(scsi_id);
  814. list_del(&scsi_id->scsi_list);
  815. if (scsi_id->login_response) {
  816. pci_free_consistent(hi->host->pdev,
  817. sizeof(struct sbp2_login_response),
  818. scsi_id->login_response,
  819. scsi_id->login_response_dma);
  820. SBP2_DMA_FREE("single login FIFO");
  821. }
  822. if (scsi_id->login_orb) {
  823. pci_free_consistent(hi->host->pdev,
  824. sizeof(struct sbp2_login_orb),
  825. scsi_id->login_orb,
  826. scsi_id->login_orb_dma);
  827. SBP2_DMA_FREE("single login ORB");
  828. }
  829. if (scsi_id->reconnect_orb) {
  830. pci_free_consistent(hi->host->pdev,
  831. sizeof(struct sbp2_reconnect_orb),
  832. scsi_id->reconnect_orb,
  833. scsi_id->reconnect_orb_dma);
  834. SBP2_DMA_FREE("single reconnect orb");
  835. }
  836. if (scsi_id->logout_orb) {
  837. pci_free_consistent(hi->host->pdev,
  838. sizeof(struct sbp2_logout_orb),
  839. scsi_id->logout_orb,
  840. scsi_id->logout_orb_dma);
  841. SBP2_DMA_FREE("single logout orb");
  842. }
  843. if (scsi_id->query_logins_orb) {
  844. pci_free_consistent(hi->host->pdev,
  845. sizeof(struct sbp2_query_logins_orb),
  846. scsi_id->query_logins_orb,
  847. scsi_id->query_logins_orb_dma);
  848. SBP2_DMA_FREE("single query logins orb");
  849. }
  850. if (scsi_id->query_logins_response) {
  851. pci_free_consistent(hi->host->pdev,
  852. sizeof(struct sbp2_query_logins_response),
  853. scsi_id->query_logins_response,
  854. scsi_id->query_logins_response_dma);
  855. SBP2_DMA_FREE("single query logins data");
  856. }
  857. scsi_id->ud->device.driver_data = NULL;
  858. SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
  859. kfree(scsi_id);
  860. }
  861. #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
  862. /*
  863. * This function deals with physical dma write requests (for adapters that do not support
  864. * physical dma in hardware). Mostly just here for debugging...
  865. */
  866. static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
  867. u64 addr, size_t length, u16 flags)
  868. {
  869. /*
  870. * Manually put the data in the right place.
  871. */
  872. memcpy(bus_to_virt((u32)addr), data, length);
  873. sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr);
  874. return(RCODE_COMPLETE);
  875. }
  876. /*
  877. * This function deals with physical dma read requests (for adapters that do not support
  878. * physical dma in hardware). Mostly just here for debugging...
  879. */
  880. static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
  881. u64 addr, size_t length, u16 flags)
  882. {
  883. /*
  884. * Grab data from memory and send a read response.
  885. */
  886. memcpy(data, bus_to_virt((u32)addr), length);
  887. sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr);
  888. return(RCODE_COMPLETE);
  889. }
  890. #endif
  891. /**************************************
  892. * SBP-2 protocol related section
  893. **************************************/
  894. /*
  895. * This function determines if we should convert scsi commands for a particular sbp2 device type
  896. */
  897. static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
  898. {
  899. return (((device_type == TYPE_DISK) ||
  900. (device_type == TYPE_SDAD) ||
  901. (device_type == TYPE_ROM)) ? 1:0);
  902. }
  903. /*
  904. * This function queries the device for the maximum concurrent logins it
  905. * supports.
  906. */
  907. static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
  908. {
  909. struct sbp2scsi_host_info *hi = scsi_id->hi;
  910. quadlet_t data[2];
  911. int max_logins;
  912. int active_logins;
  913. SBP2_DEBUG("sbp2_query_logins");
  914. scsi_id->query_logins_orb->reserved1 = 0x0;
  915. scsi_id->query_logins_orb->reserved2 = 0x0;
  916. scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
  917. scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
  918. SBP2_DEBUG("sbp2_query_logins: query_response_hi/lo initialized");
  919. scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
  920. scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
  921. if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
  922. scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
  923. SBP2_DEBUG("sbp2_query_logins: set lun to %d",
  924. ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
  925. }
  926. SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
  927. scsi_id->query_logins_orb->reserved_resp_length =
  928. ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
  929. SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
  930. scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
  931. SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
  932. scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
  933. SBP2_STATUS_FIFO_ADDRESS_HI);
  934. SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
  935. sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
  936. SBP2_DEBUG("sbp2_query_logins: orb byte-swapped");
  937. sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
  938. "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
  939. memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
  940. memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
  941. SBP2_DEBUG("sbp2_query_logins: query_logins_response/status FIFO memset");
  942. data[0] = ORB_SET_NODE_ID(hi->host->node_id);
  943. data[1] = scsi_id->query_logins_orb_dma;
  944. sbp2util_cpu_to_be32_buffer(data, 8);
  945. atomic_set(&scsi_id->sbp2_login_complete, 0);
  946. SBP2_DEBUG("sbp2_query_logins: prepared to write");
  947. hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
  948. SBP2_DEBUG("sbp2_query_logins: written");
  949. if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
  950. SBP2_INFO("Error querying logins to SBP-2 device - timed out");
  951. return(-EIO);
  952. }
  953. if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
  954. SBP2_INFO("Error querying logins to SBP-2 device - timed out");
  955. return(-EIO);
  956. }
  957. if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
  958. STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
  959. STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
  960. SBP2_INFO("Error querying logins to SBP-2 device - timed out");
  961. return(-EIO);
  962. }
  963. sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
  964. SBP2_DEBUG("length_max_logins = %x",
  965. (unsigned int)scsi_id->query_logins_response->length_max_logins);
  966. SBP2_DEBUG("Query logins to SBP-2 device successful");
  967. max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
  968. SBP2_DEBUG("Maximum concurrent logins supported: %d", max_logins);
  969. active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
  970. SBP2_DEBUG("Number of active logins: %d", active_logins);
  971. if (active_logins >= max_logins) {
  972. return(-EIO);
  973. }
  974. return 0;
  975. }
  976. /*
  977. * This function is called in order to login to a particular SBP-2 device,
  978. * after a bus reset.
  979. */
  980. static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
  981. {
  982. struct sbp2scsi_host_info *hi = scsi_id->hi;
  983. quadlet_t data[2];
  984. SBP2_DEBUG("sbp2_login_device");
  985. if (!scsi_id->login_orb) {
  986. SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
  987. return(-EIO);
  988. }
  989. if (!exclusive_login) {
  990. if (sbp2_query_logins(scsi_id)) {
  991. SBP2_INFO("Device does not support any more concurrent logins");
  992. return(-EIO);
  993. }
  994. }
  995. /* Set-up login ORB, assume no password */
  996. scsi_id->login_orb->password_hi = 0;
  997. scsi_id->login_orb->password_lo = 0;
  998. SBP2_DEBUG("sbp2_login_device: password_hi/lo initialized");
  999. scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
  1000. scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
  1001. SBP2_DEBUG("sbp2_login_device: login_response_hi/lo initialized");
  1002. scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
  1003. scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
  1004. scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
  1005. scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
  1006. /* Set the lun if we were able to pull it from the device's unit directory */
  1007. if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
  1008. scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
  1009. SBP2_DEBUG("sbp2_query_logins: set lun to %d",
  1010. ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
  1011. }
  1012. SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
  1013. scsi_id->login_orb->passwd_resp_lengths =
  1014. ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
  1015. SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
  1016. scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
  1017. SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
  1018. scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
  1019. SBP2_STATUS_FIFO_ADDRESS_HI);
  1020. SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
  1021. /*
  1022. * Byte swap ORB if necessary
  1023. */
  1024. sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
  1025. SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
  1026. sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
  1027. "sbp2 login orb", scsi_id->login_orb_dma);
  1028. /*
  1029. * Initialize login response and status fifo
  1030. */
  1031. memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
  1032. memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
  1033. SBP2_DEBUG("sbp2_login_device: login_response/status FIFO memset");
  1034. /*
  1035. * Ok, let's write to the target's management agent register
  1036. */
  1037. data[0] = ORB_SET_NODE_ID(hi->host->node_id);
  1038. data[1] = scsi_id->login_orb_dma;
  1039. sbp2util_cpu_to_be32_buffer(data, 8);
  1040. atomic_set(&scsi_id->sbp2_login_complete, 0);
  1041. SBP2_DEBUG("sbp2_login_device: prepared to write to %08x",
  1042. (unsigned int)scsi_id->sbp2_management_agent_addr);
  1043. hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
  1044. SBP2_DEBUG("sbp2_login_device: written");
  1045. /*
  1046. * Wait for login status (up to 20 seconds)...
  1047. */
  1048. if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
  1049. SBP2_ERR("Error logging into SBP-2 device - login timed-out");
  1050. return(-EIO);
  1051. }
  1052. /*
  1053. * Sanity. Make sure status returned matches login orb.
  1054. */
  1055. if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
  1056. SBP2_ERR("Error logging into SBP-2 device - login timed-out");
  1057. return(-EIO);
  1058. }
  1059. /*
  1060. * Check status
  1061. */
  1062. if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
  1063. STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
  1064. STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
  1065. SBP2_ERR("Error logging into SBP-2 device - login failed");
  1066. return(-EIO);
  1067. }
  1068. /*
  1069. * Byte swap the login response, for use when reconnecting or
  1070. * logging out.
  1071. */
  1072. sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
  1073. /*
  1074. * Grab our command block agent address from the login response.
  1075. */
  1076. SBP2_DEBUG("command_block_agent_hi = %x",
  1077. (unsigned int)scsi_id->login_response->command_block_agent_hi);
  1078. SBP2_DEBUG("command_block_agent_lo = %x",
  1079. (unsigned int)scsi_id->login_response->command_block_agent_lo);
  1080. scsi_id->sbp2_command_block_agent_addr =
  1081. ((u64)scsi_id->login_response->command_block_agent_hi) << 32;
  1082. scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
  1083. scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
  1084. SBP2_INFO("Logged into SBP-2 device");
  1085. return(0);
  1086. }
  1087. /*
  1088. * This function is called in order to logout from a particular SBP-2
  1089. * device, usually called during driver unload.
  1090. */
  1091. static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
  1092. {
  1093. struct sbp2scsi_host_info *hi = scsi_id->hi;
  1094. quadlet_t data[2];
  1095. int error;
  1096. SBP2_DEBUG("sbp2_logout_device");
  1097. /*
  1098. * Set-up logout ORB
  1099. */
  1100. scsi_id->logout_orb->reserved1 = 0x0;
  1101. scsi_id->logout_orb->reserved2 = 0x0;
  1102. scsi_id->logout_orb->reserved3 = 0x0;
  1103. scsi_id->logout_orb->reserved4 = 0x0;
  1104. scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
  1105. scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
  1106. /* Notify us when complete */
  1107. scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
  1108. scsi_id->logout_orb->reserved5 = 0x0;
  1109. scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
  1110. SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
  1111. scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
  1112. SBP2_STATUS_FIFO_ADDRESS_HI);
  1113. /*
  1114. * Byte swap ORB if necessary
  1115. */
  1116. sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
  1117. sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
  1118. "sbp2 logout orb", scsi_id->logout_orb_dma);
  1119. /*
  1120. * Ok, let's write to the target's management agent register
  1121. */
  1122. data[0] = ORB_SET_NODE_ID(hi->host->node_id);
  1123. data[1] = scsi_id->logout_orb_dma;
  1124. sbp2util_cpu_to_be32_buffer(data, 8);
  1125. atomic_set(&scsi_id->sbp2_login_complete, 0);
  1126. error = hpsb_node_write(scsi_id->ne,
  1127. scsi_id->sbp2_management_agent_addr,
  1128. data, 8);
  1129. if (error)
  1130. return error;
  1131. /* Wait for device to logout...1 second. */
  1132. if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ))
  1133. return -EIO;
  1134. SBP2_INFO("Logged out of SBP-2 device");
  1135. return(0);
  1136. }
  1137. /*
  1138. * This function is called in order to reconnect to a particular SBP-2
  1139. * device, after a bus reset.
  1140. */
  1141. static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
  1142. {
  1143. struct sbp2scsi_host_info *hi = scsi_id->hi;
  1144. quadlet_t data[2];
  1145. int error;
  1146. SBP2_DEBUG("sbp2_reconnect_device");
  1147. /*
  1148. * Set-up reconnect ORB
  1149. */
  1150. scsi_id->reconnect_orb->reserved1 = 0x0;
  1151. scsi_id->reconnect_orb->reserved2 = 0x0;
  1152. scsi_id->reconnect_orb->reserved3 = 0x0;
  1153. scsi_id->reconnect_orb->reserved4 = 0x0;
  1154. scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
  1155. scsi_id->reconnect_orb->login_ID_misc |=
  1156. ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
  1157. /* Notify us when complete */
  1158. scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
  1159. scsi_id->reconnect_orb->reserved5 = 0x0;
  1160. scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
  1161. SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
  1162. scsi_id->reconnect_orb->status_FIFO_hi =
  1163. (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
  1164. /*
  1165. * Byte swap ORB if necessary
  1166. */
  1167. sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
  1168. sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
  1169. "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
  1170. /*
  1171. * Initialize status fifo
  1172. */
  1173. memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
  1174. /*
  1175. * Ok, let's write to the target's management agent register
  1176. */
  1177. data[0] = ORB_SET_NODE_ID(hi->host->node_id);
  1178. data[1] = scsi_id->reconnect_orb_dma;
  1179. sbp2util_cpu_to_be32_buffer(data, 8);
  1180. atomic_set(&scsi_id->sbp2_login_complete, 0);
  1181. error = hpsb_node_write(scsi_id->ne,
  1182. scsi_id->sbp2_management_agent_addr,
  1183. data, 8);
  1184. if (error)
  1185. return error;
  1186. /*
  1187. * Wait for reconnect status (up to 1 second)...
  1188. */
  1189. if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
  1190. SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
  1191. return(-EIO);
  1192. }
  1193. /*
  1194. * Sanity. Make sure status returned matches reconnect orb.
  1195. */
  1196. if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
  1197. SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
  1198. return(-EIO);
  1199. }
  1200. /*
  1201. * Check status
  1202. */
  1203. if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
  1204. STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
  1205. STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
  1206. SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
  1207. return(-EIO);
  1208. }
  1209. HPSB_DEBUG("Reconnected to SBP-2 device");
  1210. return(0);
  1211. }
  1212. /*
  1213. * This function is called in order to set the busy timeout (number of
  1214. * retries to attempt) on the sbp2 device.
  1215. */
  1216. static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
  1217. {
  1218. quadlet_t data;
  1219. SBP2_DEBUG("sbp2_set_busy_timeout");
  1220. /*
  1221. * Ok, let's write to the target's busy timeout register
  1222. */
  1223. data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
  1224. if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
  1225. SBP2_ERR("sbp2_set_busy_timeout error");
  1226. }
  1227. return(0);
  1228. }
  1229. /*
1230. * This function is called to parse an sbp2 device's config ROM unit
  1231. * directory. Used to determine things like sbp2 management agent offset,
  1232. * and command set used (SCSI or RBC).
  1233. */
  1234. static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
  1235. struct unit_directory *ud)
  1236. {
  1237. struct csr1212_keyval *kv;
  1238. struct csr1212_dentry *dentry;
  1239. u64 management_agent_addr;
  1240. u32 command_set_spec_id, command_set, unit_characteristics,
  1241. firmware_revision, workarounds;
  1242. int i;
  1243. SBP2_DEBUG("sbp2_parse_unit_directory");
  1244. management_agent_addr = 0x0;
  1245. command_set_spec_id = 0x0;
  1246. command_set = 0x0;
  1247. unit_characteristics = 0x0;
  1248. firmware_revision = 0x0;
  1249. /* Handle different fields in the unit directory, based on keys */
  1250. csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
  1251. switch (kv->key.id) {
  1252. case CSR1212_KV_ID_DEPENDENT_INFO:
  1253. if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
  1254. /* Save off the management agent address */
  1255. management_agent_addr =
  1256. CSR1212_REGISTER_SPACE_BASE +
  1257. (kv->value.csr_offset << 2);
  1258. SBP2_DEBUG("sbp2_management_agent_addr = %x",
  1259. (unsigned int) management_agent_addr);
  1260. } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
  1261. scsi_id->sbp2_device_type_and_lun = kv->value.immediate;
  1262. }
  1263. break;
  1264. case SBP2_COMMAND_SET_SPEC_ID_KEY:
  1265. /* Command spec organization */
  1266. command_set_spec_id = kv->value.immediate;
  1267. SBP2_DEBUG("sbp2_command_set_spec_id = %x",
  1268. (unsigned int) command_set_spec_id);
  1269. break;
  1270. case SBP2_COMMAND_SET_KEY:
  1271. /* Command set used by sbp2 device */
  1272. command_set = kv->value.immediate;
  1273. SBP2_DEBUG("sbp2_command_set = %x",
  1274. (unsigned int) command_set);
  1275. break;
  1276. case SBP2_UNIT_CHARACTERISTICS_KEY:
  1277. /*
1278. * Unit characteristics (orb related stuff
  1279. * that I'm not yet paying attention to)
  1280. */
  1281. unit_characteristics = kv->value.immediate;
  1282. SBP2_DEBUG("sbp2_unit_characteristics = %x",
  1283. (unsigned int) unit_characteristics);
  1284. break;
  1285. case SBP2_FIRMWARE_REVISION_KEY:
  1286. /* Firmware revision */
  1287. firmware_revision = kv->value.immediate;
  1288. if (force_inquiry_hack)
  1289. SBP2_INFO("sbp2_firmware_revision = %x",
  1290. (unsigned int) firmware_revision);
  1291. else SBP2_DEBUG("sbp2_firmware_revision = %x",
  1292. (unsigned int) firmware_revision);
  1293. break;
  1294. default:
  1295. break;
  1296. }
  1297. }
  1298. /* This is the start of our broken device checking. We try to hack
  1299. * around oddities and known defects. */
  1300. workarounds = 0x0;
  1301. /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
  1302. * bridge with 128KB max transfer size limitation. For sanity, we
  1303. * only voice this when the current max_sectors setting
  1304. * exceeds the 128k limit. By default, that is not the case.
  1305. *
  1306. * It would be really nice if we could detect this before the scsi
  1307. * host gets initialized. That way we can down-force the
  1308. * max_sectors to account for it. That is not currently
  1309. * possible. */
  1310. if ((firmware_revision & 0xffff00) ==
  1311. SBP2_128KB_BROKEN_FIRMWARE &&
  1312. (max_sectors * 512) > (128*1024)) {
  1313. SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
  1314. NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
  1315. SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
  1316. max_sectors);
  1317. workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
  1318. }
  1319. /* Check for a blacklisted set of devices that require us to force
1320. * a 36 byte host inquiry. This can be overridden as a module param
  1321. * (to force all hosts). */
  1322. for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
  1323. if ((firmware_revision & 0xffff00) ==
  1324. sbp2_broken_inquiry_list[i]) {
  1325. SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
  1326. NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
  1327. workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
  1328. break; /* No need to continue. */
  1329. }
  1330. }
  1331. /* If this is a logical unit directory entry, process the parent
  1332. * to get the values. */
  1333. if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
  1334. struct unit_directory *parent_ud =
  1335. container_of(ud->device.parent, struct unit_directory, device);
  1336. sbp2_parse_unit_directory(scsi_id, parent_ud);
  1337. } else {
  1338. scsi_id->sbp2_management_agent_addr = management_agent_addr;
  1339. scsi_id->sbp2_command_set_spec_id = command_set_spec_id;
  1340. scsi_id->sbp2_command_set = command_set;
  1341. scsi_id->sbp2_unit_characteristics = unit_characteristics;
  1342. scsi_id->sbp2_firmware_revision = firmware_revision;
  1343. scsi_id->workarounds = workarounds;
  1344. if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
  1345. scsi_id->sbp2_device_type_and_lun = ud->lun;
  1346. }
  1347. }
  1348. /*
  1349. * This function is called in order to determine the max speed and packet
  1350. * size we can use in our ORBs. Note, that we (the driver and host) only
  1351. * initiate the transaction. The SBP-2 device actually transfers the data
  1352. * (by reading from the DMA area we tell it). This means that the SBP-2
  1353. * device decides the actual maximum data it can transfer. We just tell it
  1354. * the speed that it needs to use, and the max_rec the host supports, and
  1355. * it takes care of the rest.
  1356. */
  1357. static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
  1358. {
  1359. struct sbp2scsi_host_info *hi = scsi_id->hi;
  1360. SBP2_DEBUG("sbp2_max_speed_and_size");
  1361. /* Initial setting comes from the hosts speed map */
  1362. scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64
  1363. + NODEID_TO_NODE(scsi_id->ne->nodeid)];
  1364. /* Bump down our speed if the user requested it */
  1365. if (scsi_id->speed_code > max_speed) {
  1366. scsi_id->speed_code = max_speed;
  1367. SBP2_ERR("Forcing SBP-2 max speed down to %s",
  1368. hpsb_speedto_str[scsi_id->speed_code]);
  1369. }
  1370. /* Payload size is the lesser of what our speed supports and what
  1371. * our host supports. */
  1372. scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code],
  1373. (u8)(hi->host->csr.max_rec - 1));
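/* max_payload_size is an encoded value: the usable payload in bytes is
 * 2^(max_payload_size + 2), e.g. a code of 9 allows 2048-byte packets
 * (the same formula the debug message below prints). */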
  1374. HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
  1375. NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
  1376. hpsb_speedto_str[scsi_id->speed_code],
  1377. 1 << ((u32)scsi_id->max_payload_size + 2));
  1378. return(0);
  1379. }
  1380. /*
  1381. * This function is called in order to perform a SBP-2 agent reset.
  1382. */
  1383. static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
  1384. {
  1385. quadlet_t data;
  1386. u64 addr;
  1387. int retval;
  1388. SBP2_DEBUG("sbp2_agent_reset");
  1389. /*
1390. * Ok, let's write to the target's command block agent (its AGENT_RESET register)
  1391. */
  1392. data = ntohl(SBP2_AGENT_RESET_DATA);
  1393. addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
  1394. if (wait)
  1395. retval = hpsb_node_write(scsi_id->ne, addr, &data, 4);
  1396. else
  1397. retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4);
  1398. if (retval < 0) {
1399. SBP2_ERR("hpsb_node_write failed");
  1400. return -EIO;
  1401. }
  1402. /*
  1403. * Need to make sure orb pointer is written on next command
  1404. */
  1405. scsi_id->last_orb = NULL;
  1406. return(0);
  1407. }
  1408. /*
  1409. * This function is called to create the actual command orb and s/g list
  1410. * out of the scsi command itself.
  1411. */
  1412. static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
  1413. struct sbp2_command_info *command,
  1414. unchar *scsi_cmd,
  1415. unsigned int scsi_use_sg,
  1416. unsigned int scsi_request_bufflen,
  1417. void *scsi_request_buffer,
  1418. enum dma_data_direction dma_dir)
  1419. {
  1420. struct sbp2scsi_host_info *hi = scsi_id->hi;
  1421. struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer;
  1422. struct sbp2_command_orb *command_orb = &command->command_orb;
  1423. struct sbp2_unrestricted_page_table *scatter_gather_element =
  1424. &command->scatter_gather_element[0];
  1425. u32 sg_count, sg_len, orb_direction;
  1426. dma_addr_t sg_addr;
  1427. int i;
  1428. /*
1429. * Set up our command ORB...
  1430. *
  1431. * NOTE: We're doing unrestricted page tables (s/g), as this is
  1432. * best performance (at least with the devices I have). This means
  1433. * that data_size becomes the number of s/g elements, and
  1434. * page_size should be zero (for unrestricted).
  1435. */
  1436. command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
  1437. command_orb->next_ORB_lo = 0x0;
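/* next_ORB starts out as the SBP-2 "null pointer"; sbp2_link_orb_command()
 * rewrites these two fields if another ORB is later appended. */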
  1438. command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
  1439. command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
  1440. command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
  1441. /*
  1442. * Get the direction of the transfer. If the direction is unknown, then use our
  1443. * goofy table as a back-up.
  1444. */
  1445. switch (dma_dir) {
  1446. case DMA_NONE:
  1447. orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
  1448. break;
  1449. case DMA_TO_DEVICE:
  1450. orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
  1451. break;
  1452. case DMA_FROM_DEVICE:
  1453. orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
  1454. break;
  1455. case DMA_BIDIRECTIONAL:
  1456. default:
  1457. SBP2_ERR("SCSI data transfer direction not specified. "
  1458. "Update the SBP2 direction table in sbp2.h if "
  1459. "necessary for your application");
  1460. __scsi_print_command(scsi_cmd);
  1461. orb_direction = sbp2scsi_direction_table[*scsi_cmd];
  1462. break;
  1463. }
  1464. /*
  1465. * Set-up our pagetable stuff... unfortunately, this has become
  1466. * messier than I'd like. Need to clean this up a bit. ;-)
  1467. */
  1468. if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
  1469. SBP2_DEBUG("No data transfer");
  1470. /*
  1471. * Handle no data transfer
  1472. */
  1473. command_orb->data_descriptor_hi = 0x0;
  1474. command_orb->data_descriptor_lo = 0x0;
  1475. command_orb->misc |= ORB_SET_DIRECTION(1);
  1476. } else if (scsi_use_sg) {
  1477. SBP2_DEBUG("Use scatter/gather");
  1478. /*
  1479. * Special case if only one element (and less than 64KB in size)
  1480. */
  1481. if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
  1482. SBP2_DEBUG("Only one s/g element");
  1483. command->dma_dir = dma_dir;
  1484. command->dma_size = sgpnt[0].length;
  1485. command->dma_type = CMD_DMA_PAGE;
  1486. command->cmd_dma = pci_map_page(hi->host->pdev,
  1487. sgpnt[0].page,
  1488. sgpnt[0].offset,
  1489. command->dma_size,
  1490. command->dma_dir);
  1491. SBP2_DMA_ALLOC("single page scatter element");
  1492. command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
  1493. command_orb->data_descriptor_lo = command->cmd_dma;
  1494. command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
  1495. command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
  1496. } else {
  1497. int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
  1498. SBP2_DMA_ALLOC("scatter list");
  1499. command->dma_size = scsi_use_sg;
  1500. command->dma_dir = dma_dir;
  1501. command->sge_buffer = sgpnt;
  1502. /* use page tables (s/g) */
  1503. command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
  1504. command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
  1505. command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
  1506. command_orb->data_descriptor_lo = command->sge_dma;
  1507. /*
  1508. * Loop through and fill out our sbp-2 page tables
  1509. * (and split up anything too large)
  1510. */
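/* Each unrestricted page table element can address at most
 * SBP2_MAX_SG_ELEMENT_LENGTH bytes, so any DMA segment larger than
 * that is split across several consecutive elements below. */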
  1511. for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
  1512. sg_len = sg_dma_len(sgpnt);
  1513. sg_addr = sg_dma_address(sgpnt);
  1514. while (sg_len) {
  1515. scatter_gather_element[sg_count].segment_base_lo = sg_addr;
  1516. if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
  1517. scatter_gather_element[sg_count].length_segment_base_hi =
  1518. PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
  1519. sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
  1520. sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
  1521. } else {
  1522. scatter_gather_element[sg_count].length_segment_base_hi =
  1523. PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
  1524. sg_len = 0;
  1525. }
  1526. sg_count++;
  1527. }
  1528. }
  1529. /* Number of page table (s/g) elements */
  1530. command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
  1531. sbp2util_packet_dump(scatter_gather_element,
  1532. (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
  1533. "sbp2 s/g list", command->sge_dma);
  1534. /*
  1535. * Byte swap page tables if necessary
  1536. */
  1537. sbp2util_cpu_to_be32_buffer(scatter_gather_element,
  1538. (sizeof(struct sbp2_unrestricted_page_table)) *
  1539. sg_count);
  1540. }
  1541. } else {
  1542. SBP2_DEBUG("No scatter/gather");
  1543. command->dma_dir = dma_dir;
  1544. command->dma_size = scsi_request_bufflen;
  1545. command->dma_type = CMD_DMA_SINGLE;
  1546. command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
  1547. command->dma_size,
  1548. command->dma_dir);
  1549. SBP2_DMA_ALLOC("single bulk");
  1550. /*
  1551. * Handle case where we get a command w/o s/g enabled (but
  1552. * check for transfers larger than 64K)
  1553. */
  1554. if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
  1555. command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
  1556. command_orb->data_descriptor_lo = command->cmd_dma;
  1557. command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
  1558. command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
  1559. /*
  1560. * Sanity, in case our direction table is not
  1561. * up-to-date
  1562. */
  1563. if (!scsi_request_bufflen) {
  1564. command_orb->data_descriptor_hi = 0x0;
  1565. command_orb->data_descriptor_lo = 0x0;
  1566. command_orb->misc |= ORB_SET_DIRECTION(1);
  1567. }
  1568. } else {
  1569. /*
  1570. * Need to turn this into page tables, since the
  1571. * buffer is too large.
  1572. */
  1573. command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
  1574. command_orb->data_descriptor_lo = command->sge_dma;
  1575. /* Use page tables (s/g) */
  1576. command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
  1577. command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
  1578. /*
  1579. * fill out our sbp-2 page tables (and split up
  1580. * the large buffer)
  1581. */
  1582. sg_count = 0;
  1583. sg_len = scsi_request_bufflen;
  1584. sg_addr = command->cmd_dma;
  1585. while (sg_len) {
  1586. scatter_gather_element[sg_count].segment_base_lo = sg_addr;
  1587. if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
  1588. scatter_gather_element[sg_count].length_segment_base_hi =
  1589. PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
  1590. sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
  1591. sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
  1592. } else {
  1593. scatter_gather_element[sg_count].length_segment_base_hi =
  1594. PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
  1595. sg_len = 0;
  1596. }
  1597. sg_count++;
  1598. }
  1599. /* Number of page table (s/g) elements */
  1600. command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
  1601. sbp2util_packet_dump(scatter_gather_element,
  1602. (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
  1603. "sbp2 s/g list", command->sge_dma);
  1604. /*
  1605. * Byte swap page tables if necessary
  1606. */
  1607. sbp2util_cpu_to_be32_buffer(scatter_gather_element,
  1608. (sizeof(struct sbp2_unrestricted_page_table)) *
  1609. sg_count);
  1610. }
  1611. }
  1612. /*
  1613. * Byte swap command ORB if necessary
  1614. */
  1615. sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
  1616. /*
  1617. * Put our scsi command in the command ORB
  1618. */
  1619. memset(command_orb->cdb, 0, 12);
  1620. memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
  1621. return(0);
  1622. }
  1623. /*
1624. * This function links a command ORB into the target's fetch agent: it either writes the ORB pointer register or appends to the last ORB and rings the doorbell.
  1625. */
  1626. static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
  1627. struct sbp2_command_info *command)
  1628. {
  1629. struct sbp2scsi_host_info *hi = scsi_id->hi;
  1630. struct sbp2_command_orb *command_orb = &command->command_orb;
  1631. struct node_entry *ne = scsi_id->ne;
  1632. u64 addr;
  1633. outstanding_orb_incr;
  1634. SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
  1635. command_orb, global_outstanding_command_orbs);
  1636. pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
  1637. sizeof(struct sbp2_command_orb),
  1638. PCI_DMA_BIDIRECTIONAL);
  1639. pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
  1640. sizeof(command->scatter_gather_element),
  1641. PCI_DMA_BIDIRECTIONAL);
  1642. /*
  1643. * Check to see if there are any previous orbs to use
  1644. */
  1645. if (scsi_id->last_orb == NULL) {
  1646. quadlet_t data[2];
  1647. /*
1648. * Ok, no ORB is outstanding, so write directly to the command block agent's ORB_POINTER register
  1649. */
  1650. addr = scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET;
  1651. data[0] = ORB_SET_NODE_ID(hi->host->node_id);
  1652. data[1] = command->command_orb_dma;
  1653. sbp2util_cpu_to_be32_buffer(data, 8);
  1654. SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
  1655. if (sbp2util_node_write_no_wait(ne, addr, data, 8) < 0) {
1656. SBP2_ERR("sbp2util_node_write_no_wait failed");
  1657. return -EIO;
  1658. }
  1659. SBP2_ORB_DEBUG("write command agent complete");
  1660. scsi_id->last_orb = command_orb;
  1661. scsi_id->last_orb_dma = command->command_orb_dma;
  1662. } else {
  1663. quadlet_t data;
  1664. /*
  1665. * We have an orb already sent (maybe or maybe not
  1666. * processed) that we can append this orb to. So do so,
  1667. * and ring the doorbell. Have to be very careful
  1668. * modifying these next orb pointers, as they are accessed
  1669. * both by the sbp2 device and us.
  1670. */
  1671. scsi_id->last_orb->next_ORB_lo =
  1672. cpu_to_be32(command->command_orb_dma);
  1673. /* Tells hardware that this pointer is valid */
  1674. scsi_id->last_orb->next_ORB_hi = 0x0;
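/* next_ORB_hi also carries the null-pointer flag, which is presumably
 * why it is cleared only after next_ORB_lo holds a valid address. */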
  1675. pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma,
  1676. sizeof(struct sbp2_command_orb),
  1677. PCI_DMA_BIDIRECTIONAL);
  1678. /*
  1679. * Ring the doorbell
  1680. */
  1681. data = cpu_to_be32(command->command_orb_dma);
  1682. addr = scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET;
  1683. SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
  1684. if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
  1685. SBP2_ERR("sbp2util_node_write_no_wait failed");
  1686. return(-EIO);
  1687. }
  1688. scsi_id->last_orb = command_orb;
  1689. scsi_id->last_orb_dma = command->command_orb_dma;
  1690. }
  1691. return(0);
  1692. }
  1693. /*
  1694. * This function is called in order to begin a regular SBP-2 command.
  1695. */
  1696. static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
  1697. struct scsi_cmnd *SCpnt,
  1698. void (*done)(struct scsi_cmnd *))
  1699. {
  1700. unchar *cmd = (unchar *) SCpnt->cmnd;
  1701. unsigned int request_bufflen = SCpnt->request_bufflen;
  1702. struct sbp2_command_info *command;
  1703. SBP2_DEBUG("sbp2_send_command");
  1704. #if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
  1705. printk("[scsi command]\n ");
  1706. scsi_print_command(SCpnt);
  1707. #endif
  1708. SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
  1709. SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
  1710. /*
  1711. * Allocate a command orb and s/g structure
  1712. */
  1713. command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
  1714. if (!command) {
  1715. return(-EIO);
  1716. }
  1717. /*
  1718. * The scsi stack sends down a request_bufflen which does not match the
  1719. * length field in the scsi cdb. This causes some sbp2 devices to
  1720. * reject this inquiry command. Fix the request_bufflen.
  1721. */
  1722. if (*cmd == INQUIRY) {
  1723. if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
  1724. request_bufflen = cmd[4] = 0x24;
  1725. else
  1726. request_bufflen = cmd[4];
  1727. }
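/* The 0x24 above is 36, the length of a standard INQUIRY response; the
 * blacklisted bridges reject INQUIRYs whose allocation length differs
 * from the transfer length, hence the forced 36-byte request. */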
  1728. /*
1729. * Now actually fill in the command orb and sbp2 s/g list
  1730. */
  1731. sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
  1732. request_bufflen, SCpnt->request_buffer,
  1733. SCpnt->sc_data_direction);
  1734. /*
  1735. * Update our cdb if necessary (to handle sbp2 RBC command set
  1736. * differences). This is where the command set hacks go! =)
  1737. */
  1738. sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
  1739. sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
  1740. "sbp2 command orb", command->command_orb_dma);
  1741. /*
  1742. * Initialize status fifo
  1743. */
  1744. memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
  1745. /*
  1746. * Link up the orb, and ring the doorbell if needed
  1747. */
  1748. sbp2_link_orb_command(scsi_id, command);
  1749. return(0);
  1750. }
  1751. /*
  1752. * This function deals with command set differences between Linux scsi
  1753. * command set and sbp2 RBC command set.
  1754. */
  1755. static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
  1756. {
  1757. unchar new_cmd[16];
  1758. u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
  1759. SBP2_DEBUG("sbp2_check_sbp2_command");
  1760. switch (*cmd) {
  1761. case READ_6:
  1762. if (sbp2_command_conversion_device_type(device_type)) {
  1763. SBP2_DEBUG("Convert READ_6 to READ_10");
  1764. /*
  1765. * Need to turn read_6 into read_10
  1766. */
  1767. new_cmd[0] = 0x28;
  1768. new_cmd[1] = (cmd[1] & 0xe0);
  1769. new_cmd[2] = 0x0;
  1770. new_cmd[3] = (cmd[1] & 0x1f);
  1771. new_cmd[4] = cmd[2];
  1772. new_cmd[5] = cmd[3];
  1773. new_cmd[6] = 0x0;
  1774. new_cmd[7] = 0x0;
  1775. new_cmd[8] = cmd[4];
  1776. new_cmd[9] = cmd[5];
  1777. memcpy(cmd, new_cmd, 10);
  1778. }
  1779. break;
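/* The shuffling above maps READ_6's 21-bit LBA (bytes 1-3) and 8-bit
 * transfer length (byte 4) onto READ_10's 32-bit LBA (bytes 2-5) and
 * 16-bit length (bytes 7-8); WRITE_6 below gets the same treatment.
 * Note that a READ_6 length of 0 means 256 blocks, a case this
 * conversion does not handle specially. */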
  1780. case WRITE_6:
  1781. if (sbp2_command_conversion_device_type(device_type)) {
  1782. SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
  1783. /*
  1784. * Need to turn write_6 into write_10
  1785. */
  1786. new_cmd[0] = 0x2a;
  1787. new_cmd[1] = (cmd[1] & 0xe0);
  1788. new_cmd[2] = 0x0;
  1789. new_cmd[3] = (cmd[1] & 0x1f);
  1790. new_cmd[4] = cmd[2];
  1791. new_cmd[5] = cmd[3];
  1792. new_cmd[6] = 0x0;
  1793. new_cmd[7] = 0x0;
  1794. new_cmd[8] = cmd[4];
  1795. new_cmd[9] = cmd[5];
  1796. memcpy(cmd, new_cmd, 10);
  1797. }
  1798. break;
  1799. case MODE_SENSE:
  1800. if (sbp2_command_conversion_device_type(device_type)) {
  1801. SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
  1802. /*
  1803. * Need to turn mode_sense_6 into mode_sense_10
  1804. */
  1805. new_cmd[0] = 0x5a;
  1806. new_cmd[1] = cmd[1];
  1807. new_cmd[2] = cmd[2];
  1808. new_cmd[3] = 0x0;
  1809. new_cmd[4] = 0x0;
  1810. new_cmd[5] = 0x0;
  1811. new_cmd[6] = 0x0;
  1812. new_cmd[7] = 0x0;
  1813. new_cmd[8] = cmd[4];
  1814. new_cmd[9] = cmd[5];
  1815. memcpy(cmd, new_cmd, 10);
  1816. }
  1817. break;
  1818. case MODE_SELECT:
  1819. /*
  1820. * TODO. Probably need to change mode select to 10 byte version
  1821. */
  1822. default:
  1823. break;
  1824. }
  1825. return;
  1826. }
  1827. /*
  1828. * Translates SBP-2 status into SCSI sense data for check conditions
  1829. */
  1830. static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
  1831. {
  1832. SBP2_DEBUG("sbp2_status_to_sense_data");
  1833. /*
  1834. * Ok, it's pretty ugly... ;-)
  1835. */
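/* Build fixed-format sense data (response code 0x70): byte 2 holds the
 * sense key, bytes 3-6 the information field, byte 7 the additional
 * length, and bytes 12-13 the ASC/ASCQ, all lifted from the SBP-2
 * status block. */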
  1836. sense_data[0] = 0x70;
  1837. sense_data[1] = 0x0;
  1838. sense_data[2] = sbp2_status[9];
  1839. sense_data[3] = sbp2_status[12];
  1840. sense_data[4] = sbp2_status[13];
  1841. sense_data[5] = sbp2_status[14];
  1842. sense_data[6] = sbp2_status[15];
  1843. sense_data[7] = 10;
  1844. sense_data[8] = sbp2_status[16];
  1845. sense_data[9] = sbp2_status[17];
  1846. sense_data[10] = sbp2_status[18];
  1847. sense_data[11] = sbp2_status[19];
  1848. sense_data[12] = sbp2_status[10];
  1849. sense_data[13] = sbp2_status[11];
  1850. sense_data[14] = sbp2_status[20];
  1851. sense_data[15] = sbp2_status[21];
  1852. return(sbp2_status[8] & 0x3f); /* return scsi status */
  1853. }
  1854. /*
  1855. * This function is called after a command is completed, in order to do any necessary SBP-2
  1856. * response data translations for the SCSI stack
  1857. */
  1858. static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
  1859. struct scsi_cmnd *SCpnt)
  1860. {
  1861. u8 *scsi_buf = SCpnt->request_buffer;
  1862. u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
  1863. SBP2_DEBUG("sbp2_check_sbp2_response");
  1864. switch (SCpnt->cmnd[0]) {
  1865. case INQUIRY:
  1866. /*
  1867. * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill
  1868. * this information in from the inquiry response data. Lun is set to zero.
  1869. */
  1870. if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
  1871. SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
  1872. scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
  1873. }
  1874. /*
  1875. * Make sure data length is ok. Minimum length is 36 bytes
  1876. */
  1877. if (scsi_buf[4] == 0) {
  1878. scsi_buf[4] = 36 - 5;
  1879. }
  1880. /*
  1881. * Check for Simple Direct Access Device and change it to TYPE_DISK
  1882. */
  1883. if ((scsi_buf[0] & 0x1f) == TYPE_SDAD) {
  1884. SBP2_DEBUG("Changing TYPE_SDAD to TYPE_DISK");
  1885. scsi_buf[0] &= 0xe0;
  1886. }
  1887. /*
  1888. * Fix ansi revision and response data format
  1889. */
  1890. scsi_buf[2] |= 2;
  1891. scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
  1892. break;
  1893. case MODE_SENSE:
  1894. if (sbp2_command_conversion_device_type(device_type)) {
  1895. SBP2_DEBUG("Modify mode sense response (10 byte version)");
  1896. scsi_buf[0] = scsi_buf[1]; /* Mode data length */
  1897. scsi_buf[1] = scsi_buf[2]; /* Medium type */
  1898. scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
  1899. scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
  1900. memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
  1901. }
  1902. break;
  1903. case MODE_SELECT:
  1904. /*
  1905. * TODO. Probably need to change mode select to 10 byte version
  1906. */
  1907. default:
  1908. break;
  1909. }
  1910. return;
  1911. }
  1912. /*
  1913. * This function deals with status writes from the SBP-2 device
  1914. */
  1915. static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
  1916. quadlet_t *data, u64 addr, size_t length, u16 fl)
  1917. {
  1918. struct sbp2scsi_host_info *hi;
  1919. struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
  1920. u32 id;
  1921. struct scsi_cmnd *SCpnt = NULL;
  1922. u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
  1923. struct sbp2_command_info *command;
  1924. SBP2_DEBUG("sbp2_handle_status_write");
  1925. sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
  1926. if (!host) {
  1927. SBP2_ERR("host is NULL - this is bad!");
  1928. return(RCODE_ADDRESS_ERROR);
  1929. }
  1930. hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
  1931. if (!hi) {
  1932. SBP2_ERR("host info is NULL - this is bad!");
  1933. return(RCODE_ADDRESS_ERROR);
  1934. }
  1935. /*
  1936. * Find our scsi_id structure by looking at the status fifo address written to by
  1937. * the sbp2 device.
  1938. */
  1939. id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
  1940. list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
  1941. if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
  1942. scsi_id = scsi_id_tmp;
  1943. break;
  1944. }
  1945. }
  1946. if (!scsi_id) {
  1947. SBP2_ERR("scsi_id is NULL - device is gone?");
  1948. return(RCODE_ADDRESS_ERROR);
  1949. }
  1950. /*
  1951. * Put response into scsi_id status fifo...
  1952. */
  1953. memcpy(&scsi_id->status_block, data, length);
  1954. /*
  1955. * Byte swap first two quadlets (8 bytes) of status for processing
  1956. */
  1957. sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8);
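/* Only the first two quadlets (ORB_offset_hi_misc and ORB_offset_lo)
 * need to be in CPU order for the matching below; any sense payload is
 * translated later by sbp2_status_to_sense_data(). */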
  1958. /*
  1959. * Handle command ORB status here if necessary. First, need to match status with command.
  1960. */
  1961. command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo);
  1962. if (command) {
  1963. SBP2_DEBUG("Found status for command ORB");
  1964. pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
  1965. sizeof(struct sbp2_command_orb),
  1966. PCI_DMA_BIDIRECTIONAL);
  1967. pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
  1968. sizeof(command->scatter_gather_element),
  1969. PCI_DMA_BIDIRECTIONAL);
  1970. SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
  1971. outstanding_orb_decr;
  1972. /*
  1973. * Matched status with command, now grab scsi command pointers and check status
  1974. */
  1975. SCpnt = command->Current_SCpnt;
  1976. sbp2util_mark_command_completed(scsi_id, command);
  1977. if (SCpnt) {
  1978. /*
  1979. * See if the target stored any scsi status information
  1980. */
  1981. if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) {
  1982. /*
  1983. * Translate SBP-2 status to SCSI sense data
  1984. */
  1985. SBP2_DEBUG("CHECK CONDITION");
  1986. scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
  1987. }
  1988. /*
  1989. * Check to see if the dead bit is set. If so, we'll have to initiate
  1990. * a fetch agent reset.
  1991. */
  1992. if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) {
  1993. /*
  1994. * Initiate a fetch agent reset.
  1995. */
  1996. SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
  1997. sbp2_agent_reset(scsi_id, 0);
  1998. }
  1999. SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
  2000. }
  2001. /*
  2002. * Check here to see if there are no commands in-use. If there are none, we can
  2003. * null out last orb so that next time around we write directly to the orb pointer...
  2004. * Quick start saves one 1394 bus transaction.
  2005. */
  2006. if (list_empty(&scsi_id->sbp2_command_orb_inuse)) {
  2007. scsi_id->last_orb = NULL;
  2008. }
  2009. } else {
  2010. /*
  2011. * It's probably a login/logout/reconnect status.
  2012. */
  2013. if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
  2014. (scsi_id->query_logins_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
  2015. (scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
  2016. (scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) {
  2017. atomic_set(&scsi_id->sbp2_login_complete, 1);
  2018. }
  2019. }
  2020. if (SCpnt) {
  2021. /* Complete the SCSI command. */
  2022. SBP2_DEBUG("Completing SCSI command");
  2023. sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
  2024. command->Current_done);
  2025. SBP2_ORB_DEBUG("command orb completed");
  2026. }
  2027. return(RCODE_COMPLETE);
  2028. }
  2029. /**************************************
  2030. * SCSI interface related section
  2031. **************************************/
  2032. /*
  2033. * This routine is the main request entry routine for doing I/O. It is
  2034. * called from the scsi stack directly.
  2035. */
  2036. static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
  2037. void (*done)(struct scsi_cmnd *))
  2038. {
  2039. struct scsi_id_instance_data *scsi_id =
  2040. (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
  2041. struct sbp2scsi_host_info *hi;
  2042. SBP2_DEBUG("sbp2scsi_queuecommand");
  2043. /*
  2044. * If scsi_id is null, it means there is no device in this slot,
  2045. * so we should return selection timeout.
  2046. */
  2047. if (!scsi_id) {
  2048. SCpnt->result = DID_NO_CONNECT << 16;
  2049. done (SCpnt);
  2050. return 0;
  2051. }
  2052. hi = scsi_id->hi;
  2053. if (!hi) {
  2054. SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
  2055. SCpnt->result = DID_NO_CONNECT << 16;
  2056. done (SCpnt);
  2057. return(0);
  2058. }
  2059. /*
  2060. * Until we handle multiple luns, just return selection time-out
  2061. * to any IO directed at non-zero LUNs
  2062. */
  2063. if (SCpnt->device->lun) {
  2064. SCpnt->result = DID_NO_CONNECT << 16;
  2065. done (SCpnt);
  2066. return(0);
  2067. }
  2068. /*
  2069. * Check for request sense command, and handle it here
  2070. * (autorequest sense)
  2071. */
  2072. if (SCpnt->cmnd[0] == REQUEST_SENSE) {
  2073. SBP2_DEBUG("REQUEST_SENSE");
2074. memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, min_t(size_t, SCpnt->request_bufflen, sizeof(SCpnt->sense_buffer))); /* cap at sense buffer size */
  2075. memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
  2076. sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
  2077. return(0);
  2078. }
  2079. /*
  2080. * Check to see if we are in the middle of a bus reset.
  2081. */
  2082. if (!hpsb_node_entry_valid(scsi_id->ne)) {
  2083. SBP2_ERR("Bus reset in progress - rejecting command");
  2084. SCpnt->result = DID_BUS_BUSY << 16;
  2085. done (SCpnt);
  2086. return(0);
  2087. }
  2088. /*
  2089. * Try and send our SCSI command
  2090. */
  2091. if (sbp2_send_command(scsi_id, SCpnt, done)) {
  2092. SBP2_ERR("Error sending SCSI command");
  2093. sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
  2094. SCpnt, done);
  2095. }
  2096. return(0);
  2097. }
  2098. /*
  2099. * This function is called in order to complete all outstanding SBP-2
  2100. * commands (in case of resets, etc.).
  2101. */
  2102. static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
  2103. u32 status)
  2104. {
  2105. struct sbp2scsi_host_info *hi = scsi_id->hi;
  2106. struct list_head *lh;
  2107. struct sbp2_command_info *command;
  2108. SBP2_DEBUG("sbp2scsi_complete_all_commands");
  2109. while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
  2110. SBP2_DEBUG("Found pending command to complete");
  2111. lh = scsi_id->sbp2_command_orb_inuse.next;
  2112. command = list_entry(lh, struct sbp2_command_info, list);
  2113. pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
  2114. sizeof(struct sbp2_command_orb),
  2115. PCI_DMA_BIDIRECTIONAL);
  2116. pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
  2117. sizeof(command->scatter_gather_element),
  2118. PCI_DMA_BIDIRECTIONAL);
  2119. sbp2util_mark_command_completed(scsi_id, command);
  2120. if (command->Current_SCpnt) {
  2121. command->Current_SCpnt->result = status << 16;
  2122. command->Current_done(command->Current_SCpnt);
  2123. }
  2124. }
  2125. return;
  2126. }
  2127. /*
  2128. * This function is called in order to complete a regular SBP-2 command.
  2129. *
  2130. * This can be called in interrupt context.
  2131. */
  2132. static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
  2133. u32 scsi_status, struct scsi_cmnd *SCpnt,
  2134. void (*done)(struct scsi_cmnd *))
  2135. {
  2136. unsigned long flags;
  2137. SBP2_DEBUG("sbp2scsi_complete_command");
  2138. /*
  2139. * Sanity
  2140. */
  2141. if (!SCpnt) {
  2142. SBP2_ERR("SCpnt is NULL");
  2143. return;
  2144. }
  2145. /*
  2146. * If a bus reset is in progress and there was an error, don't
  2147. * complete the command, just let it get retried at the end of the
  2148. * bus reset.
  2149. */
  2150. if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
  2151. SBP2_ERR("Bus reset in progress - retry command later");
  2152. return;
  2153. }
  2154. /*
  2155. * Switch on scsi status
  2156. */
  2157. switch (scsi_status) {
  2158. case SBP2_SCSI_STATUS_GOOD:
2159. SCpnt->result = DID_OK << 16;
  2160. break;
  2161. case SBP2_SCSI_STATUS_BUSY:
  2162. SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
  2163. SCpnt->result = DID_BUS_BUSY << 16;
  2164. break;
  2165. case SBP2_SCSI_STATUS_CHECK_CONDITION:
  2166. SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
  2167. SCpnt->result = CHECK_CONDITION << 1;
  2168. /*
  2169. * Debug stuff
  2170. */
  2171. #if CONFIG_IEEE1394_SBP2_DEBUG >= 1
  2172. scsi_print_command(SCpnt);
  2173. scsi_print_sense("bh", SCpnt);
  2174. #endif
  2175. break;
  2176. case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
  2177. SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
  2178. SCpnt->result = DID_NO_CONNECT << 16;
  2179. scsi_print_command(SCpnt);
  2180. break;
  2181. case SBP2_SCSI_STATUS_CONDITION_MET:
  2182. case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
  2183. case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
  2184. SBP2_ERR("Bad SCSI status = %x", scsi_status);
  2185. SCpnt->result = DID_ERROR << 16;
  2186. scsi_print_command(SCpnt);
  2187. break;
  2188. default:
  2189. SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
  2190. SCpnt->result = DID_ERROR << 16;
  2191. }
  2192. /*
  2193. * Take care of any sbp2 response data mucking here (RBC stuff, etc.)
  2194. */
  2195. if (SCpnt->result == DID_OK) {
  2196. sbp2_check_sbp2_response(scsi_id, SCpnt);
  2197. }
  2198. /*
  2199. * If a bus reset is in progress and there was an error, complete
  2200. * the command as busy so that it will get retried.
  2201. */
  2202. if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
  2203. SBP2_ERR("Completing command with busy (bus reset)");
  2204. SCpnt->result = DID_BUS_BUSY << 16;
  2205. }
  2206. /*
  2207. * If a unit attention occurs, return busy status so it gets
  2208. * retried... it could have happened because of a 1394 bus reset
  2209. * or hot-plug...
  2210. */
  2211. #if 0
  2212. if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
  2213. (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
  2214. SBP2_DEBUG("UNIT ATTENTION - return busy");
  2215. SCpnt->result = DID_BUS_BUSY << 16;
  2216. }
  2217. #endif
  2218. /*
  2219. * Tell scsi stack that we're done with this command
  2220. */
  2221. spin_lock_irqsave(scsi_id->scsi_host->host_lock,flags);
  2222. done (SCpnt);
  2223. spin_unlock_irqrestore(scsi_id->scsi_host->host_lock,flags);
  2224. return;
  2225. }
  2226. static int sbp2scsi_slave_configure (struct scsi_device *sdev)
  2227. {
  2228. blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
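/* Require 512-byte alignment for DMA-able buffers; presumably this keeps
 * payloads sector-aligned so the block layer bounce-buffers anything
 * less aligned before it reaches the SBP-2 transport. */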
  2229. return 0;
  2230. }
  2231. /*
  2232. * Called by scsi stack when something has really gone wrong. Usually
  2233. * called when a command has timed-out for some reason.
  2234. */
  2235. static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
  2236. {
  2237. struct scsi_id_instance_data *scsi_id =
  2238. (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2239. struct sbp2scsi_host_info *hi = scsi_id ? scsi_id->hi : NULL; /* scsi_id may be NULL here */
  2240. struct sbp2_command_info *command;
  2241. SBP2_ERR("aborting sbp2 command");
  2242. scsi_print_command(SCpnt);
  2243. if (scsi_id) {
  2244. /*
  2245. * Right now, just return any matching command structures
  2246. * to the free pool.
  2247. */
  2248. command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
  2249. if (command) {
  2250. SBP2_DEBUG("Found command to abort");
  2251. pci_dma_sync_single_for_cpu(hi->host->pdev,
  2252. command->command_orb_dma,
  2253. sizeof(struct sbp2_command_orb),
  2254. PCI_DMA_BIDIRECTIONAL);
  2255. pci_dma_sync_single_for_cpu(hi->host->pdev,
  2256. command->sge_dma,
  2257. sizeof(command->scatter_gather_element),
  2258. PCI_DMA_BIDIRECTIONAL);
  2259. sbp2util_mark_command_completed(scsi_id, command);
  2260. if (command->Current_SCpnt) {
  2261. command->Current_SCpnt->result = DID_ABORT << 16;
  2262. command->Current_done(command->Current_SCpnt);
  2263. }
  2264. }
  2265. /*
  2266. * Initiate a fetch agent reset.
  2267. */
  2268. sbp2_agent_reset(scsi_id, 0);
  2269. sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
  2270. }
  2271. return(SUCCESS);
  2272. }
  2273. /*
  2274. * Called by scsi stack when something has really gone wrong.
  2275. */
  2276. static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
  2277. {
  2278. struct scsi_id_instance_data *scsi_id =
  2279. (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
  2280. SBP2_ERR("reset requested");
  2281. if (scsi_id) {
  2282. SBP2_ERR("Generating sbp2 fetch agent reset");
  2283. sbp2_agent_reset(scsi_id, 0);
  2284. }
  2285. return(SUCCESS);
  2286. }
  2287. static const char *sbp2scsi_info (struct Scsi_Host *host)
  2288. {
  2289. return "SCSI emulation for IEEE-1394 SBP-2 Devices";
  2290. }
  2291. static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, char *buf)
  2292. {
  2293. struct scsi_device *sdev;
  2294. struct scsi_id_instance_data *scsi_id;
  2295. int lun;
  2296. if (!(sdev = to_scsi_device(dev)))
  2297. return 0;
  2298. if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
  2299. return 0;
  2300. if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED)
  2301. lun = 0;
  2302. else
  2303. lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
  2304. return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
  2305. scsi_id->ud->id, lun);
  2306. }
  2307. static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
  2308. static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
  2309. &dev_attr_ieee1394_id,
  2310. NULL
  2311. };
  2312. MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
  2313. MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
  2314. MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
  2315. MODULE_LICENSE("GPL");
  2316. /* SCSI host template */
  2317. static struct scsi_host_template scsi_driver_template = {
  2318. .module = THIS_MODULE,
  2319. .name = "SBP-2 IEEE-1394",
  2320. .proc_name = SBP2_DEVICE_NAME,
  2321. .info = sbp2scsi_info,
  2322. .queuecommand = sbp2scsi_queuecommand,
  2323. .eh_abort_handler = sbp2scsi_abort,
  2324. .eh_device_reset_handler = sbp2scsi_reset,
  2325. .eh_bus_reset_handler = sbp2scsi_reset,
  2326. .eh_host_reset_handler = sbp2scsi_reset,
  2327. .slave_configure = sbp2scsi_slave_configure,
  2328. .this_id = -1,
  2329. .sg_tablesize = SG_ALL,
  2330. .use_clustering = ENABLE_CLUSTERING,
  2331. .cmd_per_lun = SBP2_MAX_CMDS,
  2332. .can_queue = SBP2_MAX_CMDS,
  2333. .emulated = 1,
  2334. .sdev_attrs = sbp2_sysfs_sdev_attrs,
  2335. };
2336. static int __init sbp2_module_init(void)
  2337. {
  2338. int ret;
  2339. SBP2_DEBUG("sbp2_module_init");
  2340. printk(KERN_INFO "sbp2: %s\n", version);
  2341. /* Module load debug option to force one command at a time (serializing I/O) */
  2342. if (serialize_io) {
  2343. SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
  2344. scsi_driver_template.can_queue = 1;
  2345. scsi_driver_template.cmd_per_lun = 1;
  2346. }
  2347. /* Set max sectors (module load option). Default is 255 sectors. */
  2348. scsi_driver_template.max_sectors = max_sectors;
  2349. /* Register our high level driver with 1394 stack */
  2350. hpsb_register_highlevel(&sbp2_highlevel);
  2351. ret = hpsb_register_protocol(&sbp2_driver);
  2352. if (ret) {
  2353. SBP2_ERR("Failed to register protocol");
  2354. hpsb_unregister_highlevel(&sbp2_highlevel);
  2355. return ret;
  2356. }
  2357. return 0;
  2358. }
  2359. static void __exit sbp2_module_exit(void)
  2360. {
  2361. SBP2_DEBUG("sbp2_module_exit");
  2362. hpsb_unregister_protocol(&sbp2_driver);
  2363. hpsb_unregister_highlevel(&sbp2_highlevel);
  2364. }
  2365. module_init(sbp2_module_init);
  2366. module_exit(sbp2_module_exit);