
/drivers/scsi/qla1280.c

http://github.com/mirrors/linux
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /******************************************************************************
  3. * QLOGIC LINUX SOFTWARE
  4. *
  5. * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
  6. * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
  7. * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
  8. * Copyright (C) 2003-2004 Christoph Hellwig
  9. *
  10. ******************************************************************************/
  11. #define QLA1280_VERSION "3.27.1"
  12. /*****************************************************************************
  13. Revision History:
  14. Rev 3.27.1, February 8, 2010, Michael Reed
  15. - Retain firmware image for error recovery.
  16. Rev 3.27, February 10, 2009, Michael Reed
  17. - General code cleanup.
  18. - Improve error recovery.
  19. Rev 3.26, January 16, 2006 Jes Sorensen
  20. - Ditch all < 2.6 support
  21. Rev 3.25.1, February 10, 2005 Christoph Hellwig
  22. - use pci_map_single to map non-S/G requests
  23. - remove qla1280_proc_info
  24. Rev 3.25, September 28, 2004, Christoph Hellwig
  25. - add support for ISP1020/1040
  26. - don't include "scsi.h" anymore for 2.6.x
  27. Rev 3.24.4 June 7, 2004 Christoph Hellwig
  28. - restructure firmware loading, cleanup initialization code
  29. - prepare support for ISP1020/1040 chips
  30. Rev 3.24.3 January 19, 2004, Jes Sorensen
  31. - Handle PCI DMA mask settings correctly
  32. - Correct order of error handling in probe_one, free_irq should not
  33. be called if request_irq failed
  34. Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
  35. - Big endian fixes (James)
  36. - Remove bogus IOCB content on zero data transfer commands (Andrew)
  37. Rev 3.24.1 January 5, 2004, Jes Sorensen
  38. - Initialize completion queue to avoid OOPS on probe
  39. - Handle interrupts during mailbox testing
  40. Rev 3.24 November 17, 2003, Christoph Hellwig
  41. - use struct list_head for completion queue
  42. - avoid old Scsi_FOO typedefs
  43. - cleanup 2.4 compat glue a bit
  44. - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
  45. - make initialization for memory mapped vs port I/O more similar
  46. - remove broken pci config space manipulation
  47. - kill more cruft
  48. - this is an almost perfect 2.6 scsi driver now! ;)
  49. Rev 3.23.39 December 17, 2003, Jes Sorensen
  50. - Delete completion queue from srb if mailbox command failed
  51. to avoid qla1280_done completing qla1280_error_action's
  52. obsolete context
  53. - Reduce arguments for qla1280_done
  54. Rev 3.23.38 October 18, 2003, Christoph Hellwig
  55. - Convert to new-style hotpluggable driver for 2.6
  56. - Fix missing scsi_unregister/scsi_host_put on HBA removal
  57. - Kill some more cruft
  58. Rev 3.23.37 October 1, 2003, Jes Sorensen
  59. - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
  60. random CONFIG option
  61. - Clean up locking in probe path
  62. Rev 3.23.36 October 1, 2003, Christoph Hellwig
  63. - queuecommand only ever receives new commands - clear flags
  64. - Reintegrate lost fixes from Linux 2.5
  65. Rev 3.23.35 August 14, 2003, Jes Sorensen
  66. - Build against 2.6
  67. Rev 3.23.34 July 23, 2003, Jes Sorensen
  68. - Remove pointless TRUE/FALSE macros
  69. - Clean up vchan handling
  70. Rev 3.23.33 July 3, 2003, Jes Sorensen
  71. - Don't define register access macros before define determining MMIO.
  72. This just happened to work out on ia64 but not elsewhere.
  73. - Don't try and read from the card while it is in reset as
  74. it won't respond and causes an MCA
  75. Rev 3.23.32 June 23, 2003, Jes Sorensen
  76. - Basic support for boot time arguments
  77. Rev 3.23.31 June 8, 2003, Jes Sorensen
  78. - Reduce boot time messages
  79. Rev 3.23.30 June 6, 2003, Jes Sorensen
  80. - Do not enable sync/wide/ppr before it has been determined
  81. that the target device actually supports it
  82. - Enable DMA arbitration for multi channel controllers
  83. Rev 3.23.29 June 3, 2003, Jes Sorensen
  84. - Port to 2.5.69
  85. Rev 3.23.28 June 3, 2003, Jes Sorensen
  86. - Eliminate duplicate marker commands on bus resets
  87. - Handle outstanding commands appropriately on bus/device resets
  88. Rev 3.23.27 May 28, 2003, Jes Sorensen
  89. - Remove bogus input queue code, let the Linux SCSI layer do the work
  90. - Clean up NVRAM handling, only read it once from the card
  91. - Add a number of missing default nvram parameters
  92. Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
  93. - Use completion queue for mailbox commands instead of busy wait
  94. Rev 3.23.25 Beta May 27, 2003, James Bottomley
  95. - Migrate to use new error handling code
  96. Rev 3.23.24 Beta May 21, 2003, James Bottomley
  97. - Big endian support
  98. - Cleanup data direction code
  99. Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
  100. - Switch to using MMIO instead of PIO
  101. Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
  102. - Fix PCI parity problem with 12160 during reset.
  103. Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
  104. - Use pci_map_page()/pci_unmap_page() instead of map_single version.
  105. Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
  106. - Remove < 2.4.x support
  107. - Introduce HOST_LOCK to make the spin lock changes portable.
  108. - Remove a bunch of idiotic and unnecessary typedef's
  109. - Kill all leftovers of target-mode support which never worked anyway
  110. Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
  111. - Do qla1280_pci_config() before calling request_irq() and
  112. request_region()
  113. - Use pci_dma_hi32() to handle upper word of DMA addresses instead
  114. of large shifts
  115. - Hand correct arguments to free_irq() in case of failure
  116. Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
  117. - Run source through Lindent and clean up the output
  118. Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
  119. - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
  120. Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
  121. - Rely on mailbox commands generating interrupts - do not
  122. run qla1280_isr() from ql1280_mailbox_command()
  123. - Remove device_reg_t
  124. - Integrate ql12160_set_target_parameters() with 1280 version
  125. - Make qla1280_setup() non static
  126. - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
  127. sent to the card - this command pauses the firmware!!!
  128. Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
  129. - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
  130. - Remove a pile of pointless and confusing (srb_t **) and
  131. (scsi_lu_t *) typecasts
  132. - Explicit mark that we do not use the new error handling (for now)
  133. - Remove scsi_qla_host_t and use 'struct' instead
  134. - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
  135. pci_64bit_slot flags which weren't used for anything anyway
  136. - Grab host->host_lock while calling qla1280_isr() from abort()
  137. - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
  138. do not need to save/restore flags in the interrupt handler
  139. - Enable interrupts early (before any mailbox access) in preparation
  140. for cleaning up the mailbox handling
  141. Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
  142. - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
  143. it with proper use of dprintk().
  144. - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
  145. a debug level argument to determine if data is to be printed
  146. - Add KERN_* info to printk()
  147. Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
  148. - Significant cosmetic cleanups
  149. - Change debug code to use dprintk() and remove #if mess
  150. Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
  151. - More cosmetic cleanups, fix places treating return as function
  152. - use cpu_relax() in qla1280_debounce_register()
  153. Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
  154. - Make it compile under 2.5.5
  155. Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
  156. - Do not typecast short * to long * in QL1280BoardTbl, this
  157. broke miserably on big endian boxes
  158. Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
  159. - Remove pre 2.2 hack for checking for reentrance in interrupt handler
  160. - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
  161. unsigned int to match the types from struct scsi_cmnd
  162. Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
  163. - Remove bogus timer_t typedef from qla1280.h
  164. - Remove obsolete pre 2.2 PCI setup code, use proper #define's
  165. for PCI_ values, call pci_set_master()
  166. - Fix memleak of qla1280_buffer on module unload
  167. - Only compile module parsing code #ifdef MODULE - should be
  168. changed to use individual MODULE_PARM's later
  169. - Remove dummy_buffer that was never modified nor printed
  170. - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
  171. #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
  172. - Remove \r from print statements, this is Linux, not DOS
  173. - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
  174. dummy macros
  175. - Remove C++ compile hack in header file as Linux drivers are not
  176. supposed to be compiled as C++
  177. - Kill MS_64BITS macro - removing it makes the code more readable
  178. - Remove unnecessary flags.in_interrupts bit
  179. Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
  180. - Don't check for set flags on q->q_flag one by one in qla1280_next()
  181. - Check whether the interrupt was generated by the QLA1280 before
  182. doing any processing
  183. - qla1280_status_entry(): Only zero out part of sense_buffer that
  184. is not being copied into
  185. - Remove more superfluous typecasts
  186. - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
  187. Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
  188. - Don't walk the entire list in qla1280_putq_t() just to directly
  189. grab the pointer to the last element afterwards
  190. Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
  191. - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
  192. Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
  193. - Set dev->max_sectors to 1024
  194. Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
  195. - Provide compat macros for pci_enable_device(), pci_find_subsys()
  196. and scsi_set_pci_device()
  197. - Call scsi_set_pci_device() for all devices
  198. - Reduce size of kernel version dependent device probe code
  199. - Move duplicate probe/init code to separate function
  200. - Handle error if qla1280_mem_alloc() fails
  201. - Kill OFFSET() macro and use Linux's PCI definitions instead
  202. - Kill private structure defining PCI config space (struct config_reg)
  203. - Only allocate I/O port region if not in MMIO mode
  204. - Remove duplicate (unused) sanity check of the size of srb_t
  205. Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
  206. - Change home-brew memset() implementations to use memset()
  207. - Remove all references to COMTRACE() - accessing a PC's COM2 serial
  208. port directly is not legal under Linux.
  209. Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
  210. - Remove pre 2.2 kernel support
  211. - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
  212. - Fix MMIO access to use readl/writel instead of directly
  213. dereferencing pointers
  214. - Nuke MSDOS debugging code
  215. - Change true/false data types to int from uint8_t
  216. - Use int for counters instead of uint8_t etc.
  217. - Clean up size & byte order conversion macro usage
  218. Rev 3.23 Beta January 11, 2001 BN Qlogic
  219. - Added check of device_id when handling non
  220. QLA12160s during detect().
  221. Rev 3.22 Beta January 5, 2001 BN Qlogic
  222. - Changed queue_task() to schedule_task()
  223. for kernels 2.4.0 and higher.
  224. Note: 2.4.0-testxx kernels released prior to
  225. the actual 2.4.0 kernel release in January 2001
  226. will get compile/link errors with schedule_task().
  227. Please update your kernel to released 2.4.0 level,
  228. or comment lines in this file flagged with 3.22
  229. to resolve compile/link error of schedule_task().
  230. - Added -DCONFIG_SMP in addition to -D__SMP__
  231. in Makefile for 2.4.0 builds of driver as module.
  232. Rev 3.21 Beta January 4, 2001 BN Qlogic
  233. - Changed criteria of 64/32 Bit mode of HBA
  234. operation according to BITS_PER_LONG rather
  235. than HBA's NVRAM setting of >4Gig memory bit;
  236. so that the HBA auto-configures without the need
  237. to setup each system individually.
  238. Rev 3.20 Beta December 5, 2000 BN Qlogic
  239. - Added priority handling to IA-64 onboard SCSI
  240. ISP12160 chip for kernels greater than 2.3.18.
  241. - Added irqrestore for qla1280_intr_handler.
  242. - Enabled /proc/scsi/qla1280 interface.
  243. - Clear /proc/scsi/qla1280 counters in detect().
  244. Rev 3.19 Beta October 13, 2000 BN Qlogic
  245. - Declare driver_template for new kernel
  246. (2.4.0 and greater) scsi initialization scheme.
  247. - Update /proc/scsi entry for 2.3.18 kernels and
  248. above as qla1280
  249. Rev 3.18 Beta October 10, 2000 BN Qlogic
  250. - Changed scan order of adapters to map
  251. the QLA12160 followed by the QLA1280.
  252. Rev 3.17 Beta September 18, 2000 BN Qlogic
  253. - Removed warnings for 32 bit 2.4.x compiles
  254. - Corrected declared size for request and response
  255. DMA addresses that are kept in each ha
  256. Rev. 3.16 Beta August 25, 2000 BN Qlogic
  257. - Corrected 64 bit addressing issue on IA-64
  258. where the upper 32 bits were not properly
  259. passed to the RISC engine.
  260. Rev. 3.15 Beta August 22, 2000 BN Qlogic
  261. - Modified qla1280_setup_chip to properly load
  262. ISP firmware for greater than 4 Gig memory on IA-64
  263. Rev. 3.14 Beta August 16, 2000 BN Qlogic
  264. - Added setting of dma_mask to full 64 bit
  265. if flags.enable_64bit_addressing is set in NVRAM
  266. Rev. 3.13 Beta August 16, 2000 BN Qlogic
  267. - Use new PCI DMA mapping APIs for 2.4.x kernel
  268. Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
  269. - Added check of pci_enable_device to detect() for 2.3.x
  270. - Use pci_resource_start() instead of
  271. pdev->resource[0].start in detect() for 2.3.x
  272. - Updated driver version
  273. Rev. 3.11 July 14, 2000 BN Qlogic
  274. - Updated SCSI Firmware to following versions:
  275. qla1x80: 8.13.08
  276. qla1x160: 10.04.08
  277. - Updated driver version to 3.11
  278. Rev. 3.10 June 23, 2000 BN Qlogic
  279. - Added filtering of AMI SubSys Vendor ID devices
  280. Rev. 3.9
  281. - DEBUG_QLA1280 undefined and new version BN Qlogic
  282. Rev. 3.08b May 9, 2000 MD Dell
  283. - Added logic to check against AMI subsystem vendor ID
  284. Rev. 3.08 May 4, 2000 DG Qlogic
  285. - Added logic to check for PCI subsystem ID.
  286. Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
  287. - Updated SCSI Firmware to following versions:
  288. qla12160: 10.01.19
  289. qla1280: 8.09.00
  290. Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
  291. - Internal revision; not released
  292. Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
  293. - Edit correction for virt_to_bus and PROC.
  294. Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
  295. - Merge changes from ia64 port.
  296. Rev. 3.03 Mar 28, 2000 BN Qlogic
  297. - Increase version to reflect new code drop with compile fix
  298. of issue with inclusion of linux/spinlock for 2.3 kernels
  299. Rev. 3.02 Mar 15, 2000 BN Qlogic
  300. - Merge qla1280_proc_info from 2.10 code base
  301. Rev. 3.01 Feb 10, 2000 BN Qlogic
  302. - Corrected code to compile on a 2.2.x kernel.
  303. Rev. 3.00 Jan 17, 2000 DG Qlogic
  304. - Added 64-bit support.
  305. Rev. 2.07 Nov 9, 1999 DG Qlogic
  306. - Added new routine to set target parameters for ISP12160.
  307. Rev. 2.06 Sept 10, 1999 DG Qlogic
  308. - Added support for ISP12160 Ultra 3 chip.
  309. Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
  310. - Modified code to remove errors generated when compiling with
  311. Cygnus IA64 Compiler.
  312. - Changed conversion of pointers to unsigned longs instead of integers.
  313. - Changed type of I/O port variables from uint32_t to unsigned long.
  314. - Modified OFFSET macro to work with 64-bit as well as 32-bit.
  315. - Changed sprintf and printk format specifiers for pointers to %p.
  316. - Changed some int to long type casts where needed in sprintf & printk.
  317. - Added l modifiers to sprintf and printk format specifiers for longs.
  318. - Removed unused local variables.
  319. Rev. 1.20 June 8, 1999 DG, Qlogic
  320. Changes to support RedHat release 6.0 (kernel 2.2.5).
  321. - Added SCSI exclusive access lock (io_request_lock) when accessing
  322. the adapter.
  323. - Added changes for the new LINUX interface template. Some new error
  324. handling routines have been added to the template, but for now we
  325. will use the old ones.
  326. - Initial Beta Release.
  327. *****************************************************************************/
  328. #include <linux/module.h>
  329. #include <linux/types.h>
  330. #include <linux/string.h>
  331. #include <linux/errno.h>
  332. #include <linux/kernel.h>
  333. #include <linux/ioport.h>
  334. #include <linux/delay.h>
  335. #include <linux/timer.h>
  336. #include <linux/pci.h>
  337. #include <linux/proc_fs.h>
  338. #include <linux/stat.h>
  339. #include <linux/pci_ids.h>
  340. #include <linux/interrupt.h>
  341. #include <linux/init.h>
  342. #include <linux/dma-mapping.h>
  343. #include <linux/firmware.h>
  344. #include <asm/io.h>
  345. #include <asm/irq.h>
  346. #include <asm/byteorder.h>
  347. #include <asm/processor.h>
  348. #include <asm/types.h>
  349. #include <scsi/scsi.h>
  350. #include <scsi/scsi_cmnd.h>
  351. #include <scsi/scsi_device.h>
  352. #include <scsi/scsi_host.h>
  353. #include <scsi/scsi_tcq.h>
  354. /*
  355. * Compile time Options:
  356. * 0 - Disable and 1 - Enable
  357. */
  358. #define DEBUG_QLA1280_INTR 0
  359. #define DEBUG_PRINT_NVRAM 0
  360. #define DEBUG_QLA1280 0
  361. #define MEMORY_MAPPED_IO 1
  362. #include "qla1280.h"
  363. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  364. #define QLA_64BIT_PTR 1
  365. #endif
  366. #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
  367. #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
  368. #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
  369. ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
  370. #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
  371. ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
  372. static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
  373. static void qla1280_remove_one(struct pci_dev *);
  374. /*
  375. * QLogic Driver Support Function Prototypes.
  376. */
  377. static void qla1280_done(struct scsi_qla_host *);
  378. static int qla1280_get_token(char *);
  379. static int qla1280_setup(char *s) __init;
  380. /*
  381. * QLogic ISP1280 Hardware Support Function Prototypes.
  382. */
  383. static int qla1280_load_firmware(struct scsi_qla_host *);
  384. static int qla1280_init_rings(struct scsi_qla_host *);
  385. static int qla1280_nvram_config(struct scsi_qla_host *);
  386. static int qla1280_mailbox_command(struct scsi_qla_host *,
  387. uint8_t, uint16_t *);
  388. static int qla1280_bus_reset(struct scsi_qla_host *, int);
  389. static int qla1280_device_reset(struct scsi_qla_host *, int, int);
  390. static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
  391. static int qla1280_abort_isp(struct scsi_qla_host *);
  392. #ifdef QLA_64BIT_PTR
  393. static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
  394. #else
  395. static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
  396. #endif
  397. static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
  398. static void qla1280_poll(struct scsi_qla_host *);
  399. static void qla1280_reset_adapter(struct scsi_qla_host *);
  400. static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
  401. static void qla1280_isp_cmd(struct scsi_qla_host *);
  402. static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
  403. static void qla1280_rst_aen(struct scsi_qla_host *);
  404. static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
  405. struct list_head *);
  406. static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
  407. struct list_head *);
  408. static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
  409. static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
  410. static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
  411. static request_t *qla1280_req_pkt(struct scsi_qla_host *);
  412. static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
  413. unsigned int);
  414. static void qla1280_get_target_parameters(struct scsi_qla_host *,
  415. struct scsi_device *);
  416. static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
  417. static struct qla_driver_setup driver_setup;
  418. /*
  419. * convert scsi data direction to request_t control flags
  420. */
  421. static inline uint16_t
  422. qla1280_data_direction(struct scsi_cmnd *cmnd)
  423. {
  424. switch(cmnd->sc_data_direction) {
  425. case DMA_FROM_DEVICE:
  426. return BIT_5;
  427. case DMA_TO_DEVICE:
  428. return BIT_6;
  429. case DMA_BIDIRECTIONAL:
  430. return BIT_5 | BIT_6;
  431. /*
  432. * We could BUG() on default here if none of the four cases is
  433. * met, but then again if we receive something like that from the
  434. * SCSI layer we have more serious problems. This shuts up GCC.
  435. */
  436. case DMA_NONE:
  437. default:
  438. return 0;
  439. }
  440. }
  441. #if DEBUG_QLA1280
  442. static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
  443. static void __qla1280_dump_buffer(char *, int);
  444. #endif
  445. /*
  446. * insmod needs to find the variable and make it point to something
  447. */
  448. #ifdef MODULE
  449. static char *qla1280;
  450. /* insmod qla1280 options=verbose */
  451. module_param(qla1280, charp, 0);
  452. #else
  453. __setup("qla1280=", qla1280_setup);
  454. #endif
  455. /*
  456. * We use the scsi_pointer structure that's included with each scsi_command
  457. * to overlay our struct srb over it. qla1280_init() checks that a srb is not
  458. * bigger than a scsi_pointer.
  459. */
  460. #define CMD_SP(Cmnd) &Cmnd->SCp
  461. #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
  462. #define CMD_CDBP(Cmnd) Cmnd->cmnd
  463. #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
  464. #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
  465. #define CMD_RESULT(Cmnd) Cmnd->result
  466. #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
  467. #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
  468. #define CMD_HOST(Cmnd) Cmnd->device->host
  469. #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
  470. #define SCSI_TCN_32(Cmnd) Cmnd->device->id
  471. #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
  472. /*****************************************/
  473. /* ISP Boards supported by this driver */
  474. /*****************************************/
  475. struct qla_boards {
  476. char *name; /* Board ID String */
  477. int numPorts; /* Number of SCSI ports */
  478. int fw_index; /* index into qla1280_fw_tbl for firmware */
  479. };
  480. /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
  481. static struct pci_device_id qla1280_pci_tbl[] = {
  482. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
  483. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  484. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
  485. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
  486. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
  487. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
  488. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
  489. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
  490. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
  491. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
  492. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
  493. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
  494. {0,}
  495. };
  496. MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
  497. DEFINE_MUTEX(qla1280_firmware_mutex);
  498. struct qla_fw {
  499. char *fwname;
  500. const struct firmware *fw;
  501. };
  502. #define QL_NUM_FW_IMAGES 3
  503. struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
  504. {"qlogic/1040.bin", NULL}, /* image 0 */
  505. {"qlogic/1280.bin", NULL}, /* image 1 */
  506. {"qlogic/12160.bin", NULL}, /* image 2 */
  507. };
  508. /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
  509. static struct qla_boards ql1280_board_tbl[] = {
  510. {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
  511. {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
  512. {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
  513. {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
  514. {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
  515. {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
  516. {.name = " ", .numPorts = 0, .fw_index = -1},
  517. };
  518. static int qla1280_verbose = 1;
  519. #if DEBUG_QLA1280
  520. static int ql_debug_level = 1;
  521. #define dprintk(level, format, a...) \
  522. do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
  523. #define qla1280_dump_buffer(level, buf, size) \
  524. if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
  525. #define qla1280_print_scsi_cmd(level, cmd) \
  526. if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
  527. #else
  528. #define ql_debug_level 0
  529. #define dprintk(level, format, a...) do{}while(0)
  530. #define qla1280_dump_buffer(a, b, c) do{}while(0)
  531. #define qla1280_print_scsi_cmd(a, b) do{}while(0)
  532. #endif
  533. #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
  534. #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
  535. #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
  536. #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
  537. static int qla1280_read_nvram(struct scsi_qla_host *ha)
  538. {
  539. uint16_t *wptr;
  540. uint8_t chksum;
  541. int cnt, i;
  542. struct nvram *nv;
  543. ENTER("qla1280_read_nvram");
  544. if (driver_setup.no_nvram)
  545. return 1;
  546. printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
  547. wptr = (uint16_t *)&ha->nvram;
  548. nv = &ha->nvram;
  549. chksum = 0;
  550. for (cnt = 0; cnt < 3; cnt++) {
  551. *wptr = qla1280_get_nvram_word(ha, cnt);
  552. chksum += *wptr & 0xff;
  553. chksum += (*wptr >> 8) & 0xff;
  554. wptr++;
  555. }
  556. if (nv->id0 != 'I' || nv->id1 != 'S' ||
  557. nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
  558. dprintk(2, "Invalid nvram ID or version!\n");
  559. chksum = 1;
  560. } else {
  561. for (; cnt < sizeof(struct nvram); cnt++) {
  562. *wptr = qla1280_get_nvram_word(ha, cnt);
  563. chksum += *wptr & 0xff;
  564. chksum += (*wptr >> 8) & 0xff;
  565. wptr++;
  566. }
  567. }
  568. dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
  569. " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
  570. nv->version);
  571. if (chksum) {
  572. if (!driver_setup.no_nvram)
  573. printk(KERN_WARNING "scsi(%ld): Unable to identify or "
  574. "validate NVRAM checksum, using default "
  575. "settings\n", ha->host_no);
  576. ha->nvram_valid = 0;
  577. } else
  578. ha->nvram_valid = 1;
  579. /* The firmware interface is, um, interesting, in that the
  580. * actual firmware image on the chip is little endian, thus,
  581. * the process of taking that image to the CPU would end up
  582. * little endian. However, the firmware interface requires it
  583. * to be read a word (two bytes) at a time.
  584. *
  585. * The net result of this would be that the word (and
  586. * doubleword) quantities in the firmware would be correct, but
  587. * the bytes would be pairwise reversed. Since most of the
  588. * firmware quantities are, in fact, bytes, we do an extra
  589. * le16_to_cpu() in the firmware read routine.
  590. *
  591. * The upshot of all this is that the bytes in the firmware
  592. * are in the correct places, but the 16 and 32 bit quantities
  593. * are still in little endian format. We fix that up below by
  594. * doing extra reverses on them */
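/*
 * Concrete illustration of the above, with a hypothetical value: a 16-bit
 * NVRAM field stored as the bytes 0x34 0x12 (the value 0x1234 in little
 * endian) keeps that byte order in memory, so a big-endian CPU would read
 * the field as 0x3412.  The cpu_to_le16() calls below swap it back to
 * 0x1234 on big-endian hosts and are no-ops on little-endian hosts.
 */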
  595. nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
  596. nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
  597. for(i = 0; i < MAX_BUSES; i++) {
  598. nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
  599. nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
  600. }
  601. dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
  602. LEAVE("qla1280_read_nvram");
  603. return chksum;
  604. }
  605. /**************************************************************************
  606. * qla1280_info
  607. * Return a string describing the driver.
  608. **************************************************************************/
  609. static const char *
  610. qla1280_info(struct Scsi_Host *host)
  611. {
  612. static char qla1280_scsi_name_buffer[125];
  613. char *bp;
  614. struct scsi_qla_host *ha;
  615. struct qla_boards *bdp;
  616. bp = &qla1280_scsi_name_buffer[0];
  617. ha = (struct scsi_qla_host *)host->hostdata;
  618. bdp = &ql1280_board_tbl[ha->devnum];
  619. memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
  620. sprintf (bp,
  621. "QLogic %s PCI to SCSI Host Adapter\n"
  622. " Firmware version: %2d.%02d.%02d, Driver version %s",
  623. &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
  624. QLA1280_VERSION);
  625. return bp;
  626. }
  627. /**************************************************************************
  628. * qla1280_queuecommand
  629. * Queue a command to the controller.
  630. *
  631. * Note:
  632. * The mid-level driver tries to ensure that queuecommand never gets invoked
  633. * concurrently with itself or the interrupt handler (although the
  634. * interrupt handler may call this routine as part of request-completion
  635. * handling). Unfortunately, it sometimes calls the scheduler in interrupt
  636. * context, which is a big NO! NO!
  637. **************************************************************************/
  638. static int
  639. qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
  640. {
  641. struct Scsi_Host *host = cmd->device->host;
  642. struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
  643. struct srb *sp = (struct srb *)CMD_SP(cmd);
  644. int status;
  645. cmd->scsi_done = fn;
  646. sp->cmd = cmd;
  647. sp->flags = 0;
  648. sp->wait = NULL;
  649. CMD_HANDLE(cmd) = (unsigned char *)NULL;
  650. qla1280_print_scsi_cmd(5, cmd);
  651. #ifdef QLA_64BIT_PTR
  652. /*
  653. * Using 64 bit commands if the PCI bridge doesn't support it is a
  654. * bit wasteful; however, this should really only happen if one's
  655. * PCI controller is completely broken, like the BCM1250. For
  656. * sane hardware this is not an issue.
  657. */
  658. status = qla1280_64bit_start_scsi(ha, sp);
  659. #else
  660. status = qla1280_32bit_start_scsi(ha, sp);
  661. #endif
  662. return status;
  663. }
  664. static DEF_SCSI_QCMD(qla1280_queuecommand)
  665. enum action {
  666. ABORT_COMMAND,
  667. DEVICE_RESET,
  668. BUS_RESET,
  669. ADAPTER_RESET,
  670. };
  671. static void qla1280_mailbox_timeout(struct timer_list *t)
  672. {
  673. struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
  674. struct device_reg __iomem *reg;
  675. reg = ha->iobase;
  676. ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
  677. printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
  678. "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
  679. RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
  680. complete(ha->mailbox_wait);
  681. }
  682. static int
  683. _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
  684. struct completion *wait)
  685. {
  686. int status = FAILED;
  687. struct scsi_cmnd *cmd = sp->cmd;
  688. spin_unlock_irq(ha->host->host_lock);
  689. wait_for_completion_timeout(wait, 4*HZ);
  690. spin_lock_irq(ha->host->host_lock);
  691. sp->wait = NULL;
  692. if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
  693. status = SUCCESS;
  694. (*cmd->scsi_done)(cmd);
  695. }
  696. return status;
  697. }
  698. static int
  699. qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
  700. {
  701. DECLARE_COMPLETION_ONSTACK(wait);
  702. sp->wait = &wait;
  703. return _qla1280_wait_for_single_command(ha, sp, &wait);
  704. }
  705. static int
  706. qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
  707. {
  708. int cnt;
  709. int status;
  710. struct srb *sp;
  711. struct scsi_cmnd *cmd;
  712. status = SUCCESS;
  713. /*
  714. * Wait for all commands with the designated bus/target
  715. * to be completed by the firmware
  716. */
  717. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
  718. sp = ha->outstanding_cmds[cnt];
  719. if (sp) {
  720. cmd = sp->cmd;
  721. if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
  722. continue;
  723. if (target >= 0 && SCSI_TCN_32(cmd) != target)
  724. continue;
  725. status = qla1280_wait_for_single_command(ha, sp);
  726. if (status == FAILED)
  727. break;
  728. }
  729. }
  730. return status;
  731. }
  732. /**************************************************************************
  733. * qla1280_error_action
  734. * The function will attempt to perform a specified error action and
  735. * wait for the results (or time out).
  736. *
  737. * Input:
  738. * cmd = Linux SCSI command packet of the command that caused the
  739. * bus reset.
  740. * action = error action to take (see enum action)
  741. *
  742. * Returns:
  743. * SUCCESS or FAILED
  744. *
  745. **************************************************************************/
  746. static int
  747. qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
  748. {
  749. struct scsi_qla_host *ha;
  750. int bus, target, lun;
  751. struct srb *sp;
  752. int i, found;
  753. int result=FAILED;
  754. int wait_for_bus=-1;
  755. int wait_for_target = -1;
  756. DECLARE_COMPLETION_ONSTACK(wait);
  757. ENTER("qla1280_error_action");
  758. ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
  759. sp = (struct srb *)CMD_SP(cmd);
  760. bus = SCSI_BUS_32(cmd);
  761. target = SCSI_TCN_32(cmd);
  762. lun = SCSI_LUN_32(cmd);
  763. dprintk(4, "error_action %i, istatus 0x%04x\n", action,
  764. RD_REG_WORD(&ha->iobase->istatus));
  765. dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
  766. RD_REG_WORD(&ha->iobase->host_cmd),
  767. RD_REG_WORD(&ha->iobase->ictrl), jiffies);
  768. if (qla1280_verbose)
  769. printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
  770. "Handle=0x%p, action=0x%x\n",
  771. ha->host_no, cmd, CMD_HANDLE(cmd), action);
  772. /*
  773. * Check to see if we have the command in the outstanding_cmds[]
  774. * array. If not then it must have completed before this error
  775. * action was initiated. If the error_action isn't ABORT_COMMAND
  776. * then the driver must proceed with the requested action.
  777. */
  778. found = -1;
  779. for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
  780. if (sp == ha->outstanding_cmds[i]) {
  781. found = i;
  782. sp->wait = &wait; /* we'll wait for it to complete */
  783. break;
  784. }
  785. }
  786. if (found < 0) { /* driver doesn't have command */
  787. result = SUCCESS;
  788. if (qla1280_verbose) {
  789. printk(KERN_INFO
  790. "scsi(%ld:%d:%d:%d): specified command has "
  791. "already completed.\n", ha->host_no, bus,
  792. target, lun);
  793. }
  794. }
  795. switch (action) {
  796. case ABORT_COMMAND:
  797. dprintk(1, "qla1280: RISC aborting command\n");
  798. /*
  799. * The abort might fail due to race when the host_lock
  800. * is released to issue the abort. As such, we
  801. * don't bother to check the return status.
  802. */
  803. if (found >= 0)
  804. qla1280_abort_command(ha, sp, found);
  805. break;
  806. case DEVICE_RESET:
  807. if (qla1280_verbose)
  808. printk(KERN_INFO
  809. "scsi(%ld:%d:%d:%d): Queueing device reset "
  810. "command.\n", ha->host_no, bus, target, lun);
  811. if (qla1280_device_reset(ha, bus, target) == 0) {
  812. /* issued device reset, set wait conditions */
  813. wait_for_bus = bus;
  814. wait_for_target = target;
  815. }
  816. break;
  817. case BUS_RESET:
  818. if (qla1280_verbose)
  819. printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
  820. "reset.\n", ha->host_no, bus);
  821. if (qla1280_bus_reset(ha, bus) == 0) {
  822. /* issued bus reset, set wait conditions */
  823. wait_for_bus = bus;
  824. }
  825. break;
  826. case ADAPTER_RESET:
  827. default:
  828. if (qla1280_verbose) {
  829. printk(KERN_INFO
  830. "scsi(%ld): Issued ADAPTER RESET\n",
  831. ha->host_no);
  832. printk(KERN_INFO "scsi(%ld): I/O processing will "
  833. "continue automatically\n", ha->host_no);
  834. }
  835. ha->flags.reset_active = 1;
  836. if (qla1280_abort_isp(ha) != 0) { /* it's dead */
  837. result = FAILED;
  838. }
  839. ha->flags.reset_active = 0;
  840. }
  841. /*
  842. * At this point, the host_lock has been released and retaken
  843. * by the issuance of the mailbox command.
  844. * Wait for the command passed in by the mid-layer if it
  845. * was found by the driver. It might have been returned
  846. * between eh recovery steps, hence the check of the "found"
  847. * variable.
  848. */
  849. if (found >= 0)
  850. result = _qla1280_wait_for_single_command(ha, sp, &wait);
  851. if (action == ABORT_COMMAND && result != SUCCESS) {
  852. printk(KERN_WARNING
  853. "scsi(%li:%i:%i:%i): "
  854. "Unable to abort command!\n",
  855. ha->host_no, bus, target, lun);
  856. }
  857. /*
  858. * If the command passed in by the mid-layer has been
  859. * returned by the board, then wait for any additional
  860. * commands which are supposed to complete based upon
  861. * the error action.
  862. *
  863. * All commands are unconditionally returned during a
  864. * call to qla1280_abort_isp(), ADAPTER_RESET. No need
  865. * to wait for them.
  866. */
  867. if (result == SUCCESS && wait_for_bus >= 0) {
  868. result = qla1280_wait_for_pending_commands(ha,
  869. wait_for_bus, wait_for_target);
  870. }
  871. dprintk(1, "RESET returning %d\n", result);
  872. LEAVE("qla1280_error_action");
  873. return result;
  874. }
  875. /**************************************************************************
  876. * qla1280_abort
  877. * Abort the specified SCSI command(s).
  878. **************************************************************************/
  879. static int
  880. qla1280_eh_abort(struct scsi_cmnd * cmd)
  881. {
  882. int rc;
  883. spin_lock_irq(cmd->device->host->host_lock);
  884. rc = qla1280_error_action(cmd, ABORT_COMMAND);
  885. spin_unlock_irq(cmd->device->host->host_lock);
  886. return rc;
  887. }
  888. /**************************************************************************
  889. * qla1280_device_reset
  890. * Reset the specified SCSI device
  891. **************************************************************************/
  892. static int
  893. qla1280_eh_device_reset(struct scsi_cmnd *cmd)
  894. {
  895. int rc;
  896. spin_lock_irq(cmd->device->host->host_lock);
  897. rc = qla1280_error_action(cmd, DEVICE_RESET);
  898. spin_unlock_irq(cmd->device->host->host_lock);
  899. return rc;
  900. }
  901. /**************************************************************************
  902. * qla1280_bus_reset
  903. * Reset the specified bus.
  904. **************************************************************************/
  905. static int
  906. qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
  907. {
  908. int rc;
  909. spin_lock_irq(cmd->device->host->host_lock);
  910. rc = qla1280_error_action(cmd, BUS_RESET);
  911. spin_unlock_irq(cmd->device->host->host_lock);
  912. return rc;
  913. }
  914. /**************************************************************************
  915. * qla1280_adapter_reset
  916. * Reset the specified adapter (both channels)
  917. **************************************************************************/
  918. static int
  919. qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
  920. {
  921. int rc;
  922. spin_lock_irq(cmd->device->host->host_lock);
  923. rc = qla1280_error_action(cmd, ADAPTER_RESET);
  924. spin_unlock_irq(cmd->device->host->host_lock);
  925. return rc;
  926. }
  927. static int
  928. qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
  929. sector_t capacity, int geom[])
  930. {
  931. int heads, sectors, cylinders;
  932. heads = 64;
  933. sectors = 32;
  934. cylinders = (unsigned long)capacity / (heads * sectors);
  935. if (cylinders > 1024) {
  936. heads = 255;
  937. sectors = 63;
  938. cylinders = (unsigned long)capacity / (heads * sectors);
  939. /* if (cylinders > 1023)
  940. cylinders = 1023; */
  941. }
  942. geom[0] = heads;
  943. geom[1] = sectors;
  944. geom[2] = cylinders;
  945. return 0;
  946. }
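/*
 * Worked example of the geometry heuristic above (illustrative figures
 * only, for a hypothetical ~18 GB disk): capacity is roughly 35,156,250
 * 512-byte sectors.  With the initial 64 heads x 32 sectors (2048 sectors
 * per cylinder) that yields ~17,166 cylinders, which exceeds 1024, so the
 * code falls back to 255 heads x 63 sectors (16065 sectors per cylinder),
 * giving ~2188 cylinders.
 */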
  947. /* disable risc and host interrupts */
  948. static inline void
  949. qla1280_disable_intrs(struct scsi_qla_host *ha)
  950. {
  951. WRT_REG_WORD(&ha->iobase->ictrl, 0);
  952. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  953. }
  954. /* enable risc and host interrupts */
  955. static inline void
  956. qla1280_enable_intrs(struct scsi_qla_host *ha)
  957. {
  958. WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
  959. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  960. }
  961. /**************************************************************************
  962. * qla1280_intr_handler
  963. * Handles the H/W interrupt
  964. **************************************************************************/
  965. static irqreturn_t
  966. qla1280_intr_handler(int irq, void *dev_id)
  967. {
  968. struct scsi_qla_host *ha;
  969. struct device_reg __iomem *reg;
  970. u16 data;
  971. int handled = 0;
  972. ENTER_INTR ("qla1280_intr_handler");
  973. ha = (struct scsi_qla_host *)dev_id;
  974. spin_lock(ha->host->host_lock);
  975. ha->isr_count++;
  976. reg = ha->iobase;
  977. qla1280_disable_intrs(ha);
  978. data = qla1280_debounce_register(&reg->istatus);
  979. /* Check for pending interrupts. */
  980. if (data & RISC_INT) {
  981. qla1280_isr(ha, &ha->done_q);
  982. handled = 1;
  983. }
  984. if (!list_empty(&ha->done_q))
  985. qla1280_done(ha);
  986. spin_unlock(ha->host->host_lock);
  987. qla1280_enable_intrs(ha);
  988. LEAVE_INTR("qla1280_intr_handler");
  989. return IRQ_RETVAL(handled);
  990. }
  991. static int
  992. qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
  993. {
  994. uint8_t mr;
  995. uint16_t mb[MAILBOX_REGISTER_COUNT];
  996. struct nvram *nv;
  997. int status, lun;
  998. nv = &ha->nvram;
  999. mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
  1000. /* Set Target Parameters. */
  1001. mb[0] = MBC_SET_TARGET_PARAMETERS;
  1002. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1003. mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
  1004. mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
  1005. mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
  1006. mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
  1007. mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
  1008. mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
  1009. mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
  1010. mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
  1011. if (IS_ISP1x160(ha)) {
  1012. mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
  1013. mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
  1014. mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
  1015. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
  1016. mr |= BIT_6;
  1017. } else {
  1018. mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
  1019. }
  1020. mb[3] |= nv->bus[bus].target[target].sync_period;
  1021. status = qla1280_mailbox_command(ha, mr, mb);
  1022. /* Set Device Queue Parameters. */
  1023. for (lun = 0; lun < MAX_LUNS; lun++) {
  1024. mb[0] = MBC_SET_DEVICE_QUEUE;
  1025. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1026. mb[1] |= lun;
  1027. mb[2] = nv->bus[bus].max_queue_depth;
  1028. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1029. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1030. }
  1031. if (status)
  1032. printk(KERN_WARNING "scsi(%ld:%i:%i): "
  1033. "qla1280_set_target_parameters() failed\n",
  1034. ha->host_no, bus, target);
  1035. return status;
  1036. }
  1037. /**************************************************************************
  1038. * qla1280_slave_configure
  1039. *
  1040. * Description:
  1041. * Determines the queue depth for a given device. There are two
  1042. * ways a queue depth is chosen for a tagged queueing device. If
  1043. * the device supports tagged queuing and tagged queuing is enabled
  1044. * for it in the per-bus settings, the bus's high-water mark is
  1045. * used as the queue depth. Otherwise a small default queue depth
  1046. * is used.
  1047. **************************************************************************/
  1048. static int
  1049. qla1280_slave_configure(struct scsi_device *device)
  1050. {
  1051. struct scsi_qla_host *ha;
  1052. int default_depth = 3;
  1053. int bus = device->channel;
  1054. int target = device->id;
  1055. int status = 0;
  1056. struct nvram *nv;
  1057. unsigned long flags;
  1058. ha = (struct scsi_qla_host *)device->host->hostdata;
  1059. nv = &ha->nvram;
  1060. if (qla1280_check_for_dead_scsi_bus(ha, bus))
  1061. return 1;
  1062. if (device->tagged_supported &&
  1063. (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
  1064. scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
  1065. } else {
  1066. scsi_change_queue_depth(device, default_depth);
  1067. }
  1068. nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
  1069. nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
  1070. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
  1071. if (driver_setup.no_sync ||
  1072. (driver_setup.sync_mask &&
  1073. (~driver_setup.sync_mask & (1 << target))))
  1074. nv->bus[bus].target[target].parameter.enable_sync = 0;
  1075. if (driver_setup.no_wide ||
  1076. (driver_setup.wide_mask &&
  1077. (~driver_setup.wide_mask & (1 << target))))
  1078. nv->bus[bus].target[target].parameter.enable_wide = 0;
  1079. if (IS_ISP1x160(ha)) {
  1080. if (driver_setup.no_ppr ||
  1081. (driver_setup.ppr_mask &&
  1082. (~driver_setup.ppr_mask & (1 << target))))
  1083. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
  1084. }
  1085. spin_lock_irqsave(ha->host->host_lock, flags);
  1086. if (nv->bus[bus].target[target].parameter.enable_sync)
  1087. status = qla1280_set_target_parameters(ha, bus, target);
  1088. qla1280_get_target_parameters(ha, device);
  1089. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1090. return status;
  1091. }
  1092. /*
  1093. * qla1280_done
  1094. * Process completed commands.
  1095. *
  1096. * Input:
  1097. * ha = adapter block pointer.
  1098. */
  1099. static void
  1100. qla1280_done(struct scsi_qla_host *ha)
  1101. {
  1102. struct srb *sp;
  1103. struct list_head *done_q;
  1104. int bus, target, lun;
  1105. struct scsi_cmnd *cmd;
  1106. ENTER("qla1280_done");
  1107. done_q = &ha->done_q;
  1108. while (!list_empty(done_q)) {
  1109. sp = list_entry(done_q->next, struct srb, list);
  1110. list_del(&sp->list);
  1111. cmd = sp->cmd;
  1112. bus = SCSI_BUS_32(cmd);
  1113. target = SCSI_TCN_32(cmd);
  1114. lun = SCSI_LUN_32(cmd);
  1115. switch ((CMD_RESULT(cmd) >> 16)) {
  1116. case DID_RESET:
  1117. /* Issue marker command. */
  1118. if (!ha->flags.abort_isp_active)
  1119. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  1120. break;
  1121. case DID_ABORT:
  1122. sp->flags &= ~SRB_ABORT_PENDING;
  1123. sp->flags |= SRB_ABORTED;
  1124. break;
  1125. default:
  1126. break;
  1127. }
  1128. /* Release memory used for this I/O */
  1129. scsi_dma_unmap(cmd);
  1130. /* Call the mid-level driver interrupt handler */
  1131. ha->actthreads--;
  1132. if (sp->wait == NULL)
  1133. (*(cmd)->scsi_done)(cmd);
  1134. else
  1135. complete(sp->wait);
  1136. }
  1137. LEAVE("qla1280_done");
  1138. }
  1139. /*
  1140. * Translates a ISP error to a Linux SCSI error
  1141. */
  1142. static int
  1143. qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
  1144. {
  1145. int host_status = DID_ERROR;
  1146. uint16_t comp_status = le16_to_cpu(sts->comp_status);
  1147. uint16_t state_flags = le16_to_cpu(sts->state_flags);
  1148. uint32_t residual_length = le32_to_cpu(sts->residual_length);
  1149. uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
  1150. #if DEBUG_QLA1280_INTR
  1151. static char *reason[] = {
  1152. "DID_OK",
  1153. "DID_NO_CONNECT",
  1154. "DID_BUS_BUSY",
  1155. "DID_TIME_OUT",
  1156. "DID_BAD_TARGET",
  1157. "DID_ABORT",
  1158. "DID_PARITY",
  1159. "DID_ERROR",
  1160. "DID_RESET",
  1161. "DID_BAD_INTR"
  1162. };
  1163. #endif /* DEBUG_QLA1280_INTR */
  1164. ENTER("qla1280_return_status");
  1165. #if DEBUG_QLA1280_INTR
  1166. /*
  1167. dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
  1168. comp_status);
  1169. */
  1170. #endif
  1171. switch (comp_status) {
  1172. case CS_COMPLETE:
  1173. host_status = DID_OK;
  1174. break;
  1175. case CS_INCOMPLETE:
  1176. if (!(state_flags & SF_GOT_BUS))
  1177. host_status = DID_NO_CONNECT;
  1178. else if (!(state_flags & SF_GOT_TARGET))
  1179. host_status = DID_BAD_TARGET;
  1180. else if (!(state_flags & SF_SENT_CDB))
  1181. host_status = DID_ERROR;
  1182. else if (!(state_flags & SF_TRANSFERRED_DATA))
  1183. host_status = DID_ERROR;
  1184. else if (!(state_flags & SF_GOT_STATUS))
  1185. host_status = DID_ERROR;
  1186. else if (!(state_flags & SF_GOT_SENSE))
  1187. host_status = DID_ERROR;
  1188. break;
  1189. case CS_RESET:
  1190. host_status = DID_RESET;
  1191. break;
  1192. case CS_ABORTED:
  1193. host_status = DID_ABORT;
  1194. break;
  1195. case CS_TIMEOUT:
  1196. host_status = DID_TIME_OUT;
  1197. break;
  1198. case CS_DATA_OVERRUN:
  1199. dprintk(2, "Data overrun 0x%x\n", residual_length);
  1200. dprintk(2, "qla1280_return_status: response packet data\n");
  1201. qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
  1202. host_status = DID_ERROR;
  1203. break;
  1204. case CS_DATA_UNDERRUN:
  1205. if ((scsi_bufflen(cp) - residual_length) <
  1206. cp->underflow) {
  1207. printk(KERN_WARNING
  1208. "scsi: Underflow detected - retrying "
  1209. "command.\n");
  1210. host_status = DID_ERROR;
  1211. } else {
  1212. scsi_set_resid(cp, residual_length);
  1213. host_status = DID_OK;
  1214. }
  1215. break;
  1216. default:
  1217. host_status = DID_ERROR;
  1218. break;
  1219. }
  1220. #if DEBUG_QLA1280_INTR
  1221. dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
  1222. reason[host_status], scsi_status);
  1223. #endif
  1224. LEAVE("qla1280_return_status");
  1225. return (scsi_status & 0xff) | (host_status << 16);
  1226. }
  1227. /****************************************************************************/
  1228. /* QLogic ISP1280 Hardware Support Functions. */
  1229. /****************************************************************************/
  1230. /*
  1231. * qla1280_initialize_adapter
  1232. * Initialize board.
  1233. *
  1234. * Input:
  1235. * ha = adapter block pointer.
  1236. *
  1237. * Returns:
  1238. * 0 = success
  1239. */
  1240. static int
  1241. qla1280_initialize_adapter(struct scsi_qla_host *ha)
  1242. {
  1243. struct device_reg __iomem *reg;
  1244. int status;
  1245. int bus;
  1246. unsigned long flags;
  1247. ENTER("qla1280_initialize_adapter");
  1248. /* Clear adapter flags. */
  1249. ha->flags.online = 0;
  1250. ha->flags.disable_host_adapter = 0;
  1251. ha->flags.reset_active = 0;
  1252. ha->flags.abort_isp_active = 0;
  1253. /* TODO: implement support for the 1040 nvram format */
  1254. if (IS_ISP1040(ha))
  1255. driver_setup.no_nvram = 1;
  1256. dprintk(1, "Configure PCI space for adapter...\n");
  1257. reg = ha->iobase;
  1258. /* Ensure mailbox registers are free. */
  1259. WRT_REG_WORD(&reg->semaphore, 0);
  1260. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  1261. WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
  1262. RD_REG_WORD(&reg->host_cmd);
  1263. if (qla1280_read_nvram(ha)) {
  1264. dprintk(2, "qla1280_initialize_adapter: failed to read "
  1265. "NVRAM\n");
  1266. }
  1267. /*
  1268. * It's necessary to grab the spin here as qla1280_mailbox_command
  1269. * needs to be able to drop the lock unconditionally to wait
  1270. * for completion.
  1271. */
  1272. spin_lock_irqsave(ha->host->host_lock, flags);
  1273. status = qla1280_load_firmware(ha);
  1274. if (status) {
  1275. printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
  1276. ha->host_no);
  1277. goto out;
  1278. }
  1279. /* Setup adapter based on NVRAM parameters. */
  1280. dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
  1281. qla1280_nvram_config(ha);
  1282. if (ha->flags.disable_host_adapter) {
  1283. status = 1;
  1284. goto out;
  1285. }
  1286. status = qla1280_init_rings(ha);
  1287. if (status)
  1288. goto out;
  1289. /* Issue SCSI reset; if we can't reset twice, the bus is dead */
  1290. for (bus = 0; bus < ha->ports; bus++) {
  1291. if (!ha->bus_settings[bus].disable_scsi_reset &&
  1292. qla1280_bus_reset(ha, bus) &&
  1293. qla1280_bus_reset(ha, bus))
  1294. ha->bus_settings[bus].scsi_bus_dead = 1;
  1295. }
  1296. ha->flags.online = 1;
  1297. out:
  1298. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1299. if (status)
  1300. dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
  1301. LEAVE("qla1280_initialize_adapter");
  1302. return status;
  1303. }
  1304. /*
  1305. * qla1280_request_firmware
  1306. * Acquire firmware for chip. Retain in memory
  1307. * for error recovery.
  1308. *
  1309. * Input:
  1310. * ha = adapter block pointer.
  1311. *
  1312. * Returns:
  1313. * Pointer to firmware image or an error code
  1314. * cast to pointer via ERR_PTR().
  1315. */
  1316. static const struct firmware *
  1317. qla1280_request_firmware(struct scsi_qla_host *ha)
  1318. {
  1319. const struct firmware *fw;
  1320. int err;
  1321. int index;
  1322. char *fwname;
  1323. spin_unlock_irq(ha->host->host_lock);
  1324. mutex_lock(&qla1280_firmware_mutex);
  1325. index = ql1280_board_tbl[ha->devnum].fw_index;
  1326. fw = qla1280_fw_tbl[index].fw;
  1327. if (fw)
  1328. goto out;
  1329. fwname = qla1280_fw_tbl[index].fwname;
  1330. err = request_firmware(&fw, fwname, &ha->pdev->dev);
  1331. if (err) {
  1332. printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
  1333. fwname, err);
  1334. fw = ERR_PTR(err);
  1335. goto unlock;
  1336. }
  1337. if ((fw->size % 2) || (fw->size < 6)) {
  1338. printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
  1339. fw->size, fwname);
  1340. release_firmware(fw);
  1341. fw = ERR_PTR(-EINVAL);
  1342. goto unlock;
  1343. }
  1344. qla1280_fw_tbl[index].fw = fw;
  1345. out:
  1346. ha->fwver1 = fw->data[0];
  1347. ha->fwver2 = fw->data[1];
  1348. ha->fwver3 = fw->data[2];
  1349. unlock:
  1350. mutex_unlock(&qla1280_firmware_mutex);
  1351. spin_lock_irq(ha->host->host_lock);
  1352. return fw;
  1353. }
  1354. /*
  1355. * Chip diagnostics
  1356. * Test chip for proper operation.
  1357. *
  1358. * Input:
  1359. * ha = adapter block pointer.
  1360. *
  1361. * Returns:
  1362. * 0 = success.
  1363. */
  1364. static int
  1365. qla1280_chip_diag(struct scsi_qla_host *ha)
  1366. {
  1367. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1368. struct device_reg __iomem *reg = ha->iobase;
  1369. int status = 0;
  1370. int cnt;
  1371. uint16_t data;
  1372. dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
  1373. dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
  1374. /* Soft reset chip and wait for it to finish. */
  1375. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  1376. /*
  1377. * We can't do a traditional PCI write flush here by reading
  1378. * back the register. The card will not respond once the reset
  1379. * is in action and we end up with a machine check exception
  1380. * instead. Nothing to do but wait and hope for the best.
  1381. * A portable pci_write_flush(pdev) call would be very useful here.
  1382. */
  1383. udelay(20);
  1384. data = qla1280_debounce_register(&reg->ictrl);
  1385. /*
  1386. * Yet another QLogic gem ;-(
  1387. */
  1388. for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
  1389. udelay(5);
  1390. data = RD_REG_WORD(&reg->ictrl);
  1391. }
  1392. if (!cnt)
  1393. goto fail;
  1394. /* Reset register cleared by chip reset. */
  1395. dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
  1396. WRT_REG_WORD(&reg->cfg_1, 0);
  1397. /* Reset RISC and disable BIOS which
  1398. allows RISC to execute out of RAM. */
  1399. WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
  1400. HC_RELEASE_RISC | HC_DISABLE_BIOS);
  1401. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1402. data = qla1280_debounce_register(&reg->mailbox0);
  1403. /*
  1404. * I *LOVE* this code!
  1405. */
  1406. for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
  1407. udelay(5);
  1408. data = RD_REG_WORD(&reg->mailbox0);
  1409. }
  1410. if (!cnt)
  1411. goto fail;
  1412. /* Check product ID of chip */
  1413. dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
  1414. if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
  1415. (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
  1416. RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
  1417. RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
  1418. RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
  1419. printk(KERN_INFO "qla1280: Wrong product ID = "
  1420. "0x%x,0x%x,0x%x,0x%x\n",
  1421. RD_REG_WORD(&reg->mailbox1),
  1422. RD_REG_WORD(&reg->mailbox2),
  1423. RD_REG_WORD(&reg->mailbox3),
  1424. RD_REG_WORD(&reg->mailbox4));
  1425. goto fail;
  1426. }
  1427. /*
  1428. * Enable ints early!!!
  1429. */
  1430. qla1280_enable_intrs(ha);
  1431. dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
  1432. /* Wrap Incoming Mailboxes Test. */
  1433. mb[0] = MBC_MAILBOX_REGISTER_TEST;
  1434. mb[1] = 0xAAAA;
  1435. mb[2] = 0x5555;
  1436. mb[3] = 0xAA55;
  1437. mb[4] = 0x55AA;
  1438. mb[5] = 0xA5A5;
  1439. mb[6] = 0x5A5A;
  1440. mb[7] = 0x2525;
  1441. status = qla1280_mailbox_command(ha, 0xff, mb);
  1442. if (status)
  1443. goto fail;
  1444. if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
  1445. mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
  1446. mb[7] != 0x2525) {
  1447. printk(KERN_INFO "qla1280: Failed mbox check\n");
  1448. goto fail;
  1449. }
  1450. dprintk(3, "qla1280_chip_diag: exiting normally\n");
  1451. return 0;
  1452. fail:
  1453. dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
  1454. return status;
  1455. }
  1456. static int
  1457. qla1280_load_firmware_pio(struct scsi_qla_host *ha)
  1458. {
  1459. /* enter with host_lock acquired */
  1460. const struct firmware *fw;
  1461. const __le16 *fw_data;
  1462. uint16_t risc_address, risc_code_size;
  1463. uint16_t mb[MAILBOX_REGISTER_COUNT], i;
  1464. int err = 0;
  1465. fw = qla1280_request_firmware(ha);
  1466. if (IS_ERR(fw))
  1467. return PTR_ERR(fw);
  1468. fw_data = (const __le16 *)&fw->data[0];
  1469. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1470. /* Load RISC code. */
  1471. risc_address = ha->fwstart;
  1472. fw_data = (const __le16 *)&fw->data[6];
  1473. risc_code_size = (fw->size - 6) / 2;
  1474. for (i = 0; i < risc_code_size; i++) {
  1475. mb[0] = MBC_WRITE_RAM_WORD;
  1476. mb[1] = risc_address + i;
  1477. mb[2] = __le16_to_cpu(fw_data[i]);
  1478. err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
  1479. if (err) {
  1480. printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
  1481. ha->host_no);
  1482. break;
  1483. }
  1484. }
  1485. return err;
  1486. }
  1487. #ifdef QLA_64BIT_PTR
  1488. #define LOAD_CMD MBC_LOAD_RAM_A64_ROM
  1489. #define DUMP_CMD MBC_DUMP_RAM_A64_ROM
  1490. #define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
  1491. #else
  1492. #define LOAD_CMD MBC_LOAD_RAM
  1493. #define DUMP_CMD MBC_DUMP_RAM
  1494. #define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
  1495. #endif
  1496. #define DUMP_IT_BACK 0 /* for debug of RISC loading */
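/*
 * Rough sketch of what qla1280_load_firmware_dma() does below: the
 * image is staged through the request ring buffer (the rings are not
 * initialized yet at this point) in chunks of at most 2000 >> 1 =
 * 1000 16-bit words per LOAD_CMD mailbox command.  As a worked
 * example, a 60 KB image (30720 words) would be transferred in 31
 * such chunks.  With QLA_64BIT_PTR the A64 "ROM" command variants are
 * used, so mb[6]/mb[7] carry the upper 32 bits of the DMA address.
 */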
  1497. static int
  1498. qla1280_load_firmware_dma(struct scsi_qla_host *ha)
  1499. {
  1500. /* enter with host_lock acquired */
  1501. const struct firmware *fw;
  1502. const __le16 *fw_data;
  1503. uint16_t risc_address, risc_code_size;
  1504. uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
  1505. int err = 0, num, i;
  1506. #if DUMP_IT_BACK
  1507. uint8_t *sp, *tbuf;
  1508. dma_addr_t p_tbuf;
  1509. tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
  1510. if (!tbuf)
  1511. return -ENOMEM;
  1512. #endif
  1513. fw = qla1280_request_firmware(ha);
  1514. if (IS_ERR(fw))
  1515. return PTR_ERR(fw);
  1516. fw_data = (const __le16 *)&fw->data[0];
  1517. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1518. /* Load RISC code. */
  1519. risc_address = ha->fwstart;
  1520. fw_data = (const __le16 *)&fw->data[6];
  1521. risc_code_size = (fw->size - 6) / 2;
  1522. dprintk(1, "%s: DMA RISC code (%i) words\n",
  1523. __func__, risc_code_size);
  1524. num = 0;
  1525. while (risc_code_size > 0) {
  1526. int warn __attribute__((unused)) = 0;
  1527. cnt = 2000 >> 1;
  1528. if (cnt > risc_code_size)
  1529. cnt = risc_code_size;
  1530. dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
  1531. "%d,%d(0x%x)\n",
  1532. fw_data, cnt, num, risc_address);
1533. for (i = 0; i < cnt; i++)
  1534. ((__le16 *)ha->request_ring)[i] = fw_data[i];
  1535. mb[0] = LOAD_CMD;
  1536. mb[1] = risc_address;
  1537. mb[4] = cnt;
  1538. mb[3] = ha->request_dma & 0xffff;
  1539. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1540. mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
  1541. mb[6] = upper_32_bits(ha->request_dma) >> 16;
  1542. dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
  1543. __func__, mb[0],
  1544. (void *)(long)ha->request_dma,
  1545. mb[6], mb[7], mb[2], mb[3]);
  1546. err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
  1547. if (err) {
  1548. printk(KERN_ERR "scsi(%li): Failed to load partial "
  1549. "segment of f\n", ha->host_no);
  1550. goto out;
  1551. }
  1552. #if DUMP_IT_BACK
  1553. mb[0] = DUMP_CMD;
  1554. mb[1] = risc_address;
  1555. mb[4] = cnt;
  1556. mb[3] = p_tbuf & 0xffff;
  1557. mb[2] = (p_tbuf >> 16) & 0xffff;
  1558. mb[7] = upper_32_bits(p_tbuf) & 0xffff;
  1559. mb[6] = upper_32_bits(p_tbuf) >> 16;
  1560. err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
  1561. if (err) {
  1562. printk(KERN_ERR
  1563. "Failed to dump partial segment of f/w\n");
  1564. goto out;
  1565. }
  1566. sp = (uint8_t *)ha->request_ring;
  1567. for (i = 0; i < (cnt << 1); i++) {
  1568. if (tbuf[i] != sp[i] && warn++ < 10) {
  1569. printk(KERN_ERR "%s: FW compare error @ "
  1570. "byte(0x%x) loop#=%x\n",
  1571. __func__, i, num);
  1572. printk(KERN_ERR "%s: FWbyte=%x "
  1573. "FWfromChip=%x\n",
  1574. __func__, sp[i], tbuf[i]);
  1575. /*break; */
  1576. }
  1577. }
  1578. #endif
  1579. risc_address += cnt;
1580. risc_code_size -= cnt;
1581. fw_data += cnt;
  1582. num++;
  1583. }
  1584. out:
  1585. #if DUMP_IT_BACK
  1586. dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
  1587. #endif
  1588. return err;
  1589. }
  1590. static int
  1591. qla1280_start_firmware(struct scsi_qla_host *ha)
  1592. {
  1593. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1594. int err;
  1595. dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
  1596. __func__);
  1597. /* Verify checksum of loaded RISC code. */
  1598. mb[0] = MBC_VERIFY_CHECKSUM;
  1599. /* mb[1] = ql12_risc_code_addr01; */
  1600. mb[1] = ha->fwstart;
  1601. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1602. if (err) {
  1603. printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
  1604. return err;
  1605. }
  1606. /* Start firmware execution. */
  1607. dprintk(1, "%s: start firmware running.\n", __func__);
  1608. mb[0] = MBC_EXECUTE_FIRMWARE;
  1609. mb[1] = ha->fwstart;
  1610. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1611. if (err) {
  1612. printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
  1613. ha->host_no);
  1614. }
  1615. return err;
  1616. }
  1617. static int
  1618. qla1280_load_firmware(struct scsi_qla_host *ha)
  1619. {
  1620. /* enter with host_lock taken */
  1621. int err;
  1622. err = qla1280_chip_diag(ha);
  1623. if (err)
  1624. goto out;
  1625. if (IS_ISP1040(ha))
  1626. err = qla1280_load_firmware_pio(ha);
  1627. else
  1628. err = qla1280_load_firmware_dma(ha);
  1629. if (err)
  1630. goto out;
  1631. err = qla1280_start_firmware(ha);
  1632. out:
  1633. return err;
  1634. }
  1635. /*
  1636. * Initialize rings
  1637. *
  1638. * Input:
  1639. * ha = adapter block pointer.
  1640. * ha->request_ring = request ring virtual address
  1641. * ha->response_ring = response ring virtual address
  1642. * ha->request_dma = request ring physical address
  1643. * ha->response_dma = response ring physical address
  1644. *
  1645. * Returns:
  1646. * 0 = success.
  1647. */
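/*
 * Worked example of how a 64-bit ring address is split across the
 * mailbox registers below: for request_dma = 0x0000000123456789,
 * mb[3] = 0x6789 and mb[2] = 0x2345 carry the low 32 bits, while
 * mb[7] = 0x0001 and mb[6] = 0x0000 carry the upper 32 bits.
 */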
  1648. static int
  1649. qla1280_init_rings(struct scsi_qla_host *ha)
  1650. {
  1651. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1652. int status = 0;
  1653. ENTER("qla1280_init_rings");
  1654. /* Clear outstanding commands array. */
  1655. memset(ha->outstanding_cmds, 0,
  1656. sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
  1657. /* Initialize request queue. */
  1658. ha->request_ring_ptr = ha->request_ring;
  1659. ha->req_ring_index = 0;
  1660. ha->req_q_cnt = REQUEST_ENTRY_CNT;
  1661. /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
  1662. mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
  1663. mb[1] = REQUEST_ENTRY_CNT;
  1664. mb[3] = ha->request_dma & 0xffff;
  1665. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1666. mb[4] = 0;
  1667. mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
  1668. mb[6] = upper_32_bits(ha->request_dma) >> 16;
  1669. if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
  1670. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1671. &mb[0]))) {
  1672. /* Initialize response queue. */
  1673. ha->response_ring_ptr = ha->response_ring;
  1674. ha->rsp_ring_index = 0;
  1675. /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
  1676. mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
  1677. mb[1] = RESPONSE_ENTRY_CNT;
  1678. mb[3] = ha->response_dma & 0xffff;
  1679. mb[2] = (ha->response_dma >> 16) & 0xffff;
  1680. mb[5] = 0;
  1681. mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
  1682. mb[6] = upper_32_bits(ha->response_dma) >> 16;
  1683. status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
  1684. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1685. &mb[0]);
  1686. }
  1687. if (status)
  1688. dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
  1689. LEAVE("qla1280_init_rings");
  1690. return status;
  1691. }
  1692. static void
  1693. qla1280_print_settings(struct nvram *nv)
  1694. {
  1695. dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
  1696. nv->bus[0].config_1.initiator_id);
  1697. dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
  1698. nv->bus[1].config_1.initiator_id);
  1699. dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
  1700. nv->bus[0].bus_reset_delay);
  1701. dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
  1702. nv->bus[1].bus_reset_delay);
  1703. dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
  1704. dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
  1705. dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
  1706. dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
  1707. dprintk(1, "qla1280 : async data setup time[0]=%d\n",
  1708. nv->bus[0].config_2.async_data_setup_time);
  1709. dprintk(1, "qla1280 : async data setup time[1]=%d\n",
  1710. nv->bus[1].config_2.async_data_setup_time);
  1711. dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
  1712. nv->bus[0].config_2.req_ack_active_negation);
  1713. dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
  1714. nv->bus[1].config_2.req_ack_active_negation);
  1715. dprintk(1, "qla1280 : data line active negation[0]=%d\n",
  1716. nv->bus[0].config_2.data_line_active_negation);
  1717. dprintk(1, "qla1280 : data line active negation[1]=%d\n",
  1718. nv->bus[1].config_2.data_line_active_negation);
  1719. dprintk(1, "qla1280 : disable loading risc code=%d\n",
  1720. nv->cntr_flags_1.disable_loading_risc_code);
  1721. dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
  1722. nv->cntr_flags_1.enable_64bit_addressing);
  1723. dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
  1724. nv->bus[0].selection_timeout);
  1725. dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
  1726. nv->bus[1].selection_timeout);
  1727. dprintk(1, "qla1280 : max queue depth[0]=%d\n",
  1728. nv->bus[0].max_queue_depth);
  1729. dprintk(1, "qla1280 : max queue depth[1]=%d\n",
  1730. nv->bus[1].max_queue_depth);
  1731. }
  1732. static void
  1733. qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
  1734. {
  1735. struct nvram *nv = &ha->nvram;
  1736. nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
  1737. nv->bus[bus].target[target].parameter.auto_request_sense = 1;
  1738. nv->bus[bus].target[target].parameter.tag_queuing = 1;
  1739. nv->bus[bus].target[target].parameter.enable_sync = 1;
  1740. #if 1 /* Some SCSI Processors do not seem to like this */
  1741. nv->bus[bus].target[target].parameter.enable_wide = 1;
  1742. #endif
  1743. nv->bus[bus].target[target].execution_throttle =
  1744. nv->bus[bus].max_queue_depth - 1;
  1745. nv->bus[bus].target[target].parameter.parity_checking = 1;
  1746. nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
  1747. if (IS_ISP1x160(ha)) {
  1748. nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
  1749. nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
  1750. nv->bus[bus].target[target].sync_period = 9;
  1751. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
  1752. nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
  1753. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
  1754. } else {
  1755. nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
  1756. nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
  1757. nv->bus[bus].target[target].sync_period = 10;
  1758. }
  1759. }
  1760. static void
  1761. qla1280_set_defaults(struct scsi_qla_host *ha)
  1762. {
  1763. struct nvram *nv = &ha->nvram;
  1764. int bus, target;
  1765. dprintk(1, "Using defaults for NVRAM: \n");
  1766. memset(nv, 0, sizeof(struct nvram));
  1767. /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
  1768. nv->firmware_feature.f.enable_fast_posting = 1;
  1769. nv->firmware_feature.f.disable_synchronous_backoff = 1;
  1770. nv->termination.scsi_bus_0_control = 3;
  1771. nv->termination.scsi_bus_1_control = 3;
  1772. nv->termination.auto_term_support = 1;
  1773. /*
  1774. * Set default FIFO magic - What appropriate values would be here
  1775. * is unknown. This is what I have found testing with 12160s.
  1776. *
  1777. * Now, I would love the magic decoder ring for this one, the
  1778. * header file provided by QLogic seems to be bogus or incomplete
  1779. * at best.
  1780. */
  1781. nv->isp_config.burst_enable = 1;
  1782. if (IS_ISP1040(ha))
  1783. nv->isp_config.fifo_threshold |= 3;
  1784. else
  1785. nv->isp_config.fifo_threshold |= 4;
  1786. if (IS_ISP1x160(ha))
  1787. nv->isp_parameter = 0x01; /* fast memory enable */
  1788. for (bus = 0; bus < MAX_BUSES; bus++) {
  1789. nv->bus[bus].config_1.initiator_id = 7;
  1790. nv->bus[bus].config_2.req_ack_active_negation = 1;
  1791. nv->bus[bus].config_2.data_line_active_negation = 1;
  1792. nv->bus[bus].selection_timeout = 250;
  1793. nv->bus[bus].max_queue_depth = 32;
  1794. if (IS_ISP1040(ha)) {
  1795. nv->bus[bus].bus_reset_delay = 3;
  1796. nv->bus[bus].config_2.async_data_setup_time = 6;
  1797. nv->bus[bus].retry_delay = 1;
  1798. } else {
  1799. nv->bus[bus].bus_reset_delay = 5;
  1800. nv->bus[bus].config_2.async_data_setup_time = 8;
  1801. }
  1802. for (target = 0; target < MAX_TARGETS; target++)
  1803. qla1280_set_target_defaults(ha, bus, target);
  1804. }
  1805. }
  1806. static int
  1807. qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
  1808. {
  1809. struct nvram *nv = &ha->nvram;
  1810. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1811. int status, lun;
  1812. uint16_t flag;
  1813. /* Set Target Parameters. */
  1814. mb[0] = MBC_SET_TARGET_PARAMETERS;
  1815. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1816. /*
  1817. * Do not enable sync and ppr for the initial INQUIRY run. We
  1818. * enable this later if we determine the target actually
  1819. * supports it.
  1820. */
  1821. mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
  1822. | TP_WIDE | TP_PARITY | TP_DISCONNECT);
  1823. if (IS_ISP1x160(ha))
  1824. mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
  1825. else
  1826. mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
  1827. mb[3] |= nv->bus[bus].target[target].sync_period;
  1828. status = qla1280_mailbox_command(ha, 0x0f, mb);
  1829. /* Save Tag queuing enable flag. */
  1830. flag = (BIT_0 << target);
  1831. if (nv->bus[bus].target[target].parameter.tag_queuing)
  1832. ha->bus_settings[bus].qtag_enables |= flag;
  1833. /* Save Device enable flag. */
  1834. if (IS_ISP1x160(ha)) {
  1835. if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
  1836. ha->bus_settings[bus].device_enables |= flag;
  1837. ha->bus_settings[bus].lun_disables |= 0;
  1838. } else {
  1839. if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
  1840. ha->bus_settings[bus].device_enables |= flag;
  1841. /* Save LUN disable flag. */
  1842. if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
  1843. ha->bus_settings[bus].lun_disables |= flag;
  1844. }
  1845. /* Set Device Queue Parameters. */
  1846. for (lun = 0; lun < MAX_LUNS; lun++) {
  1847. mb[0] = MBC_SET_DEVICE_QUEUE;
  1848. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1849. mb[1] |= lun;
  1850. mb[2] = nv->bus[bus].max_queue_depth;
  1851. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1852. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1853. }
  1854. return status;
  1855. }
  1856. static int
  1857. qla1280_config_bus(struct scsi_qla_host *ha, int bus)
  1858. {
  1859. struct nvram *nv = &ha->nvram;
  1860. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1861. int target, status;
  1862. /* SCSI Reset Disable. */
  1863. ha->bus_settings[bus].disable_scsi_reset =
  1864. nv->bus[bus].config_1.scsi_reset_disable;
  1865. /* Initiator ID. */
  1866. ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
  1867. mb[0] = MBC_SET_INITIATOR_ID;
  1868. mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
  1869. ha->bus_settings[bus].id;
  1870. status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1871. /* Reset Delay. */
  1872. ha->bus_settings[bus].bus_reset_delay =
  1873. nv->bus[bus].bus_reset_delay;
  1874. /* Command queue depth per device. */
  1875. ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
  1876. /* Set target parameters. */
  1877. for (target = 0; target < MAX_TARGETS; target++)
  1878. status |= qla1280_config_target(ha, bus, target);
  1879. return status;
  1880. }
  1881. static int
  1882. qla1280_nvram_config(struct scsi_qla_host *ha)
  1883. {
  1884. struct device_reg __iomem *reg = ha->iobase;
  1885. struct nvram *nv = &ha->nvram;
  1886. int bus, target, status = 0;
  1887. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1888. ENTER("qla1280_nvram_config");
  1889. if (ha->nvram_valid) {
  1890. /* Always force AUTO sense for LINUX SCSI */
  1891. for (bus = 0; bus < MAX_BUSES; bus++)
  1892. for (target = 0; target < MAX_TARGETS; target++) {
  1893. nv->bus[bus].target[target].parameter.
  1894. auto_request_sense = 1;
  1895. }
  1896. } else {
  1897. qla1280_set_defaults(ha);
  1898. }
  1899. qla1280_print_settings(nv);
  1900. /* Disable RISC load of firmware. */
  1901. ha->flags.disable_risc_code_load =
  1902. nv->cntr_flags_1.disable_loading_risc_code;
  1903. if (IS_ISP1040(ha)) {
  1904. uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
  1905. hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
  1906. cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
  1907. cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
  1908. ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
  1909. /* Busted fifo, says mjacob. */
  1910. if (hwrev != ISP_CFG0_1040A)
  1911. cfg1 |= nv->isp_config.fifo_threshold << 4;
  1912. cfg1 |= nv->isp_config.burst_enable << 2;
  1913. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1914. WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
1915. WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
  1916. } else {
  1917. uint16_t cfg1, term;
  1918. /* Set ISP hardware DMA burst */
  1919. cfg1 = nv->isp_config.fifo_threshold << 4;
  1920. cfg1 |= nv->isp_config.burst_enable << 2;
  1921. /* Enable DMA arbitration on dual channel controllers */
  1922. if (ha->ports > 1)
  1923. cfg1 |= BIT_13;
  1924. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1925. /* Set SCSI termination. */
  1926. WRT_REG_WORD(&reg->gpio_enable,
  1927. BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
  1928. term = nv->termination.scsi_bus_1_control;
  1929. term |= nv->termination.scsi_bus_0_control << 2;
  1930. term |= nv->termination.auto_term_support << 7;
  1931. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1932. WRT_REG_WORD(&reg->gpio_data, term);
  1933. }
  1934. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1935. /* ISP parameter word. */
  1936. mb[0] = MBC_SET_SYSTEM_PARAMETER;
  1937. mb[1] = nv->isp_parameter;
  1938. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1939. if (IS_ISP1x40(ha)) {
  1940. /* clock rate - for qla1240 and older, only */
  1941. mb[0] = MBC_SET_CLOCK_RATE;
  1942. mb[1] = 40;
  1943. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1944. }
  1945. /* Firmware feature word. */
  1946. mb[0] = MBC_SET_FIRMWARE_FEATURES;
  1947. mb[1] = nv->firmware_feature.f.enable_fast_posting;
  1948. mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
  1949. mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
  1950. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1951. /* Retry count and delay. */
  1952. mb[0] = MBC_SET_RETRY_COUNT;
  1953. mb[1] = nv->bus[0].retry_count;
  1954. mb[2] = nv->bus[0].retry_delay;
  1955. mb[6] = nv->bus[1].retry_count;
  1956. mb[7] = nv->bus[1].retry_delay;
  1957. status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
  1958. BIT_1 | BIT_0, &mb[0]);
  1959. /* ASYNC data setup time. */
  1960. mb[0] = MBC_SET_ASYNC_DATA_SETUP;
  1961. mb[1] = nv->bus[0].config_2.async_data_setup_time;
  1962. mb[2] = nv->bus[1].config_2.async_data_setup_time;
  1963. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  1964. /* Active negation states. */
  1965. mb[0] = MBC_SET_ACTIVE_NEGATION;
  1966. mb[1] = 0;
  1967. if (nv->bus[0].config_2.req_ack_active_negation)
  1968. mb[1] |= BIT_5;
  1969. if (nv->bus[0].config_2.data_line_active_negation)
  1970. mb[1] |= BIT_4;
  1971. mb[2] = 0;
  1972. if (nv->bus[1].config_2.req_ack_active_negation)
  1973. mb[2] |= BIT_5;
  1974. if (nv->bus[1].config_2.data_line_active_negation)
  1975. mb[2] |= BIT_4;
  1976. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  1977. mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
  1978. mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
  1979. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1980. /* PCI control: enable burst on the data and command DMA channels */
  1981. mb[0] = MBC_SET_PCI_CONTROL;
  1982. mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
  1983. mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
  1984. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  1985. mb[0] = MBC_SET_TAG_AGE_LIMIT;
  1986. mb[1] = 8;
  1987. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1988. /* Selection timeout. */
  1989. mb[0] = MBC_SET_SELECTION_TIMEOUT;
  1990. mb[1] = nv->bus[0].selection_timeout;
  1991. mb[2] = nv->bus[1].selection_timeout;
  1992. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  1993. for (bus = 0; bus < ha->ports; bus++)
  1994. status |= qla1280_config_bus(ha, bus);
  1995. if (status)
  1996. dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
  1997. LEAVE("qla1280_nvram_config");
  1998. return status;
  1999. }
  2000. /*
  2001. * Get NVRAM data word
  2002. * Calculates word position in NVRAM and calls request routine to
  2003. * get the word from NVRAM.
  2004. *
  2005. * Input:
  2006. * ha = adapter block pointer.
  2007. * address = NVRAM word address.
  2008. *
  2009. * Returns:
  2010. * data word.
  2011. */
  2012. static uint16_t
  2013. qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
  2014. {
  2015. uint32_t nv_cmd;
  2016. uint16_t data;
  2017. nv_cmd = address << 16;
  2018. nv_cmd |= NV_READ_OP;
  2019. data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
  2020. dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
  2021. "0x%x", data);
  2022. return data;
  2023. }
  2024. /*
  2025. * NVRAM request
  2026. * Sends read command to NVRAM and gets data from NVRAM.
  2027. *
  2028. * Input:
  2029. * ha = adapter block pointer.
  2030. * nv_cmd = Bit 26 = start bit
  2031. * Bit 25, 24 = opcode
  2032. * Bit 23-16 = address
  2033. * Bit 15-0 = write data
  2034. *
  2035. * Returns:
  2036. * data word.
  2037. */
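/*
 * Worked example of the command stream (based only on the bit layout
 * documented above): for a read of NVRAM word address 0x06, nv_cmd is
 * built as (0x06 << 16) | NV_READ_OP.  The << 5 below aligns the
 * start bit (bit 26) with bit 31, so the first loop clocks out
 * exactly 11 bits MSB first: 1 start bit, 2 opcode bits and 8 address
 * bits.  The 16 data bits are then clocked back in, again MSB first.
 */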
  2038. static uint16_t
  2039. qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
  2040. {
  2041. struct device_reg __iomem *reg = ha->iobase;
  2042. int cnt;
  2043. uint16_t data = 0;
  2044. uint16_t reg_data;
  2045. /* Send command to NVRAM. */
  2046. nv_cmd <<= 5;
  2047. for (cnt = 0; cnt < 11; cnt++) {
  2048. if (nv_cmd & BIT_31)
  2049. qla1280_nv_write(ha, NV_DATA_OUT);
  2050. else
  2051. qla1280_nv_write(ha, 0);
  2052. nv_cmd <<= 1;
  2053. }
  2054. /* Read data from NVRAM. */
  2055. for (cnt = 0; cnt < 16; cnt++) {
  2056. WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
  2057. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2058. NVRAM_DELAY();
  2059. data <<= 1;
  2060. reg_data = RD_REG_WORD(&reg->nvram);
  2061. if (reg_data & NV_DATA_IN)
  2062. data |= BIT_0;
  2063. WRT_REG_WORD(&reg->nvram, NV_SELECT);
  2064. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2065. NVRAM_DELAY();
  2066. }
  2067. /* Deselect chip. */
  2068. WRT_REG_WORD(&reg->nvram, NV_DESELECT);
  2069. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2070. NVRAM_DELAY();
  2071. return data;
  2072. }
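/*
 * qla1280_nv_write
 *      Bit-bang a single bit to the serial NVRAM: the data bit is
 *      driven with NV_SELECT asserted, the clock line is pulsed high
 *      and dropped again, with an NVRAM_DELAY() after each edge.
 */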
  2073. static void
  2074. qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
  2075. {
  2076. struct device_reg __iomem *reg = ha->iobase;
  2077. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2078. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2079. NVRAM_DELAY();
  2080. WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
  2081. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2082. NVRAM_DELAY();
  2083. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2084. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2085. NVRAM_DELAY();
  2086. }
  2087. /*
  2088. * Mailbox Command
2089. * Issues a mailbox command and waits for completion.
  2090. *
  2091. * Input:
  2092. * ha = adapter block pointer.
  2093. * mr = mailbox registers to load.
  2094. * mb = data pointer for mailbox registers.
  2095. *
  2096. * Output:
  2097. * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
  2098. *
  2099. * Returns:
  2100. * 0 = success
  2101. */
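/*
 * The 'mr' argument is a bitmask of outgoing mailbox registers: bit n
 * set means mb[n] is written to the corresponding hardware mailbox
 * before the command is issued (see the load loop below, which shifts
 * mr right once per register).  For example, passing BIT_2 | BIT_1 |
 * BIT_0 loads mb[0]..mb[2] only.  All MAILBOX_REGISTER_COUNT registers
 * are copied back into mb[] on completion.
 */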
  2102. static int
  2103. qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
  2104. {
  2105. struct device_reg __iomem *reg = ha->iobase;
  2106. int status = 0;
  2107. int cnt;
  2108. uint16_t *optr, *iptr;
  2109. uint16_t __iomem *mptr;
  2110. uint16_t data;
  2111. DECLARE_COMPLETION_ONSTACK(wait);
  2112. ENTER("qla1280_mailbox_command");
  2113. if (ha->mailbox_wait) {
  2114. printk(KERN_ERR "Warning mailbox wait already in use!\n");
  2115. }
  2116. ha->mailbox_wait = &wait;
  2117. /*
  2118. * We really should start out by verifying that the mailbox is
2119. * available before we start sending the command data
  2120. */
  2121. /* Load mailbox registers. */
  2122. mptr = (uint16_t __iomem *) &reg->mailbox0;
  2123. iptr = mb;
  2124. for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
  2125. if (mr & BIT_0) {
  2126. WRT_REG_WORD(mptr, (*iptr));
  2127. }
  2128. mr >>= 1;
  2129. mptr++;
  2130. iptr++;
  2131. }
  2132. /* Issue set host interrupt command. */
  2133. /* set up a timer just in case we're really jammed */
  2134. timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
  2135. mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
  2136. spin_unlock_irq(ha->host->host_lock);
  2137. WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
  2138. data = qla1280_debounce_register(&reg->istatus);
  2139. wait_for_completion(&wait);
  2140. del_timer_sync(&ha->mailbox_timer);
  2141. spin_lock_irq(ha->host->host_lock);
  2142. ha->mailbox_wait = NULL;
  2143. /* Check for mailbox command timeout. */
  2144. if (ha->mailbox_out[0] != MBS_CMD_CMP) {
  2145. printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
  2146. "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
  2147. "0x%04x\n",
  2148. mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
  2149. printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
  2150. RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
  2151. RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
  2152. printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
  2153. RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
  2154. RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
  2155. status = 1;
  2156. }
  2157. /* Load return mailbox registers. */
  2158. optr = mb;
  2159. iptr = (uint16_t *) &ha->mailbox_out[0];
  2160. mr = MAILBOX_REGISTER_COUNT;
  2161. memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
  2162. if (ha->flags.reset_marker)
  2163. qla1280_rst_aen(ha);
  2164. if (status)
  2165. dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
  2166. "0x%x ****\n", mb[0]);
  2167. LEAVE("qla1280_mailbox_command");
  2168. return status;
  2169. }
  2170. /*
  2171. * qla1280_poll
  2172. * Polls ISP for interrupts.
  2173. *
  2174. * Input:
  2175. * ha = adapter block pointer.
  2176. */
  2177. static void
  2178. qla1280_poll(struct scsi_qla_host *ha)
  2179. {
  2180. struct device_reg __iomem *reg = ha->iobase;
  2181. uint16_t data;
  2182. LIST_HEAD(done_q);
  2183. /* ENTER("qla1280_poll"); */
  2184. /* Check for pending interrupts. */
  2185. data = RD_REG_WORD(&reg->istatus);
  2186. if (data & RISC_INT)
  2187. qla1280_isr(ha, &done_q);
  2188. if (!ha->mailbox_wait) {
  2189. if (ha->flags.reset_marker)
  2190. qla1280_rst_aen(ha);
  2191. }
  2192. if (!list_empty(&done_q))
  2193. qla1280_done(ha);
  2194. /* LEAVE("qla1280_poll"); */
  2195. }
  2196. /*
  2197. * qla1280_bus_reset
  2198. * Issue SCSI bus reset.
  2199. *
  2200. * Input:
  2201. * ha = adapter block pointer.
  2202. * bus = SCSI bus number.
  2203. *
  2204. * Returns:
  2205. * 0 = success
  2206. */
  2207. static int
  2208. qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
  2209. {
  2210. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2211. uint16_t reset_delay;
  2212. int status;
  2213. dprintk(3, "qla1280_bus_reset: entered\n");
  2214. if (qla1280_verbose)
  2215. printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
  2216. ha->host_no, bus);
  2217. reset_delay = ha->bus_settings[bus].bus_reset_delay;
  2218. mb[0] = MBC_BUS_RESET;
  2219. mb[1] = reset_delay;
  2220. mb[2] = (uint16_t) bus;
  2221. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2222. if (status) {
  2223. if (ha->bus_settings[bus].failed_reset_count > 2)
  2224. ha->bus_settings[bus].scsi_bus_dead = 1;
  2225. ha->bus_settings[bus].failed_reset_count++;
  2226. } else {
  2227. spin_unlock_irq(ha->host->host_lock);
  2228. ssleep(reset_delay);
  2229. spin_lock_irq(ha->host->host_lock);
  2230. ha->bus_settings[bus].scsi_bus_dead = 0;
  2231. ha->bus_settings[bus].failed_reset_count = 0;
  2232. ha->bus_settings[bus].reset_marker = 0;
  2233. /* Issue marker command. */
  2234. qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
  2235. }
  2236. /*
  2237. * We should probably call qla1280_set_target_parameters()
  2238. * here as well for all devices on the bus.
  2239. */
  2240. if (status)
  2241. dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
  2242. else
  2243. dprintk(3, "qla1280_bus_reset: exiting normally\n");
  2244. return status;
  2245. }
  2246. /*
  2247. * qla1280_device_reset
  2248. * Issue bus device reset message to the target.
  2249. *
  2250. * Input:
  2251. * ha = adapter block pointer.
  2252. * bus = SCSI BUS number.
  2253. * target = SCSI ID.
  2254. *
  2255. * Returns:
  2256. * 0 = success
  2257. */
  2258. static int
  2259. qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
  2260. {
  2261. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2262. int status;
  2263. ENTER("qla1280_device_reset");
  2264. mb[0] = MBC_ABORT_TARGET;
  2265. mb[1] = (bus ? (target | BIT_7) : target) << 8;
  2266. mb[2] = 1;
  2267. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2268. /* Issue marker command. */
  2269. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  2270. if (status)
  2271. dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
  2272. LEAVE("qla1280_device_reset");
  2273. return status;
  2274. }
  2275. /*
  2276. * qla1280_abort_command
  2277. * Abort command aborts a specified IOCB.
  2278. *
  2279. * Input:
  2280. * ha = adapter block pointer.
2281. * sp = SRB structure pointer.
  2282. *
  2283. * Returns:
  2284. * 0 = success
  2285. */
  2286. static int
  2287. qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
  2288. {
  2289. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2290. unsigned int bus, target, lun;
  2291. int status;
  2292. ENTER("qla1280_abort_command");
  2293. bus = SCSI_BUS_32(sp->cmd);
  2294. target = SCSI_TCN_32(sp->cmd);
  2295. lun = SCSI_LUN_32(sp->cmd);
  2296. sp->flags |= SRB_ABORT_PENDING;
  2297. mb[0] = MBC_ABORT_COMMAND;
  2298. mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
  2299. mb[2] = handle >> 16;
  2300. mb[3] = handle & 0xffff;
  2301. status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
  2302. if (status) {
  2303. dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
  2304. sp->flags &= ~SRB_ABORT_PENDING;
  2305. }
  2306. LEAVE("qla1280_abort_command");
  2307. return status;
  2308. }
  2309. /*
  2310. * qla1280_reset_adapter
  2311. * Reset adapter.
  2312. *
  2313. * Input:
  2314. * ha = adapter block pointer.
  2315. */
  2316. static void
  2317. qla1280_reset_adapter(struct scsi_qla_host *ha)
  2318. {
  2319. struct device_reg __iomem *reg = ha->iobase;
  2320. ENTER("qla1280_reset_adapter");
  2321. /* Disable ISP chip */
  2322. ha->flags.online = 0;
  2323. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  2324. WRT_REG_WORD(&reg->host_cmd,
  2325. HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
  2326. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2327. LEAVE("qla1280_reset_adapter");
  2328. }
  2329. /*
  2330. * Issue marker command.
  2331. * Function issues marker IOCB.
  2332. *
  2333. * Input:
  2334. * ha = adapter block pointer.
  2335. * bus = SCSI BUS number
  2336. * id = SCSI ID
  2337. * lun = SCSI LUN
  2338. * type = marker modifier
  2339. */
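/*
 * In this driver a marker is issued after a reset so the firmware can
 * resynchronize its view of outstanding commands: qla1280_bus_reset()
 * sends MK_SYNC_ALL for the whole bus, while qla1280_device_reset()
 * sends MK_SYNC_ID for a single target.
 */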
  2340. static void
  2341. qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
  2342. {
  2343. struct mrk_entry *pkt;
  2344. ENTER("qla1280_marker");
  2345. /* Get request packet. */
  2346. if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
  2347. pkt->entry_type = MARKER_TYPE;
  2348. pkt->lun = (uint8_t) lun;
  2349. pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
  2350. pkt->modifier = type;
  2351. pkt->entry_status = 0;
  2352. /* Issue command to ISP */
  2353. qla1280_isp_cmd(ha);
  2354. }
  2355. LEAVE("qla1280_marker");
  2356. }
  2357. /*
  2358. * qla1280_64bit_start_scsi
  2359. * The start SCSI is responsible for building request packets on
  2360. * request ring and modifying ISP input pointer.
  2361. *
  2362. * Input:
  2363. * ha = adapter block pointer.
2364. * sp = SRB structure pointer.
  2365. *
  2366. * Returns:
  2367. * 0 = success, was able to issue command.
  2368. */
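/*
 * IOCB accounting in the 64-bit path (as implemented below): the
 * initial COMMAND_A64_TYPE entry holds up to 2 data segments and each
 * CONTINUE_A64_TYPE entry holds up to 5 more, so, for example, a
 * 12-segment transfer needs req_cnt = 1 + (12 - 2) / 5 = 3 ring
 * entries.
 */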
  2369. #ifdef QLA_64BIT_PTR
  2370. static int
  2371. qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
  2372. {
  2373. struct device_reg __iomem *reg = ha->iobase;
  2374. struct scsi_cmnd *cmd = sp->cmd;
  2375. cmd_a64_entry_t *pkt;
  2376. __le32 *dword_ptr;
  2377. dma_addr_t dma_handle;
  2378. int status = 0;
  2379. int cnt;
  2380. int req_cnt;
  2381. int seg_cnt;
  2382. u8 dir;
  2383. ENTER("qla1280_64bit_start_scsi:");
  2384. /* Calculate number of entries and segments required. */
  2385. req_cnt = 1;
  2386. seg_cnt = scsi_dma_map(cmd);
  2387. if (seg_cnt > 0) {
  2388. if (seg_cnt > 2) {
  2389. req_cnt += (seg_cnt - 2) / 5;
  2390. if ((seg_cnt - 2) % 5)
  2391. req_cnt++;
  2392. }
  2393. } else if (seg_cnt < 0) {
  2394. status = 1;
  2395. goto out;
  2396. }
  2397. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2398. /* Calculate number of free request entries. */
  2399. cnt = RD_REG_WORD(&reg->mailbox4);
  2400. if (ha->req_ring_index < cnt)
  2401. ha->req_q_cnt = cnt - ha->req_ring_index;
  2402. else
  2403. ha->req_q_cnt =
  2404. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2405. }
  2406. dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
  2407. ha->req_q_cnt, seg_cnt);
2408. /* If there is not enough room in the request ring, report busy. */
  2409. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2410. status = SCSI_MLQUEUE_HOST_BUSY;
  2411. dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
  2412. "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
  2413. req_cnt);
  2414. goto out;
  2415. }
  2416. /* Check for room in outstanding command list. */
  2417. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
  2418. ha->outstanding_cmds[cnt] != NULL; cnt++);
  2419. if (cnt >= MAX_OUTSTANDING_COMMANDS) {
  2420. status = SCSI_MLQUEUE_HOST_BUSY;
  2421. dprintk(2, "qla1280_start_scsi: NO ROOM IN "
  2422. "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
  2423. goto out;
  2424. }
  2425. ha->outstanding_cmds[cnt] = sp;
  2426. ha->req_q_cnt -= req_cnt;
  2427. CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
  2428. dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
  2429. cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
  2430. dprintk(2, " bus %i, target %i, lun %i\n",
  2431. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2432. qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
  2433. /*
  2434. * Build command packet.
  2435. */
  2436. pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
  2437. pkt->entry_type = COMMAND_A64_TYPE;
  2438. pkt->entry_count = (uint8_t) req_cnt;
  2439. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2440. pkt->entry_status = 0;
  2441. pkt->handle = cpu_to_le32(cnt);
  2442. /* Zero out remaining portion of packet. */
  2443. memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
  2444. /* Set ISP command timeout. */
  2445. pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
  2446. /* Set device target ID and LUN */
  2447. pkt->lun = SCSI_LUN_32(cmd);
  2448. pkt->target = SCSI_BUS_32(cmd) ?
  2449. (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
  2450. /* Enable simple tag queuing if device supports it. */
  2451. if (cmd->device->simple_tags)
  2452. pkt->control_flags |= cpu_to_le16(BIT_3);
  2453. /* Load SCSI command packet. */
  2454. pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
  2455. memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
  2456. /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
  2457. /* Set transfer direction. */
  2458. dir = qla1280_data_direction(cmd);
  2459. pkt->control_flags |= cpu_to_le16(dir);
  2460. /* Set total data segment count. */
  2461. pkt->dseg_count = cpu_to_le16(seg_cnt);
  2462. /*
  2463. * Load data segments.
  2464. */
  2465. if (seg_cnt) { /* If data transfer. */
  2466. struct scatterlist *sg, *s;
  2467. int remseg = seg_cnt;
  2468. sg = scsi_sglist(cmd);
  2469. /* Setup packet address segment pointer. */
  2470. dword_ptr = (u32 *)&pkt->dseg_0_address;
  2471. /* Load command entry data segments. */
  2472. for_each_sg(sg, s, seg_cnt, cnt) {
  2473. if (cnt == 2)
  2474. break;
  2475. dma_handle = sg_dma_address(s);
  2476. *dword_ptr++ =
  2477. cpu_to_le32(lower_32_bits(dma_handle));
  2478. *dword_ptr++ =
  2479. cpu_to_le32(upper_32_bits(dma_handle));
  2480. *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
  2481. dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
  2482. cpu_to_le32(upper_32_bits(dma_handle)),
  2483. cpu_to_le32(lower_32_bits(dma_handle)),
2484. cpu_to_le32(sg_dma_len(s)));
  2485. remseg--;
  2486. }
  2487. dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
  2488. "command packet data - b %i, t %i, l %i \n",
  2489. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
  2490. SCSI_LUN_32(cmd));
  2491. qla1280_dump_buffer(5, (char *)pkt,
  2492. REQUEST_ENTRY_SIZE);
  2493. /*
  2494. * Build continuation packets.
  2495. */
  2496. dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
  2497. "remains\n", seg_cnt);
  2498. while (remseg > 0) {
  2499. /* Update sg start */
  2500. sg = s;
  2501. /* Adjust ring index. */
  2502. ha->req_ring_index++;
  2503. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2504. ha->req_ring_index = 0;
  2505. ha->request_ring_ptr =
  2506. ha->request_ring;
  2507. } else
  2508. ha->request_ring_ptr++;
  2509. pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
  2510. /* Zero out packet. */
  2511. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2512. /* Load packet defaults. */
  2513. ((struct cont_a64_entry *) pkt)->entry_type =
  2514. CONTINUE_A64_TYPE;
  2515. ((struct cont_a64_entry *) pkt)->entry_count = 1;
  2516. ((struct cont_a64_entry *) pkt)->sys_define =
  2517. (uint8_t)ha->req_ring_index;
  2518. /* Setup packet address segment pointer. */
  2519. dword_ptr =
  2520. (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
  2521. /* Load continuation entry data segments. */
  2522. for_each_sg(sg, s, remseg, cnt) {
  2523. if (cnt == 5)
  2524. break;
  2525. dma_handle = sg_dma_address(s);
  2526. *dword_ptr++ =
  2527. cpu_to_le32(lower_32_bits(dma_handle));
  2528. *dword_ptr++ =
  2529. cpu_to_le32(upper_32_bits(dma_handle));
  2530. *dword_ptr++ =
  2531. cpu_to_le32(sg_dma_len(s));
  2532. dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
  2533. cpu_to_le32(upper_32_bits(dma_handle)),
  2534. cpu_to_le32(lower_32_bits(dma_handle)),
  2535. cpu_to_le32(sg_dma_len(s)));
  2536. }
  2537. remseg -= cnt;
  2538. dprintk(5, "qla1280_64bit_start_scsi: "
  2539. "continuation packet data - b %i, t "
  2540. "%i, l %i \n", SCSI_BUS_32(cmd),
  2541. SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2542. qla1280_dump_buffer(5, (char *)pkt,
  2543. REQUEST_ENTRY_SIZE);
  2544. }
  2545. } else { /* No data transfer */
  2546. dprintk(5, "qla1280_64bit_start_scsi: No data, command "
  2547. "packet data - b %i, t %i, l %i \n",
  2548. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2549. qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
  2550. }
  2551. /* Adjust ring index. */
  2552. ha->req_ring_index++;
  2553. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2554. ha->req_ring_index = 0;
  2555. ha->request_ring_ptr = ha->request_ring;
  2556. } else
  2557. ha->request_ring_ptr++;
  2558. /* Set chip new ring index. */
  2559. dprintk(2,
  2560. "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
  2561. sp->flags |= SRB_SENT;
  2562. ha->actthreads++;
  2563. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2564. out:
  2565. if (status)
  2566. dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
  2567. else
  2568. dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
  2569. return status;
  2570. }
  2571. #else /* !QLA_64BIT_PTR */
  2572. /*
  2573. * qla1280_32bit_start_scsi
  2574. * The start SCSI is responsible for building request packets on
  2575. * request ring and modifying ISP input pointer.
  2576. *
  2577. * The Qlogic firmware interface allows every queue slot to have a SCSI
  2578. * command and up to 4 scatter/gather (SG) entries. If we need more
  2579. * than 4 SG entries, then continuation entries are used that can
  2580. * hold another 7 entries each. The start routine determines if there
2581. * are enough empty slots, then builds the combination of requests to
2582. * fulfill the OS request.
  2583. *
  2584. * Input:
  2585. * ha = adapter block pointer.
  2586. * sp = SCSI Request Block structure pointer.
  2587. *
  2588. * Returns:
  2589. * 0 = success, was able to issue command.
  2590. */
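/*
 * Sizing example for the 32-bit path (matching the description above):
 * the first command entry carries up to 4 data segments and each
 * continuation entry up to 7 more, so an 18-segment transfer needs
 * req_cnt = 1 + (18 - 4) / 7 = 3 ring entries.
 */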
  2591. static int
  2592. qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
  2593. {
  2594. struct device_reg __iomem *reg = ha->iobase;
  2595. struct scsi_cmnd *cmd = sp->cmd;
  2596. struct cmd_entry *pkt;
  2597. __le32 *dword_ptr;
  2598. int status = 0;
  2599. int cnt;
  2600. int req_cnt;
  2601. int seg_cnt;
  2602. u8 dir;
  2603. ENTER("qla1280_32bit_start_scsi");
  2604. dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
  2605. cmd->cmnd[0]);
  2606. /* Calculate number of entries and segments required. */
  2607. req_cnt = 1;
  2608. seg_cnt = scsi_dma_map(cmd);
  2609. if (seg_cnt) {
  2610. /*
  2611. * if greater than four sg entries then we need to allocate
  2612. * continuation entries
  2613. */
  2614. if (seg_cnt > 4) {
  2615. req_cnt += (seg_cnt - 4) / 7;
  2616. if ((seg_cnt - 4) % 7)
  2617. req_cnt++;
  2618. }
  2619. dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
  2620. cmd, seg_cnt, req_cnt);
  2621. } else if (seg_cnt < 0) {
  2622. status = 1;
  2623. goto out;
  2624. }
  2625. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2626. /* Calculate number of free request entries. */
  2627. cnt = RD_REG_WORD(&reg->mailbox4);
  2628. if (ha->req_ring_index < cnt)
  2629. ha->req_q_cnt = cnt - ha->req_ring_index;
  2630. else
  2631. ha->req_q_cnt =
  2632. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2633. }
  2634. dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
  2635. ha->req_q_cnt, seg_cnt);
2636. /* If there is not enough room in the request ring, report busy. */
  2637. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2638. status = SCSI_MLQUEUE_HOST_BUSY;
  2639. dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
  2640. "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
  2641. ha->req_q_cnt, req_cnt);
  2642. goto out;
  2643. }
  2644. /* Check for empty slot in outstanding command list. */
  2645. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2646. ha->outstanding_cmds[cnt] != NULL; cnt++);
  2647. if (cnt >= MAX_OUTSTANDING_COMMANDS) {
  2648. status = SCSI_MLQUEUE_HOST_BUSY;
  2649. dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
  2650. "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
  2651. goto out;
  2652. }
  2653. CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
  2654. ha->outstanding_cmds[cnt] = sp;
  2655. ha->req_q_cnt -= req_cnt;
  2656. /*
  2657. * Build command packet.
  2658. */
  2659. pkt = (struct cmd_entry *) ha->request_ring_ptr;
  2660. pkt->entry_type = COMMAND_TYPE;
  2661. pkt->entry_count = (uint8_t) req_cnt;
  2662. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2663. pkt->entry_status = 0;
  2664. pkt->handle = cpu_to_le32(cnt);
  2665. /* Zero out remaining portion of packet. */
  2666. memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
  2667. /* Set ISP command timeout. */
  2668. pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
  2669. /* Set device target ID and LUN */
  2670. pkt->lun = SCSI_LUN_32(cmd);
  2671. pkt->target = SCSI_BUS_32(cmd) ?
  2672. (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
  2673. /* Enable simple tag queuing if device supports it. */
  2674. if (cmd->device->simple_tags)
  2675. pkt->control_flags |= cpu_to_le16(BIT_3);
  2676. /* Load SCSI command packet. */
  2677. pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
  2678. memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
  2679. /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
  2680. /* Set transfer direction. */
  2681. dir = qla1280_data_direction(cmd);
  2682. pkt->control_flags |= cpu_to_le16(dir);
  2683. /* Set total data segment count. */
  2684. pkt->dseg_count = cpu_to_le16(seg_cnt);
  2685. /*
  2686. * Load data segments.
  2687. */
  2688. if (seg_cnt) {
  2689. struct scatterlist *sg, *s;
  2690. int remseg = seg_cnt;
  2691. sg = scsi_sglist(cmd);
  2692. /* Setup packet address segment pointer. */
  2693. dword_ptr = &pkt->dseg_0_address;
  2694. dprintk(3, "Building S/G data segments..\n");
  2695. qla1280_dump_buffer(1, (char *)sg, 4 * 16);
  2696. /* Load command entry data segments. */
  2697. for_each_sg(sg, s, seg_cnt, cnt) {
  2698. if (cnt == 4)
  2699. break;
  2700. *dword_ptr++ =
  2701. cpu_to_le32(lower_32_bits(sg_dma_address(s)));
  2702. *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
  2703. dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
  2704. (lower_32_bits(sg_dma_address(s))),
  2705. (sg_dma_len(s)));
  2706. remseg--;
  2707. }
  2708. /*
  2709. * Build continuation packets.
  2710. */
  2711. dprintk(3, "S/G Building Continuation"
  2712. "...seg_cnt=0x%x remains\n", seg_cnt);
  2713. while (remseg > 0) {
  2714. /* Continue from end point */
  2715. sg = s;
  2716. /* Adjust ring index. */
  2717. ha->req_ring_index++;
  2718. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2719. ha->req_ring_index = 0;
  2720. ha->request_ring_ptr =
  2721. ha->request_ring;
  2722. } else
  2723. ha->request_ring_ptr++;
  2724. pkt = (struct cmd_entry *)ha->request_ring_ptr;
  2725. /* Zero out packet. */
  2726. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2727. /* Load packet defaults. */
  2728. ((struct cont_entry *) pkt)->
  2729. entry_type = CONTINUE_TYPE;
  2730. ((struct cont_entry *) pkt)->entry_count = 1;
  2731. ((struct cont_entry *) pkt)->sys_define =
  2732. (uint8_t) ha->req_ring_index;
  2733. /* Setup packet address segment pointer. */
  2734. dword_ptr =
  2735. &((struct cont_entry *) pkt)->dseg_0_address;
  2736. /* Load continuation entry data segments. */
  2737. for_each_sg(sg, s, remseg, cnt) {
  2738. if (cnt == 7)
  2739. break;
  2740. *dword_ptr++ =
  2741. cpu_to_le32(lower_32_bits(sg_dma_address(s)));
  2742. *dword_ptr++ =
  2743. cpu_to_le32(sg_dma_len(s));
  2744. dprintk(1,
  2745. "S/G Segment Cont. phys_addr=0x%x, "
  2746. "len=0x%x\n",
  2747. cpu_to_le32(lower_32_bits(sg_dma_address(s))),
  2748. cpu_to_le32(sg_dma_len(s)));
  2749. }
  2750. remseg -= cnt;
  2751. dprintk(5, "qla1280_32bit_start_scsi: "
  2752. "continuation packet data - "
  2753. "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
  2754. SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2755. qla1280_dump_buffer(5, (char *)pkt,
  2756. REQUEST_ENTRY_SIZE);
  2757. }
  2758. } else { /* No data transfer at all */
  2759. dprintk(5, "qla1280_32bit_start_scsi: No data, command "
  2760. "packet data - \n");
  2761. qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
  2762. }
  2763. dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
  2764. qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
  2765. REQUEST_ENTRY_SIZE);
  2766. /* Adjust ring index. */
  2767. ha->req_ring_index++;
  2768. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2769. ha->req_ring_index = 0;
  2770. ha->request_ring_ptr = ha->request_ring;
  2771. } else
  2772. ha->request_ring_ptr++;
  2773. /* Set chip new ring index. */
  2774. dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
  2775. "for pending command\n");
  2776. sp->flags |= SRB_SENT;
  2777. ha->actthreads++;
  2778. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2779. out:
  2780. if (status)
  2781. dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
  2782. LEAVE("qla1280_32bit_start_scsi");
  2783. return status;
  2784. }
  2785. #endif
  2786. /*
  2787. * qla1280_req_pkt
  2788. * Function is responsible for locking ring and
  2789. * getting a zeroed out request packet.
  2790. *
  2791. * Input:
  2792. * ha = adapter block pointer.
  2793. *
  2794. * Returns:
2795. * NULL = failed to get slot.
  2796. */
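/*
 * This helper busy-waits for up to roughly 30 seconds (15,000,000
 * passes with udelay(2) each) for a free ring slot, calling
 * qla1280_poll() on every pass so pending completions can free up
 * ring space while we spin.
 */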
  2797. static request_t *
  2798. qla1280_req_pkt(struct scsi_qla_host *ha)
  2799. {
  2800. struct device_reg __iomem *reg = ha->iobase;
  2801. request_t *pkt = NULL;
  2802. int cnt;
  2803. uint32_t timer;
  2804. ENTER("qla1280_req_pkt");
  2805. /*
  2806. * This can be called from interrupt context, damn it!!!
  2807. */
  2808. /* Wait for 30 seconds for slot. */
  2809. for (timer = 15000000; timer; timer--) {
  2810. if (ha->req_q_cnt > 0) {
  2811. /* Calculate number of free request entries. */
  2812. cnt = RD_REG_WORD(&reg->mailbox4);
  2813. if (ha->req_ring_index < cnt)
  2814. ha->req_q_cnt = cnt - ha->req_ring_index;
  2815. else
  2816. ha->req_q_cnt =
  2817. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2818. }
  2819. /* Found empty request ring slot? */
  2820. if (ha->req_q_cnt > 0) {
  2821. ha->req_q_cnt--;
  2822. pkt = ha->request_ring_ptr;
  2823. /* Zero out packet. */
  2824. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2825. /*
  2826. * How can this be right when we have a ring
  2827. * size of 512???
  2828. */
  2829. /* Set system defined field. */
  2830. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2831. /* Set entry count. */
  2832. pkt->entry_count = 1;
  2833. break;
  2834. }
  2835. udelay(2); /* 10 */
  2836. /* Check for pending interrupts. */
  2837. qla1280_poll(ha);
  2838. }
  2839. if (!pkt)
  2840. dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
  2841. else
  2842. dprintk(3, "qla1280_req_pkt: exiting normally\n");
  2843. return pkt;
  2844. }
  2845. /*
  2846. * qla1280_isp_cmd
  2847. * Function is responsible for modifying ISP input pointer.
  2848. * Releases ring lock.
  2849. *
  2850. * Input:
  2851. * ha = adapter block pointer.
  2852. */
  2853. static void
  2854. qla1280_isp_cmd(struct scsi_qla_host *ha)
  2855. {
  2856. struct device_reg __iomem *reg = ha->iobase;
  2857. ENTER("qla1280_isp_cmd");
  2858. dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
  2859. qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
  2860. REQUEST_ENTRY_SIZE);
  2861. /* Adjust ring index. */
  2862. ha->req_ring_index++;
  2863. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2864. ha->req_ring_index = 0;
  2865. ha->request_ring_ptr = ha->request_ring;
  2866. } else
  2867. ha->request_ring_ptr++;
  2868. /*
  2869. * Update request index to mailbox4 (Request Queue In).
  2870. */
  2871. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2872. LEAVE("qla1280_isp_cmd");
  2873. }
  2874. /****************************************************************************/
  2875. /* Interrupt Service Routine. */
  2876. /****************************************************************************/
  2877. /****************************************************************************
  2878. * qla1280_isr
  2879. * Calls I/O done on command completion.
  2880. *
  2881. * Input:
  2882. * ha = adapter block pointer.
  2883. * done_q = done queue.
  2884. ****************************************************************************/
  2885. static void
  2886. qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
  2887. {
  2888. struct device_reg __iomem *reg = ha->iobase;
  2889. struct response *pkt;
  2890. struct srb *sp = NULL;
  2891. uint16_t mailbox[MAILBOX_REGISTER_COUNT];
  2892. uint16_t *wptr;
  2893. uint32_t index;
  2894. u16 istatus;
  2895. ENTER("qla1280_isr");
  2896. istatus = RD_REG_WORD(&reg->istatus);
  2897. if (!(istatus & (RISC_INT | PCI_INT)))
  2898. return;
  2899. /* Save mailbox register 5 */
  2900. mailbox[5] = RD_REG_WORD(&reg->mailbox5);
  2901. /* Check for mailbox interrupt. */
  2902. mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
  2903. if (mailbox[0] & BIT_0) {
  2904. /* Get mailbox data. */
  2905. /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
  2906. wptr = &mailbox[0];
  2907. *wptr++ = RD_REG_WORD(&reg->mailbox0);
  2908. *wptr++ = RD_REG_WORD(&reg->mailbox1);
  2909. *wptr = RD_REG_WORD(&reg->mailbox2);
  2910. if (mailbox[0] != MBA_SCSI_COMPLETION) {
  2911. wptr++;
  2912. *wptr++ = RD_REG_WORD(&reg->mailbox3);
  2913. *wptr++ = RD_REG_WORD(&reg->mailbox4);
  2914. wptr++;
  2915. *wptr++ = RD_REG_WORD(&reg->mailbox6);
  2916. *wptr = RD_REG_WORD(&reg->mailbox7);
  2917. }
  2918. /* Release mailbox registers. */
  2919. WRT_REG_WORD(&reg->semaphore, 0);
  2920. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  2921. dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
  2922. mailbox[0]);
  2923. /* Handle asynchronous event */
  2924. switch (mailbox[0]) {
  2925. case MBA_SCSI_COMPLETION: /* Response completion */
  2926. dprintk(5, "qla1280_isr: mailbox SCSI response "
  2927. "completion\n");
  2928. if (ha->flags.online) {
  2929. /* Get outstanding command index. */
  2930. index = mailbox[2] << 16 | mailbox[1];
  2931. /* Validate handle. */
  2932. if (index < MAX_OUTSTANDING_COMMANDS)
  2933. sp = ha->outstanding_cmds[index];
  2934. else
  2935. sp = NULL;
  2936. if (sp) {
  2937. /* Free outstanding command slot. */
  2938. ha->outstanding_cmds[index] = NULL;
  2939. /* Save ISP completion status */
  2940. CMD_RESULT(sp->cmd) = 0;
  2941. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  2942. /* Place block on done queue */
  2943. list_add_tail(&sp->list, done_q);
  2944. } else {
  2945. /*
  2946. * If we get here we have a real problem!
  2947. */
  2948. printk(KERN_WARNING
  2949. "qla1280: ISP invalid handle\n");
  2950. }
  2951. }
  2952. break;
  2953. case MBA_BUS_RESET: /* SCSI Bus Reset */
  2954. ha->flags.reset_marker = 1;
  2955. index = mailbox[6] & BIT_0;
  2956. ha->bus_settings[index].reset_marker = 1;
  2957. printk(KERN_DEBUG "qla1280_isr(): index %i "
  2958. "asynchronous BUS_RESET\n", index);
  2959. break;
  2960. case MBA_SYSTEM_ERR: /* System Error */
  2961. printk(KERN_WARNING
  2962. "qla1280: ISP System Error - mbx1=%xh, mbx2="
  2963. "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
  2964. mailbox[3]);
  2965. break;
  2966. case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
  2967. printk(KERN_WARNING
  2968. "qla1280: ISP Request Transfer Error\n");
  2969. break;
  2970. case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
  2971. printk(KERN_WARNING
  2972. "qla1280: ISP Response Transfer Error\n");
  2973. break;
  2974. case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
  2975. dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
  2976. break;
  2977. case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
  2978. dprintk(2,
  2979. "qla1280_isr: asynchronous TIMEOUT_RESET\n");
  2980. break;
  2981. case MBA_DEVICE_RESET: /* Bus Device Reset */
  2982. printk(KERN_INFO "qla1280_isr(): asynchronous "
  2983. "BUS_DEVICE_RESET\n");
  2984. ha->flags.reset_marker = 1;
  2985. index = mailbox[6] & BIT_0;
  2986. ha->bus_settings[index].reset_marker = 1;
  2987. break;
  2988. case MBA_BUS_MODE_CHANGE:
  2989. dprintk(2,
  2990. "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
  2991. break;
  2992. default:
  2993. /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
  2994. if (mailbox[0] < MBA_ASYNC_EVENT) {
  2995. wptr = &mailbox[0];
  2996. memcpy((uint16_t *) ha->mailbox_out, wptr,
  2997. MAILBOX_REGISTER_COUNT *
  2998. sizeof(uint16_t));
2999. if (ha->mailbox_wait != NULL)
  3000. complete(ha->mailbox_wait);
  3001. }
  3002. break;
  3003. }
  3004. } else {
  3005. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  3006. }
  3007. /*
  3008. * We will receive interrupts during mailbox testing prior to
  3009. * the card being marked online, hence the double check.
  3010. */
  3011. if (!(ha->flags.online && !ha->mailbox_wait)) {
  3012. dprintk(2, "qla1280_isr: Response pointer Error\n");
  3013. goto out;
  3014. }
  3015. if (mailbox[5] >= RESPONSE_ENTRY_CNT)
  3016. goto out;
  3017. while (ha->rsp_ring_index != mailbox[5]) {
  3018. pkt = ha->response_ring_ptr;
  3019. dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
  3020. " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
  3021. dprintk(5,"qla1280_isr: response packet data\n");
  3022. qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
  3023. if (pkt->entry_type == STATUS_TYPE) {
  3024. if ((le16_to_cpu(pkt->scsi_status) & 0xff)
  3025. || pkt->comp_status || pkt->entry_status) {
  3026. dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
  3027. "0x%x mailbox[5] = 0x%x, comp_status "
  3028. "= 0x%x, scsi_status = 0x%x\n",
  3029. ha->rsp_ring_index, mailbox[5],
  3030. le16_to_cpu(pkt->comp_status),
  3031. le16_to_cpu(pkt->scsi_status));
  3032. }
  3033. } else {
  3034. dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
  3035. "0x%x, mailbox[5] = 0x%x\n",
  3036. ha->rsp_ring_index, mailbox[5]);
  3037. dprintk(2, "qla1280_isr: response packet data\n");
  3038. qla1280_dump_buffer(2, (char *)pkt,
  3039. RESPONSE_ENTRY_SIZE);
  3040. }
  3041. if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
  3042. dprintk(2, "status: Cmd %p, handle %i\n",
  3043. ha->outstanding_cmds[pkt->handle]->cmd,
  3044. pkt->handle);
  3045. if (pkt->entry_type == STATUS_TYPE)
  3046. qla1280_status_entry(ha, pkt, done_q);
  3047. else
  3048. qla1280_error_entry(ha, pkt, done_q);
  3049. /* Adjust ring index. */
  3050. ha->rsp_ring_index++;
  3051. if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
  3052. ha->rsp_ring_index = 0;
  3053. ha->response_ring_ptr = ha->response_ring;
  3054. } else
  3055. ha->response_ring_ptr++;
  3056. WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
  3057. }
  3058. }
  3059. out:
  3060. LEAVE("qla1280_isr");
  3061. }
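/*
 * Simplified sketch (not the driver's actual interrupt entry point) of
 * how qla1280_isr() is intended to be driven: under host_lock, collect
 * finished commands on the done queue, then complete them via
 * qla1280_done() once the hardware has been serviced.
 */
#if 0
static irqreturn_t example_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	irqreturn_t handled = IRQ_NONE;

	spin_lock(ha->host->host_lock);
	if (RD_REG_WORD(&ha->iobase->istatus) & (RISC_INT | PCI_INT)) {
		qla1280_isr(ha, &ha->done_q);
		handled = IRQ_HANDLED;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);
	spin_unlock(ha->host->host_lock);

	return handled;
}
#endif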
  3062. /*
  3063. * qla1280_rst_aen
  3064. * Processes asynchronous reset.
  3065. *
  3066. * Input:
  3067. * ha = adapter block pointer.
  3068. */
  3069. static void
  3070. qla1280_rst_aen(struct scsi_qla_host *ha)
  3071. {
  3072. uint8_t bus;
  3073. ENTER("qla1280_rst_aen");
  3074. if (ha->flags.online && !ha->flags.reset_active &&
  3075. !ha->flags.abort_isp_active) {
  3076. ha->flags.reset_active = 1;
  3077. while (ha->flags.reset_marker) {
  3078. /* Issue marker command. */
  3079. ha->flags.reset_marker = 0;
  3080. for (bus = 0; bus < ha->ports &&
  3081. !ha->flags.reset_marker; bus++) {
  3082. if (ha->bus_settings[bus].reset_marker) {
  3083. ha->bus_settings[bus].reset_marker = 0;
  3084. qla1280_marker(ha, bus, 0, 0,
  3085. MK_SYNC_ALL);
  3086. }
  3087. }
  3088. }
  3089. }
  3090. LEAVE("qla1280_rst_aen");
  3091. }
  3092. /*
  3093. * qla1280_status_entry
  3094. * Processes received ISP status entry.
  3095. *
  3096. * Input:
  3097. * ha = adapter block pointer.
  3098. * pkt = entry pointer.
  3099. * done_q = done queue.
  3100. */
  3101. static void
  3102. qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
  3103. struct list_head *done_q)
  3104. {
  3105. unsigned int bus, target, lun;
  3106. int sense_sz;
  3107. struct srb *sp;
  3108. struct scsi_cmnd *cmd;
  3109. uint32_t handle = le32_to_cpu(pkt->handle);
  3110. uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
  3111. uint16_t comp_status = le16_to_cpu(pkt->comp_status);
  3112. ENTER("qla1280_status_entry");
  3113. /* Validate handle. */
  3114. if (handle < MAX_OUTSTANDING_COMMANDS)
  3115. sp = ha->outstanding_cmds[handle];
  3116. else
  3117. sp = NULL;
  3118. if (!sp) {
  3119. printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
  3120. goto out;
  3121. }
  3122. /* Free outstanding command slot. */
  3123. ha->outstanding_cmds[handle] = NULL;
  3124. cmd = sp->cmd;
  3125. /* Generate LU queue on cntrl, target, LUN */
  3126. bus = SCSI_BUS_32(cmd);
  3127. target = SCSI_TCN_32(cmd);
  3128. lun = SCSI_LUN_32(cmd);
  3129. if (comp_status || scsi_status) {
  3130. dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
  3131. "0x%x, handle = 0x%x\n", comp_status,
  3132. scsi_status, handle);
  3133. }
  3134. /* Target busy or queue full */
  3135. if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
  3136. (scsi_status & 0xFF) == SAM_STAT_BUSY) {
  3137. CMD_RESULT(cmd) = scsi_status & 0xff;
  3138. } else {
  3139. /* Save ISP completion status */
  3140. CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
  3141. if (scsi_status & SAM_STAT_CHECK_CONDITION) {
  3142. if (comp_status != CS_ARS_FAILED) {
  3143. uint16_t req_sense_length =
  3144. le16_to_cpu(pkt->req_sense_length);
  3145. if (req_sense_length < CMD_SNSLEN(cmd))
  3146. sense_sz = req_sense_length;
  3147. else
3148. /*
3149. * Why copy one byte less than
3150. * CMD_SNSLEN(cmd)?
3151. * This looks wrong! /Jes
3152. */
  3153. sense_sz = CMD_SNSLEN(cmd) - 1;
  3154. memcpy(cmd->sense_buffer,
  3155. &pkt->req_sense_data, sense_sz);
  3156. } else
  3157. sense_sz = 0;
  3158. memset(cmd->sense_buffer + sense_sz, 0,
  3159. SCSI_SENSE_BUFFERSIZE - sense_sz);
  3160. dprintk(2, "qla1280_status_entry: Check "
  3161. "condition Sense data, b %i, t %i, "
  3162. "l %i\n", bus, target, lun);
  3163. if (sense_sz)
  3164. qla1280_dump_buffer(2,
  3165. (char *)cmd->sense_buffer,
  3166. sense_sz);
  3167. }
  3168. }
  3169. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  3170. /* Place command on done queue. */
  3171. list_add_tail(&sp->list, done_q);
  3172. out:
  3173. LEAVE("qla1280_status_entry");
  3174. }
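/*
 * Hypothetical helper (not used by the driver) showing the intent of
 * the sense-data handling above: copy no more than the reported sense
 * length and no more than the midlayer's buffer, then zero the rest.
 */
#if 0
static void example_copy_sense(struct scsi_cmnd *cmd,
			       const struct response *pkt)
{
	int len = min_t(int, le16_to_cpu(pkt->req_sense_length),
			SCSI_SENSE_BUFFERSIZE);

	memcpy(cmd->sense_buffer, &pkt->req_sense_data, len);
	memset(cmd->sense_buffer + len, 0, SCSI_SENSE_BUFFERSIZE - len);
}
#endif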
  3175. /*
  3176. * qla1280_error_entry
  3177. * Processes error entry.
  3178. *
  3179. * Input:
  3180. * ha = adapter block pointer.
  3181. * pkt = entry pointer.
  3182. * done_q = done queue.
  3183. */
  3184. static void
  3185. qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
  3186. struct list_head *done_q)
  3187. {
  3188. struct srb *sp;
  3189. uint32_t handle = le32_to_cpu(pkt->handle);
  3190. ENTER("qla1280_error_entry");
  3191. if (pkt->entry_status & BIT_3)
  3192. dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
  3193. else if (pkt->entry_status & BIT_2)
  3194. dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
  3195. else if (pkt->entry_status & BIT_1)
  3196. dprintk(2, "qla1280_error_entry: FULL flag error\n");
  3197. else
  3198. dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
  3199. /* Validate handle. */
  3200. if (handle < MAX_OUTSTANDING_COMMANDS)
  3201. sp = ha->outstanding_cmds[handle];
  3202. else
  3203. sp = NULL;
  3204. if (sp) {
  3205. /* Free outstanding command slot. */
  3206. ha->outstanding_cmds[handle] = NULL;
  3207. /* Bad payload or header */
  3208. if (pkt->entry_status & (BIT_3 + BIT_2)) {
  3209. /* Bad payload or header, set error status. */
  3210. /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
  3211. CMD_RESULT(sp->cmd) = DID_ERROR << 16;
  3212. } else if (pkt->entry_status & BIT_1) { /* FULL flag */
  3213. CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
  3214. } else {
  3215. /* Set error status. */
  3216. CMD_RESULT(sp->cmd) = DID_ERROR << 16;
  3217. }
  3218. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  3219. /* Place command on done queue. */
  3220. list_add_tail(&sp->list, done_q);
  3221. }
  3222. #ifdef QLA_64BIT_PTR
  3223. else if (pkt->entry_type == COMMAND_A64_TYPE) {
  3224. printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
  3225. }
  3226. #endif
  3227. LEAVE("qla1280_error_entry");
  3228. }
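/*
 * For reference, a sketch of how the entry_status flag bits decoded
 * above map to host status codes (assuming the same BIT_* definitions;
 * this mirrors the if/else chain in qla1280_error_entry()):
 */
#if 0
static int example_error_entry_result(u8 entry_status)
{
	if (entry_status & (BIT_3 | BIT_2))	/* bad payload or header */
		return DID_ERROR << 16;
	if (entry_status & BIT_1)		/* FULL flag */
		return DID_BUS_BUSY << 16;
	return DID_ERROR << 16;			/* anything else */
}
#endif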
  3229. /*
  3230. * qla1280_abort_isp
  3231. * Resets ISP and aborts all outstanding commands.
  3232. *
  3233. * Input:
  3234. * ha = adapter block pointer.
  3235. *
  3236. * Returns:
  3237. * 0 = success
  3238. */
  3239. static int
  3240. qla1280_abort_isp(struct scsi_qla_host *ha)
  3241. {
  3242. struct device_reg __iomem *reg = ha->iobase;
  3243. struct srb *sp;
  3244. int status = 0;
  3245. int cnt;
  3246. int bus;
  3247. ENTER("qla1280_abort_isp");
  3248. if (ha->flags.abort_isp_active || !ha->flags.online)
  3249. goto out;
  3250. ha->flags.abort_isp_active = 1;
  3251. /* Disable ISP interrupts. */
  3252. qla1280_disable_intrs(ha);
  3253. WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
  3254. RD_REG_WORD(&reg->id_l);
  3255. printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
  3256. ha->host_no);
  3257. /* Dequeue all commands in outstanding command list. */
  3258. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
  3259. struct scsi_cmnd *cmd;
  3260. sp = ha->outstanding_cmds[cnt];
  3261. if (sp) {
  3262. cmd = sp->cmd;
  3263. CMD_RESULT(cmd) = DID_RESET << 16;
  3264. CMD_HANDLE(cmd) = COMPLETED_HANDLE;
  3265. ha->outstanding_cmds[cnt] = NULL;
  3266. list_add_tail(&sp->list, &ha->done_q);
  3267. }
  3268. }
  3269. qla1280_done(ha);
  3270. status = qla1280_load_firmware(ha);
  3271. if (status)
  3272. goto out;
  3273. /* Setup adapter based on NVRAM parameters. */
  3274. qla1280_nvram_config (ha);
  3275. status = qla1280_init_rings(ha);
  3276. if (status)
  3277. goto out;
  3278. /* Issue SCSI reset. */
  3279. for (bus = 0; bus < ha->ports; bus++)
  3280. qla1280_bus_reset(ha, bus);
  3281. ha->flags.abort_isp_active = 0;
  3282. out:
  3283. if (status) {
  3284. printk(KERN_WARNING
  3285. "qla1280: ISP error recovery failed, board disabled");
  3286. qla1280_reset_adapter(ha);
  3287. dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
  3288. }
  3289. LEAVE("qla1280_abort_isp");
  3290. return status;
  3291. }
  3292. /*
  3293. * qla1280_debounce_register
  3294. * Debounce register.
  3295. *
  3296. * Input:
  3297. * port = register address.
  3298. *
  3299. * Returns:
  3300. * register value.
  3301. */
  3302. static u16
  3303. qla1280_debounce_register(volatile u16 __iomem * addr)
  3304. {
  3305. volatile u16 ret;
  3306. volatile u16 ret2;
  3307. ret = RD_REG_WORD(addr);
  3308. ret2 = RD_REG_WORD(addr);
  3309. if (ret == ret2)
  3310. return ret;
  3311. do {
  3312. cpu_relax();
  3313. ret = RD_REG_WORD(addr);
  3314. ret2 = RD_REG_WORD(addr);
  3315. } while (ret != ret2);
  3316. return ret;
  3317. }
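/*
 * Illustrative use only (assumed caller, not taken verbatim from the
 * driver): debounce a register the RISC may be changing under us, such
 * as the interrupt status register, before acting on its value.
 */
#if 0
static bool example_intr_pending(struct scsi_qla_host *ha)
{
	u16 istatus = qla1280_debounce_register(&ha->iobase->istatus);

	return (istatus & (RISC_INT | PCI_INT)) != 0;
}
#endif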
  3318. /************************************************************************
  3319. * qla1280_check_for_dead_scsi_bus *
  3320. * *
  3321. * This routine checks for a dead SCSI bus *
  3322. ************************************************************************/
  3323. #define SET_SXP_BANK 0x0100
  3324. #define SCSI_PHASE_INVALID 0x87FF
  3325. static int
  3326. qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
  3327. {
  3328. uint16_t config_reg, scsi_control;
  3329. struct device_reg __iomem *reg = ha->iobase;
  3330. if (ha->bus_settings[bus].scsi_bus_dead) {
  3331. WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
  3332. config_reg = RD_REG_WORD(&reg->cfg_1);
  3333. WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
  3334. scsi_control = RD_REG_WORD(&reg->scsiControlPins);
  3335. WRT_REG_WORD(&reg->cfg_1, config_reg);
  3336. WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
  3337. if (scsi_control == SCSI_PHASE_INVALID) {
  3338. ha->bus_settings[bus].scsi_bus_dead = 1;
  3339. return 1; /* bus is dead */
  3340. } else {
  3341. ha->bus_settings[bus].scsi_bus_dead = 0;
  3342. ha->bus_settings[bus].failed_reset_count = 0;
  3343. }
  3344. }
  3345. return 0; /* bus is not dead */
  3346. }
  3347. static void
  3348. qla1280_get_target_parameters(struct scsi_qla_host *ha,
  3349. struct scsi_device *device)
  3350. {
  3351. uint16_t mb[MAILBOX_REGISTER_COUNT];
  3352. int bus, target, lun;
  3353. bus = device->channel;
  3354. target = device->id;
  3355. lun = device->lun;
  3356. mb[0] = MBC_GET_TARGET_PARAMETERS;
  3357. mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
  3358. mb[1] <<= 8;
  3359. qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
  3360. &mb[0]);
  3361. printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
  3362. if (mb[3] != 0) {
  3363. printk(" Sync: period %d, offset %d",
  3364. (mb[3] & 0xff), (mb[3] >> 8));
  3365. if (mb[2] & BIT_13)
  3366. printk(", Wide");
  3367. if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
  3368. printk(", DT");
  3369. } else
  3370. printk(" Async");
  3371. if (device->simple_tags)
  3372. printk(", Tagged queuing: depth %d", device->queue_depth);
  3373. printk("\n");
  3374. }
  3375. #if DEBUG_QLA1280
  3376. static void
  3377. __qla1280_dump_buffer(char *b, int size)
  3378. {
  3379. int cnt;
  3380. u8 c;
  3381. printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
  3382. "Bh Ch Dh Eh Fh\n");
  3383. printk(KERN_DEBUG "---------------------------------------------"
  3384. "------------------\n");
  3385. for (cnt = 0; cnt < size;) {
  3386. c = *b++;
  3387. printk("0x%02x", c);
  3388. cnt++;
  3389. if (!(cnt % 16))
  3390. printk("\n");
  3391. else
  3392. printk(" ");
  3393. }
  3394. if (cnt % 16)
  3395. printk("\n");
  3396. }
  3397. /**************************************************************************
  3398. * ql1280_print_scsi_cmd
  3399. *
  3400. **************************************************************************/
  3401. static void
  3402. __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
  3403. {
  3404. struct scsi_qla_host *ha;
  3405. struct Scsi_Host *host = CMD_HOST(cmd);
  3406. struct srb *sp;
  3407. /* struct scatterlist *sg; */
  3408. int i;
  3409. ha = (struct scsi_qla_host *)host->hostdata;
  3410. sp = (struct srb *)CMD_SP(cmd);
  3411. printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
  3412. printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
  3413. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
  3414. CMD_CDBLEN(cmd));
  3415. printk(" CDB = ");
  3416. for (i = 0; i < cmd->cmd_len; i++) {
  3417. printk("0x%02x ", cmd->cmnd[i]);
  3418. }
  3419. printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
  3420. printk(" request buffer=0x%p, request buffer len=0x%x\n",
  3421. scsi_sglist(cmd), scsi_bufflen(cmd));
  3422. /* if (cmd->use_sg)
  3423. {
  3424. sg = (struct scatterlist *) cmd->request_buffer;
  3425. printk(" SG buffer: \n");
  3426. qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
  3427. } */
  3428. printk(" tag=%d, transfersize=0x%x \n",
  3429. cmd->tag, cmd->transfersize);
  3430. printk(" SP=0x%p\n", CMD_SP(cmd));
  3431. printk(" underflow size = 0x%x, direction=0x%x\n",
  3432. cmd->underflow, cmd->sc_data_direction);
  3433. }
  3434. /**************************************************************************
  3435. * ql1280_dump_device
  3436. *
  3437. **************************************************************************/
  3438. static void
  3439. ql1280_dump_device(struct scsi_qla_host *ha)
  3440. {
  3441. struct scsi_cmnd *cp;
  3442. struct srb *sp;
  3443. int i;
  3444. printk(KERN_DEBUG "Outstanding Commands on controller:\n");
  3445. for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
  3446. if ((sp = ha->outstanding_cmds[i]) == NULL)
  3447. continue;
  3448. if ((cp = sp->cmd) == NULL)
  3449. continue;
  3450. qla1280_print_scsi_cmd(1, cp);
  3451. }
  3452. }
  3453. #endif
  3454. enum tokens {
  3455. TOKEN_NVRAM,
  3456. TOKEN_SYNC,
  3457. TOKEN_WIDE,
  3458. TOKEN_PPR,
  3459. TOKEN_VERBOSE,
  3460. TOKEN_DEBUG,
  3461. };
  3462. struct setup_tokens {
  3463. char *token;
  3464. int val;
  3465. };
  3466. static struct setup_tokens setup_token[] __initdata =
  3467. {
  3468. { "nvram", TOKEN_NVRAM },
  3469. { "sync", TOKEN_SYNC },
  3470. { "wide", TOKEN_WIDE },
  3471. { "ppr", TOKEN_PPR },
  3472. { "verbose", TOKEN_VERBOSE },
  3473. { "debug", TOKEN_DEBUG },
  3474. };
  3475. /**************************************************************************
  3476. * qla1280_setup
  3477. *
  3478. * Handle boot parameters. This really needs to be changed so one
  3479. * can specify per adapter parameters.
  3480. **************************************************************************/
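/*
 * Option string syntax (derived from the parser below; the values are
 * only examples):
 *
 *	qla1280=verbose:yes;nvram:no;sync:0xff
 *
 * i.e. a ';'-separated list of "option:value" pairs, where each value
 * is "yes", "no" or a number.  The same string can be given on the
 * kernel command line or as the module's "qla1280" parameter.
 */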
  3481. static int __init
  3482. qla1280_setup(char *s)
  3483. {
  3484. char *cp, *ptr;
  3485. unsigned long val;
  3486. int toke;
  3487. cp = s;
  3488. while (cp && (ptr = strchr(cp, ':'))) {
  3489. ptr++;
3490. if (!strncmp(ptr, "yes", 3)) {
3491. val = 0x10000;
3492. ptr += 3;
3493. } else if (!strncmp(ptr, "no", 2)) {
  3494. val = 0;
  3495. ptr += 2;
  3496. } else
  3497. val = simple_strtoul(ptr, &ptr, 0);
  3498. switch ((toke = qla1280_get_token(cp))) {
  3499. case TOKEN_NVRAM:
  3500. if (!val)
  3501. driver_setup.no_nvram = 1;
  3502. break;
  3503. case TOKEN_SYNC:
  3504. if (!val)
  3505. driver_setup.no_sync = 1;
  3506. else if (val != 0x10000)
  3507. driver_setup.sync_mask = val;
  3508. break;
  3509. case TOKEN_WIDE:
  3510. if (!val)
  3511. driver_setup.no_wide = 1;
  3512. else if (val != 0x10000)
  3513. driver_setup.wide_mask = val;
  3514. break;
  3515. case TOKEN_PPR:
  3516. if (!val)
  3517. driver_setup.no_ppr = 1;
  3518. else if (val != 0x10000)
  3519. driver_setup.ppr_mask = val;
  3520. break;
  3521. case TOKEN_VERBOSE:
  3522. qla1280_verbose = val;
  3523. break;
  3524. default:
  3525. printk(KERN_INFO "qla1280: unknown boot option %s\n",
  3526. cp);
  3527. }
  3528. cp = strchr(ptr, ';');
  3529. if (cp)
  3530. cp++;
  3531. else {
  3532. break;
  3533. }
  3534. }
  3535. return 1;
  3536. }
  3537. static int __init
  3538. qla1280_get_token(char *str)
  3539. {
  3540. char *sep;
  3541. long ret = -1;
  3542. int i;
  3543. sep = strchr(str, ':');
  3544. if (sep) {
  3545. for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
  3546. if (!strncmp(setup_token[i].token, str, (sep - str))) {
  3547. ret = setup_token[i].val;
  3548. break;
  3549. }
  3550. }
  3551. }
  3552. return ret;
  3553. }
  3554. static struct scsi_host_template qla1280_driver_template = {
  3555. .module = THIS_MODULE,
  3556. .proc_name = "qla1280",
  3557. .name = "Qlogic ISP 1280/12160",
  3558. .info = qla1280_info,
  3559. .slave_configure = qla1280_slave_configure,
  3560. .queuecommand = qla1280_queuecommand,
  3561. .eh_abort_handler = qla1280_eh_abort,
  3562. .eh_device_reset_handler= qla1280_eh_device_reset,
  3563. .eh_bus_reset_handler = qla1280_eh_bus_reset,
  3564. .eh_host_reset_handler = qla1280_eh_adapter_reset,
  3565. .bios_param = qla1280_biosparam,
  3566. .can_queue = MAX_OUTSTANDING_COMMANDS,
  3567. .this_id = -1,
  3568. .sg_tablesize = SG_ALL,
  3569. };
  3570. static int
  3571. qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  3572. {
  3573. int devnum = id->driver_data;
  3574. struct qla_boards *bdp = &ql1280_board_tbl[devnum];
  3575. struct Scsi_Host *host;
  3576. struct scsi_qla_host *ha;
  3577. int error = -ENODEV;
  3578. /* Bypass all AMI SUBSYS VENDOR IDs */
  3579. if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
  3580. printk(KERN_INFO
  3581. "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
  3582. goto error;
  3583. }
  3584. printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
  3585. bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
  3586. if (pci_enable_device(pdev)) {
  3587. printk(KERN_WARNING
  3588. "qla1280: Failed to enabled pci device, aborting.\n");
  3589. goto error;
  3590. }
  3591. pci_set_master(pdev);
  3592. error = -ENOMEM;
  3593. host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
  3594. if (!host) {
  3595. printk(KERN_WARNING
  3596. "qla1280: Failed to register host, aborting.\n");
  3597. goto error_disable_device;
  3598. }
  3599. ha = (struct scsi_qla_host *)host->hostdata;
  3600. memset(ha, 0, sizeof(struct scsi_qla_host));
  3601. ha->pdev = pdev;
  3602. ha->devnum = devnum; /* specifies microcode load address */
  3603. #ifdef QLA_64BIT_PTR
  3604. if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
  3605. if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
  3606. printk(KERN_WARNING "scsi(%li): Unable to set a "
  3607. "suitable DMA mask - aborting\n", ha->host_no);
  3608. error = -ENODEV;
  3609. goto error_put_host;
  3610. }
  3611. } else
  3612. dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
  3613. ha->host_no);
  3614. #else
  3615. if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
  3616. printk(KERN_WARNING "scsi(%li): Unable to set a "
  3617. "suitable DMA mask - aborting\n", ha->host_no);
  3618. error = -ENODEV;
  3619. goto error_put_host;
  3620. }
  3621. #endif
  3622. ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
  3623. ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
  3624. &ha->request_dma, GFP_KERNEL);
  3625. if (!ha->request_ring) {
  3626. printk(KERN_INFO "qla1280: Failed to get request memory\n");
  3627. goto error_put_host;
  3628. }
  3629. ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
  3630. ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
  3631. &ha->response_dma, GFP_KERNEL);
  3632. if (!ha->response_ring) {
  3633. printk(KERN_INFO "qla1280: Failed to get response memory\n");
  3634. goto error_free_request_ring;
  3635. }
  3636. ha->ports = bdp->numPorts;
  3637. ha->host = host;
  3638. ha->host_no = host->host_no;
  3639. host->irq = pdev->irq;
  3640. host->max_channel = bdp->numPorts - 1;
  3641. host->max_lun = MAX_LUNS - 1;
  3642. host->max_id = MAX_TARGETS;
  3643. host->max_sectors = 1024;
  3644. host->unique_id = host->host_no;
  3645. error = -ENODEV;
  3646. #if MEMORY_MAPPED_IO
  3647. ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
  3648. if (!ha->mmpbase) {
  3649. printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
  3650. goto error_free_response_ring;
  3651. }
  3652. host->base = (unsigned long)ha->mmpbase;
  3653. ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
  3654. #else
  3655. host->io_port = pci_resource_start(ha->pdev, 0);
  3656. if (!request_region(host->io_port, 0xff, "qla1280")) {
  3657. printk(KERN_INFO "qla1280: Failed to reserve i/o region "
  3658. "0x%04lx-0x%04lx - already in use\n",
  3659. host->io_port, host->io_port + 0xff);
  3660. goto error_free_response_ring;
  3661. }
  3662. ha->iobase = (struct device_reg *)host->io_port;
  3663. #endif
  3664. INIT_LIST_HEAD(&ha->done_q);
  3665. /* Disable ISP interrupts. */
  3666. qla1280_disable_intrs(ha);
  3667. if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
  3668. "qla1280", ha)) {
  3669. printk("qla1280 : Failed to reserve interrupt %d already "
  3670. "in use\n", pdev->irq);
  3671. goto error_release_region;
  3672. }
3673. /* load the firmware, read parameters, and init the hardware */
  3674. if (qla1280_initialize_adapter(ha)) {
  3675. printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
  3676. goto error_free_irq;
  3677. }
  3678. /* set our host ID (need to do something about our two IDs) */
  3679. host->this_id = ha->bus_settings[0].id;
  3680. pci_set_drvdata(pdev, host);
  3681. error = scsi_add_host(host, &pdev->dev);
  3682. if (error)
  3683. goto error_disable_adapter;
  3684. scsi_scan_host(host);
  3685. return 0;
  3686. error_disable_adapter:
  3687. qla1280_disable_intrs(ha);
  3688. error_free_irq:
  3689. free_irq(pdev->irq, ha);
  3690. error_release_region:
  3691. #if MEMORY_MAPPED_IO
  3692. iounmap(ha->mmpbase);
  3693. #else
  3694. release_region(host->io_port, 0xff);
  3695. #endif
  3696. error_free_response_ring:
  3697. dma_free_coherent(&ha->pdev->dev,
  3698. ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
  3699. ha->response_ring, ha->response_dma);
  3700. error_free_request_ring:
  3701. dma_free_coherent(&ha->pdev->dev,
  3702. ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
  3703. ha->request_ring, ha->request_dma);
  3704. error_put_host:
  3705. scsi_host_put(host);
  3706. error_disable_device:
  3707. pci_disable_device(pdev);
  3708. error:
  3709. return error;
  3710. }
  3711. static void
  3712. qla1280_remove_one(struct pci_dev *pdev)
  3713. {
  3714. struct Scsi_Host *host = pci_get_drvdata(pdev);
  3715. struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
  3716. scsi_remove_host(host);
  3717. qla1280_disable_intrs(ha);
  3718. free_irq(pdev->irq, ha);
  3719. #if MEMORY_MAPPED_IO
  3720. iounmap(ha->mmpbase);
  3721. #else
  3722. release_region(host->io_port, 0xff);
  3723. #endif
  3724. dma_free_coherent(&ha->pdev->dev,
  3725. ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
  3726. ha->request_ring, ha->request_dma);
  3727. dma_free_coherent(&ha->pdev->dev,
  3728. ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
  3729. ha->response_ring, ha->response_dma);
  3730. pci_disable_device(pdev);
  3731. scsi_host_put(host);
  3732. }
  3733. static struct pci_driver qla1280_pci_driver = {
  3734. .name = "qla1280",
  3735. .id_table = qla1280_pci_tbl,
  3736. .probe = qla1280_probe_one,
  3737. .remove = qla1280_remove_one,
  3738. };
  3739. static int __init
  3740. qla1280_init(void)
  3741. {
  3742. if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
  3743. printk(KERN_WARNING
  3744. "qla1280: struct srb too big, aborting\n");
  3745. return -EINVAL;
  3746. }
  3747. #ifdef MODULE
3748. /*
3749. * If we are called as a module, the qla1280 pointer may not be null
3750. * and it would point to our bootup string, just like on the lilo
3751. * command line. If it is not NULL, process this config string with
3752. * qla1280_setup.
3753. *
3754. * Boot time Options
3755. * To add options at boot time, add a line to your lilo.conf file like:
3756. * append="qla1280=verbose:yes;wide:no"
3757. * which enables verbose messages and disables wide negotiation on
3758. * every controller handled by this driver.
3759. */
  3760. if (qla1280)
  3761. qla1280_setup(qla1280);
  3762. #endif
  3763. return pci_register_driver(&qla1280_pci_driver);
  3764. }
  3765. static void __exit
  3766. qla1280_exit(void)
  3767. {
  3768. int i;
  3769. pci_unregister_driver(&qla1280_pci_driver);
  3770. /* release any allocated firmware images */
  3771. for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
  3772. release_firmware(qla1280_fw_tbl[i].fw);
  3773. qla1280_fw_tbl[i].fw = NULL;
  3774. }
  3775. }
  3776. module_init(qla1280_init);
  3777. module_exit(qla1280_exit);
  3778. MODULE_AUTHOR("Qlogic & Jes Sorensen");
  3779. MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
  3780. MODULE_LICENSE("GPL");
  3781. MODULE_FIRMWARE("qlogic/1040.bin");
  3782. MODULE_FIRMWARE("qlogic/1280.bin");
  3783. MODULE_FIRMWARE("qlogic/12160.bin");
  3784. MODULE_VERSION(QLA1280_VERSION);
  3785. /*
  3786. * Overrides for Emacs so that we almost follow Linus's tabbing style.
  3787. * Emacs will notice this stuff at the end of the file and automatically
  3788. * adjust the settings for this buffer only. This must remain at the end
  3789. * of the file.
  3790. * ---------------------------------------------------------------------------
  3791. * Local variables:
  3792. * c-basic-offset: 8
  3793. * tab-width: 8
  3794. * End:
  3795. */