
/drivers/scsi/qla1280.c

https://bitbucket.org/abioy/linux
  1. /******************************************************************************
  2. * QLOGIC LINUX SOFTWARE
  3. *
  4. * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
  5. * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
  6. * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
  7. * Copyright (C) 2003-2004 Christoph Hellwig
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the
  11. * Free Software Foundation; either version 2, or (at your option) any
  12. * later version.
  13. *
  14. * This program is distributed in the hope that it will be useful, but
  15. * WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * General Public License for more details.
  18. *
  19. ******************************************************************************/
  20. #define QLA1280_VERSION "3.27.1"
  21. /*****************************************************************************
  22. Revision History:
  23. Rev 3.27.1, February 8, 2010, Michael Reed
  24. - Retain firmware image for error recovery.
  25. Rev 3.27, February 10, 2009, Michael Reed
  26. - General code cleanup.
  27. - Improve error recovery.
  28. Rev 3.26, January 16, 2006 Jes Sorensen
  29. - Ditch all < 2.6 support
  30. Rev 3.25.1, February 10, 2005 Christoph Hellwig
  31. - use pci_map_single to map non-S/G requests
  32. - remove qla1280_proc_info
  33. Rev 3.25, September 28, 2004, Christoph Hellwig
  34. - add support for ISP1020/1040
  35. - don't include "scsi.h" anymore for 2.6.x
  36. Rev 3.24.4 June 7, 2004 Christoph Hellwig
  37. - restructure firmware loading, cleanup initialization code
  38. - prepare support for ISP1020/1040 chips
  39. Rev 3.24.3 January 19, 2004, Jes Sorensen
  40. - Handle PCI DMA mask settings correctly
  41. - Correct order of error handling in probe_one, free_irq should not
  42. be called if request_irq failed
  43. Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
  44. - Big endian fixes (James)
  45. - Remove bogus IOCB content on zero data transfer commands (Andrew)
  46. Rev 3.24.1 January 5, 2004, Jes Sorensen
  47. - Initialize completion queue to avoid OOPS on probe
  48. - Handle interrupts during mailbox testing
  49. Rev 3.24 November 17, 2003, Christoph Hellwig
  50. - use struct list_head for completion queue
  51. - avoid old Scsi_FOO typedefs
  52. - cleanup 2.4 compat glue a bit
  53. - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
  54. - make initialization for memory mapped vs port I/O more similar
  55. - remove broken pci config space manipulation
  56. - kill more cruft
  57. - this is an almost perfect 2.6 scsi driver now! ;)
  58. Rev 3.23.39 December 17, 2003, Jes Sorensen
59. - Delete completion queue from srb if mailbox command failed,
60. to avoid qla1280_done completing qla1280_error_action's
  61. obsolete context
  62. - Reduce arguments for qla1280_done
  63. Rev 3.23.38 October 18, 2003, Christoph Hellwig
  64. - Convert to new-style hotplugable driver for 2.6
  65. - Fix missing scsi_unregister/scsi_host_put on HBA removal
  66. - Kill some more cruft
  67. Rev 3.23.37 October 1, 2003, Jes Sorensen
  68. - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
  69. random CONFIG option
  70. - Clean up locking in probe path
  71. Rev 3.23.36 October 1, 2003, Christoph Hellwig
  72. - queuecommand only ever receives new commands - clear flags
  73. - Reintegrate lost fixes from Linux 2.5
  74. Rev 3.23.35 August 14, 2003, Jes Sorensen
  75. - Build against 2.6
  76. Rev 3.23.34 July 23, 2003, Jes Sorensen
  77. - Remove pointless TRUE/FALSE macros
  78. - Clean up vchan handling
  79. Rev 3.23.33 July 3, 2003, Jes Sorensen
  80. - Don't define register access macros before define determining MMIO.
81. This just happened to work out on ia64 but not elsewhere.
  82. - Don't try and read from the card while it is in reset as
  83. it won't respond and causes an MCA
  84. Rev 3.23.32 June 23, 2003, Jes Sorensen
  85. - Basic support for boot time arguments
  86. Rev 3.23.31 June 8, 2003, Jes Sorensen
  87. - Reduce boot time messages
  88. Rev 3.23.30 June 6, 2003, Jes Sorensen
  89. - Do not enable sync/wide/ppr before it has been determined
  90. that the target device actually supports it
  91. - Enable DMA arbitration for multi channel controllers
  92. Rev 3.23.29 June 3, 2003, Jes Sorensen
  93. - Port to 2.5.69
  94. Rev 3.23.28 June 3, 2003, Jes Sorensen
  95. - Eliminate duplicate marker commands on bus resets
  96. - Handle outstanding commands appropriately on bus/device resets
  97. Rev 3.23.27 May 28, 2003, Jes Sorensen
  98. - Remove bogus input queue code, let the Linux SCSI layer do the work
  99. - Clean up NVRAM handling, only read it once from the card
  100. - Add a number of missing default nvram parameters
  101. Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
  102. - Use completion queue for mailbox commands instead of busy wait
  103. Rev 3.23.25 Beta May 27, 2003, James Bottomley
  104. - Migrate to use new error handling code
  105. Rev 3.23.24 Beta May 21, 2003, James Bottomley
  106. - Big endian support
  107. - Cleanup data direction code
  108. Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
  109. - Switch to using MMIO instead of PIO
  110. Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
  111. - Fix PCI parity problem with 12160 during reset.
  112. Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
  113. - Use pci_map_page()/pci_unmap_page() instead of map_single version.
  114. Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
  115. - Remove < 2.4.x support
  116. - Introduce HOST_LOCK to make the spin lock changes portable.
  117. - Remove a bunch of idiotic and unnecessary typedef's
  118. - Kill all leftovers of target-mode support which never worked anyway
  119. Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
  120. - Do qla1280_pci_config() before calling request_irq() and
  121. request_region()
  122. - Use pci_dma_hi32() to handle upper word of DMA addresses instead
  123. of large shifts
  124. - Hand correct arguments to free_irq() in case of failure
  125. Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
  126. - Run source through Lindent and clean up the output
  127. Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
  128. - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
  129. Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
  130. - Rely on mailbox commands generating interrupts - do not
  131. run qla1280_isr() from ql1280_mailbox_command()
  132. - Remove device_reg_t
  133. - Integrate ql12160_set_target_parameters() with 1280 version
  134. - Make qla1280_setup() non static
  135. - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
  136. sent to the card - this command pauses the firmware!!!
  137. Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
  138. - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
  139. - Remove a pile of pointless and confusing (srb_t **) and
  140. (scsi_lu_t *) typecasts
  141. - Explicit mark that we do not use the new error handling (for now)
  142. - Remove scsi_qla_host_t and use 'struct' instead
  143. - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
  144. pci_64bit_slot flags which weren't used for anything anyway
  145. - Grab host->host_lock while calling qla1280_isr() from abort()
  146. - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
  147. do not need to save/restore flags in the interrupt handler
  148. - Enable interrupts early (before any mailbox access) in preparation
  149. for cleaning up the mailbox handling
  150. Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
  151. - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
  152. it with proper use of dprintk().
  153. - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
  154. a debug level argument to determine if data is to be printed
  155. - Add KERN_* info to printk()
  156. Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
  157. - Significant cosmetic cleanups
  158. - Change debug code to use dprintk() and remove #if mess
  159. Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
  160. - More cosmetic cleanups, fix places treating return as function
  161. - use cpu_relax() in qla1280_debounce_register()
  162. Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
  163. - Make it compile under 2.5.5
  164. Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
  165. - Do no typecast short * to long * in QL1280BoardTbl, this
  166. broke miserably on big endian boxes
  167. Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
  168. - Remove pre 2.2 hack for checking for reentrance in interrupt handler
  169. - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
  170. unsigned int to match the types from struct scsi_cmnd
  171. Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
  172. - Remove bogus timer_t typedef from qla1280.h
  173. - Remove obsolete pre 2.2 PCI setup code, use proper #define's
  174. for PCI_ values, call pci_set_master()
  175. - Fix memleak of qla1280_buffer on module unload
  176. - Only compile module parsing code #ifdef MODULE - should be
  177. changed to use individual MODULE_PARM's later
  178. - Remove dummy_buffer that was never modified nor printed
  179. - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
  180. #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
  181. - Remove \r from print statements, this is Linux, not DOS
  182. - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
  183. dummy macros
184. - Remove C++ compile hack in header file as Linux drivers are not
  185. supposed to be compiled as C++
  186. - Kill MS_64BITS macro as it makes the code more readable
  187. - Remove unnecessary flags.in_interrupts bit
  188. Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189. - Don't check for set flags on q->q_flag one by one in qla1280_next()
  190. - Check whether the interrupt was generated by the QLA1280 before
  191. doing any processing
  192. - qla1280_status_entry(): Only zero out part of sense_buffer that
  193. is not being copied into
194. - Remove more superfluous typecasts
  195. - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
  196. Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
  197. - Don't walk the entire list in qla1280_putq_t() just to directly
  198. grab the pointer to the last element afterwards
  199. Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200. - Don't use IRQF_DISABLED, its use is deprecated for this kinda driver
  201. Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
  202. - Set dev->max_sectors to 1024
  203. Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
  204. - Provide compat macros for pci_enable_device(), pci_find_subsys()
  205. and scsi_set_pci_device()
  206. - Call scsi_set_pci_device() for all devices
  207. - Reduce size of kernel version dependent device probe code
  208. - Move duplicate probe/init code to separate function
  209. - Handle error if qla1280_mem_alloc() fails
  210. - Kill OFFSET() macro and use Linux's PCI definitions instead
  211. - Kill private structure defining PCI config space (struct config_reg)
  212. - Only allocate I/O port region if not in MMIO mode
213. - Remove duplicate (unused) sanity check of size of srb_t
  214. Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
  215. - Change home-brew memset() implementations to use memset()
  216. - Remove all references to COMTRACE() - accessing a PC's COM2 serial
  217. port directly is not legal under Linux.
  218. Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
  219. - Remove pre 2.2 kernel support
  220. - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
  221. - Fix MMIO access to use readl/writel instead of directly
  222. dereferencing pointers
  223. - Nuke MSDOS debugging code
  224. - Change true/false data types to int from uint8_t
  225. - Use int for counters instead of uint8_t etc.
  226. - Clean up size & byte order conversion macro usage
  227. Rev 3.23 Beta January 11, 2001 BN Qlogic
  228. - Added check of device_id when handling non
  229. QLA12160s during detect().
  230. Rev 3.22 Beta January 5, 2001 BN Qlogic
  231. - Changed queue_task() to schedule_task()
  232. for kernels 2.4.0 and higher.
  233. Note: 2.4.0-testxx kernels released prior to
  234. the actual 2.4.0 kernel release on January 2001
  235. will get compile/link errors with schedule_task().
  236. Please update your kernel to released 2.4.0 level,
  237. or comment lines in this file flagged with 3.22
  238. to resolve compile/link error of schedule_task().
  239. - Added -DCONFIG_SMP in addition to -D__SMP__
  240. in Makefile for 2.4.0 builds of driver as module.
  241. Rev 3.21 Beta January 4, 2001 BN Qlogic
  242. - Changed criteria of 64/32 Bit mode of HBA
  243. operation according to BITS_PER_LONG rather
  244. than HBA's NVRAM setting of >4Gig memory bit;
  245. so that the HBA auto-configures without the need
  246. to setup each system individually.
  247. Rev 3.20 Beta December 5, 2000 BN Qlogic
  248. - Added priority handling to IA-64 onboard SCSI
  249. ISP12160 chip for kernels greater than 2.3.18.
  250. - Added irqrestore for qla1280_intr_handler.
  251. - Enabled /proc/scsi/qla1280 interface.
  252. - Clear /proc/scsi/qla1280 counters in detect().
  253. Rev 3.19 Beta October 13, 2000 BN Qlogic
  254. - Declare driver_template for new kernel
  255. (2.4.0 and greater) scsi initialization scheme.
  256. - Update /proc/scsi entry for 2.3.18 kernels and
  257. above as qla1280
  258. Rev 3.18 Beta October 10, 2000 BN Qlogic
  259. - Changed scan order of adapters to map
  260. the QLA12160 followed by the QLA1280.
  261. Rev 3.17 Beta September 18, 2000 BN Qlogic
  262. - Removed warnings for 32 bit 2.4.x compiles
  263. - Corrected declared size for request and response
  264. DMA addresses that are kept in each ha
  265. Rev. 3.16 Beta August 25, 2000 BN Qlogic
  266. - Corrected 64 bit addressing issue on IA-64
  267. where the upper 32 bits were not properly
  268. passed to the RISC engine.
  269. Rev. 3.15 Beta August 22, 2000 BN Qlogic
  270. - Modified qla1280_setup_chip to properly load
271. ISP firmware for greater than 4 Gig memory on IA-64
  272. Rev. 3.14 Beta August 16, 2000 BN Qlogic
  273. - Added setting of dma_mask to full 64 bit
  274. if flags.enable_64bit_addressing is set in NVRAM
  275. Rev. 3.13 Beta August 16, 2000 BN Qlogic
  276. - Use new PCI DMA mapping APIs for 2.4.x kernel
  277. Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
  278. - Added check of pci_enable_device to detect() for 2.3.x
  279. - Use pci_resource_start() instead of
  280. pdev->resource[0].start in detect() for 2.3.x
  281. - Updated driver version
  282. Rev. 3.11 July 14, 2000 BN Qlogic
  283. - Updated SCSI Firmware to following versions:
  284. qla1x80: 8.13.08
  285. qla1x160: 10.04.08
  286. - Updated driver version to 3.11
  287. Rev. 3.10 June 23, 2000 BN Qlogic
  288. - Added filtering of AMI SubSys Vendor ID devices
  289. Rev. 3.9
  290. - DEBUG_QLA1280 undefined and new version BN Qlogic
  291. Rev. 3.08b May 9, 2000 MD Dell
  292. - Added logic to check against AMI subsystem vendor ID
  293. Rev. 3.08 May 4, 2000 DG Qlogic
  294. - Added logic to check for PCI subsystem ID.
  295. Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
  296. - Updated SCSI Firmware to following versions:
  297. qla12160: 10.01.19
  298. qla1280: 8.09.00
  299. Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
  300. - Internal revision; not released
  301. Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
  302. - Edit correction for virt_to_bus and PROC.
  303. Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
  304. - Merge changes from ia64 port.
  305. Rev. 3.03 Mar 28, 2000 BN Qlogic
  306. - Increase version to reflect new code drop with compile fix
  307. of issue with inclusion of linux/spinlock for 2.3 kernels
  308. Rev. 3.02 Mar 15, 2000 BN Qlogic
  309. - Merge qla1280_proc_info from 2.10 code base
  310. Rev. 3.01 Feb 10, 2000 BN Qlogic
  311. - Corrected code to compile on a 2.2.x kernel.
  312. Rev. 3.00 Jan 17, 2000 DG Qlogic
  313. - Added 64-bit support.
  314. Rev. 2.07 Nov 9, 1999 DG Qlogic
  315. - Added new routine to set target parameters for ISP12160.
  316. Rev. 2.06 Sept 10, 1999 DG Qlogic
  317. - Added support for ISP12160 Ultra 3 chip.
  318. Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
  319. - Modified code to remove errors generated when compiling with
  320. Cygnus IA64 Compiler.
  321. - Changed conversion of pointers to unsigned longs instead of integers.
  322. - Changed type of I/O port variables from uint32_t to unsigned long.
  323. - Modified OFFSET macro to work with 64-bit as well as 32-bit.
  324. - Changed sprintf and printk format specifiers for pointers to %p.
  325. - Changed some int to long type casts where needed in sprintf & printk.
  326. - Added l modifiers to sprintf and printk format specifiers for longs.
  327. - Removed unused local variables.
  328. Rev. 1.20 June 8, 1999 DG, Qlogic
  329. Changes to support RedHat release 6.0 (kernel 2.2.5).
  330. - Added SCSI exclusive access lock (io_request_lock) when accessing
  331. the adapter.
  332. - Added changes for the new LINUX interface template. Some new error
  333. handling routines have been added to the template, but for now we
  334. will use the old ones.
  335. - Initial Beta Release.
  336. *****************************************************************************/
  337. #include <linux/module.h>
  338. #include <linux/types.h>
  339. #include <linux/string.h>
  340. #include <linux/errno.h>
  341. #include <linux/kernel.h>
  342. #include <linux/ioport.h>
  343. #include <linux/delay.h>
  344. #include <linux/timer.h>
  345. #include <linux/pci.h>
  346. #include <linux/proc_fs.h>
  347. #include <linux/stat.h>
  348. #include <linux/pci_ids.h>
  349. #include <linux/interrupt.h>
  350. #include <linux/init.h>
  351. #include <linux/dma-mapping.h>
  352. #include <linux/firmware.h>
  353. #include <asm/io.h>
  354. #include <asm/irq.h>
  355. #include <asm/byteorder.h>
  356. #include <asm/processor.h>
  357. #include <asm/types.h>
  358. #include <asm/system.h>
  359. #include <scsi/scsi.h>
  360. #include <scsi/scsi_cmnd.h>
  361. #include <scsi/scsi_device.h>
  362. #include <scsi/scsi_host.h>
  363. #include <scsi/scsi_tcq.h>
  364. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  365. #include <asm/sn/io.h>
  366. #endif
  367. /*
  368. * Compile time Options:
  369. * 0 - Disable and 1 - Enable
  370. */
  371. #define DEBUG_QLA1280_INTR 0
  372. #define DEBUG_PRINT_NVRAM 0
  373. #define DEBUG_QLA1280 0
  374. /*
  375. * The SGI VISWS is broken and doesn't support MMIO ;-(
  376. */
  377. #ifdef CONFIG_X86_VISWS
  378. #define MEMORY_MAPPED_IO 0
  379. #else
  380. #define MEMORY_MAPPED_IO 1
  381. #endif
  382. #include "qla1280.h"
  383. #ifndef BITS_PER_LONG
  384. #error "BITS_PER_LONG not defined!"
  385. #endif
  386. #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
  387. #define QLA_64BIT_PTR 1
  388. #endif
  389. #ifdef QLA_64BIT_PTR
  390. #define pci_dma_hi32(a) ((a >> 16) >> 16)
  391. #else
  392. #define pci_dma_hi32(a) 0
  393. #endif
  394. #define pci_dma_lo32(a) (a & 0xffffffff)
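/*
 * Illustrative sketch (not part of the driver build): how a bus address
 * obtained from pci_map_single() would be split with the two macros
 * above before being written into the 32-bit address words of a request
 * IOCB.  The helper name and the '__le32' destination pointers are
 * hypothetical; the start_scsi routines use the same split inline when
 * they build scatter/gather entries.
 */
#if 0
static void qla1280_example_split_dma(dma_addr_t dma_handle,
				      __le32 *addr_lo, __le32 *addr_hi)
{
	/* low 32 bits always go out; high 32 bits are 0 on 32-bit builds */
	*addr_lo = cpu_to_le32(pci_dma_lo32(dma_handle));
	*addr_hi = cpu_to_le32(pci_dma_hi32(dma_handle));
}
#endif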
395. #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
  396. #if defined(__ia64__) && !defined(ia64_platform_is)
397. #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
  398. #endif
  399. #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
  400. #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
  401. ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
  402. #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
  403. ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
  404. static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
  405. static void qla1280_remove_one(struct pci_dev *);
  406. /*
  407. * QLogic Driver Support Function Prototypes.
  408. */
  409. static void qla1280_done(struct scsi_qla_host *);
  410. static int qla1280_get_token(char *);
  411. static int qla1280_setup(char *s) __init;
  412. /*
  413. * QLogic ISP1280 Hardware Support Function Prototypes.
  414. */
  415. static int qla1280_load_firmware(struct scsi_qla_host *);
  416. static int qla1280_init_rings(struct scsi_qla_host *);
  417. static int qla1280_nvram_config(struct scsi_qla_host *);
  418. static int qla1280_mailbox_command(struct scsi_qla_host *,
  419. uint8_t, uint16_t *);
  420. static int qla1280_bus_reset(struct scsi_qla_host *, int);
  421. static int qla1280_device_reset(struct scsi_qla_host *, int, int);
  422. static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
  423. static int qla1280_abort_isp(struct scsi_qla_host *);
  424. #ifdef QLA_64BIT_PTR
  425. static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
  426. #else
  427. static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
  428. #endif
  429. static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
  430. static void qla1280_poll(struct scsi_qla_host *);
  431. static void qla1280_reset_adapter(struct scsi_qla_host *);
  432. static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
  433. static void qla1280_isp_cmd(struct scsi_qla_host *);
  434. static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
  435. static void qla1280_rst_aen(struct scsi_qla_host *);
  436. static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
  437. struct list_head *);
  438. static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
  439. struct list_head *);
  440. static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
  441. static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
  442. static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
  443. static request_t *qla1280_req_pkt(struct scsi_qla_host *);
  444. static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
  445. unsigned int);
  446. static void qla1280_get_target_parameters(struct scsi_qla_host *,
  447. struct scsi_device *);
  448. static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
  449. static struct qla_driver_setup driver_setup;
  450. /*
  451. * convert scsi data direction to request_t control flags
  452. */
  453. static inline uint16_t
  454. qla1280_data_direction(struct scsi_cmnd *cmnd)
  455. {
  456. switch(cmnd->sc_data_direction) {
  457. case DMA_FROM_DEVICE:
  458. return BIT_5;
  459. case DMA_TO_DEVICE:
  460. return BIT_6;
  461. case DMA_BIDIRECTIONAL:
  462. return BIT_5 | BIT_6;
  463. /*
464. * We could BUG() on default here if one of the four cases isn't
  465. * met, but then again if we receive something like that from the
  466. * SCSI layer we have more serious problems. This shuts up GCC.
  467. */
  468. case DMA_NONE:
  469. default:
  470. return 0;
  471. }
  472. }
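/*
 * Illustrative only: the value returned above is meant to be OR'd into
 * the control-flags word of a request IOCB before it is handed to the
 * RISC.  'struct cmd_entry' and its 'control_flags' field stand in for
 * the request packet layout declared in qla1280.h; treat the names as
 * assumptions, not a definition of the interface.
 */
#if 0
static inline void
qla1280_example_set_direction(struct cmd_entry *pkt, struct scsi_cmnd *cmd)
{
	pkt->control_flags |= cpu_to_le16(qla1280_data_direction(cmd));
}
#endif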
  473. #if DEBUG_QLA1280
  474. static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
  475. static void __qla1280_dump_buffer(char *, int);
  476. #endif
  477. /*
  478. * insmod needs to find the variable and make it point to something
  479. */
  480. #ifdef MODULE
  481. static char *qla1280;
482. /* insmod qla1280 options=verbose */
  483. module_param(qla1280, charp, 0);
  484. #else
  485. __setup("qla1280=", qla1280_setup);
  486. #endif
  487. /*
  488. * We use the scsi_pointer structure that's included with each scsi_command
  489. * to overlay our struct srb over it. qla1280_init() checks that a srb is not
  490. * bigger than a scsi_pointer.
  491. */
  492. #define CMD_SP(Cmnd) &Cmnd->SCp
  493. #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
  494. #define CMD_CDBP(Cmnd) Cmnd->cmnd
  495. #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
  496. #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
  497. #define CMD_RESULT(Cmnd) Cmnd->result
  498. #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
  499. #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
  500. #define CMD_HOST(Cmnd) Cmnd->device->host
  501. #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
  502. #define SCSI_TCN_32(Cmnd) Cmnd->device->id
  503. #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
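/*
 * Minimal sketch of the size constraint behind the CMD_SP() overlay
 * described above: struct srb must fit inside the scsi_pointer that is
 * embedded in every scsi_cmnd.  A compile-time assertion along these
 * lines (placed inside any function) would express the same check the
 * comment says qla1280_init() performs at run time.
 */
#if 0
BUILD_BUG_ON(sizeof(struct srb) > sizeof(struct scsi_pointer));
#endif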
  504. /*****************************************/
  505. /* ISP Boards supported by this driver */
  506. /*****************************************/
  507. struct qla_boards {
  508. char *name; /* Board ID String */
  509. int numPorts; /* Number of SCSI ports */
  510. int fw_index; /* index into qla1280_fw_tbl for firmware */
  511. };
  512. /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
  513. static struct pci_device_id qla1280_pci_tbl[] = {
  514. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
  515. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  516. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
  517. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
  518. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
  519. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
  520. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
  521. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
  522. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
  523. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
  524. {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
  525. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
  526. {0,}
  527. };
  528. MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
  529. DEFINE_MUTEX(qla1280_firmware_mutex);
  530. struct qla_fw {
  531. char *fwname;
  532. const struct firmware *fw;
  533. };
  534. #define QL_NUM_FW_IMAGES 3
  535. struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
  536. {"qlogic/1040.bin", NULL}, /* image 0 */
  537. {"qlogic/1280.bin", NULL}, /* image 1 */
  538. {"qlogic/12160.bin", NULL}, /* image 2 */
  539. };
  540. /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
  541. static struct qla_boards ql1280_board_tbl[] = {
  542. {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
  543. {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
  544. {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
  545. {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
  546. {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
  547. {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
  548. {.name = " ", .numPorts = 0, .fw_index = -1},
  549. };
  550. static int qla1280_verbose = 1;
  551. #if DEBUG_QLA1280
  552. static int ql_debug_level = 1;
  553. #define dprintk(level, format, a...) \
  554. do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
  555. #define qla1280_dump_buffer(level, buf, size) \
  556. if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
  557. #define qla1280_print_scsi_cmd(level, cmd) \
  558. if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
  559. #else
  560. #define ql_debug_level 0
  561. #define dprintk(level, format, a...) do{}while(0)
  562. #define qla1280_dump_buffer(a, b, c) do{}while(0)
  563. #define qla1280_print_scsi_cmd(a, b) do{}while(0)
  564. #endif
  565. #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
  566. #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
  567. #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
  568. #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
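/*
 * Usage note (assuming DEBUG_QLA1280 is changed to 1 above): a call such
 * as dprintk(2, "scsi(%ld): ...\n", ha->host_no) prints only while
 * ql_debug_level is 2 or higher; with DEBUG_QLA1280 left at 0 the macro
 * expands to nothing and costs nothing at run time.
 */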
  569. static int qla1280_read_nvram(struct scsi_qla_host *ha)
  570. {
  571. uint16_t *wptr;
  572. uint8_t chksum;
  573. int cnt, i;
  574. struct nvram *nv;
  575. ENTER("qla1280_read_nvram");
  576. if (driver_setup.no_nvram)
  577. return 1;
  578. printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
  579. wptr = (uint16_t *)&ha->nvram;
  580. nv = &ha->nvram;
  581. chksum = 0;
  582. for (cnt = 0; cnt < 3; cnt++) {
  583. *wptr = qla1280_get_nvram_word(ha, cnt);
  584. chksum += *wptr & 0xff;
  585. chksum += (*wptr >> 8) & 0xff;
  586. wptr++;
  587. }
  588. if (nv->id0 != 'I' || nv->id1 != 'S' ||
  589. nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
  590. dprintk(2, "Invalid nvram ID or version!\n");
  591. chksum = 1;
  592. } else {
  593. for (; cnt < sizeof(struct nvram); cnt++) {
  594. *wptr = qla1280_get_nvram_word(ha, cnt);
  595. chksum += *wptr & 0xff;
  596. chksum += (*wptr >> 8) & 0xff;
  597. wptr++;
  598. }
  599. }
  600. dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
  601. " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
  602. nv->version);
  603. if (chksum) {
  604. if (!driver_setup.no_nvram)
  605. printk(KERN_WARNING "scsi(%ld): Unable to identify or "
  606. "validate NVRAM checksum, using default "
  607. "settings\n", ha->host_no);
  608. ha->nvram_valid = 0;
  609. } else
  610. ha->nvram_valid = 1;
  611. /* The firmware interface is, um, interesting, in that the
  612. * actual firmware image on the chip is little endian, thus,
  613. * the process of taking that image to the CPU would end up
  614. * little endian. However, the firmware interface requires it
  615. * to be read a word (two bytes) at a time.
  616. *
  617. * The net result of this would be that the word (and
618. * doubleword) quantities in the firmware would be correct, but
  619. * the bytes would be pairwise reversed. Since most of the
620. * firmware quantities are, in fact, bytes, we do an extra
  621. * le16_to_cpu() in the firmware read routine.
  622. *
  623. * The upshot of all this is that the bytes in the firmware
624. * are in the correct places, but the 16 and 32 bit quantities
  625. * are still in little endian format. We fix that up below by
  626. * doing extra reverses on them */
  627. nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
  628. nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
  629. for(i = 0; i < MAX_BUSES; i++) {
  630. nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
  631. nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
  632. }
  633. dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
  634. LEAVE("qla1280_read_nvram");
  635. return chksum;
  636. }
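/*
 * Note on the checksum logic above (illustrative): the loop byte-sums
 * the entire NVRAM image, and a valid image is expected to carry a
 * checksum byte chosen so that the sum of all bytes is zero modulo 256.
 * 'chksum' therefore ends up non-zero exactly when the ID/version check
 * fails or the contents are corrupt, which is what the caller tests.
 */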
  637. /**************************************************************************
  638. * qla1280_info
  639. * Return a string describing the driver.
  640. **************************************************************************/
  641. static const char *
  642. qla1280_info(struct Scsi_Host *host)
  643. {
  644. static char qla1280_scsi_name_buffer[125];
  645. char *bp;
  646. struct scsi_qla_host *ha;
  647. struct qla_boards *bdp;
  648. bp = &qla1280_scsi_name_buffer[0];
  649. ha = (struct scsi_qla_host *)host->hostdata;
  650. bdp = &ql1280_board_tbl[ha->devnum];
  651. memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
  652. sprintf (bp,
  653. "QLogic %s PCI to SCSI Host Adapter\n"
  654. " Firmware version: %2d.%02d.%02d, Driver version %s",
  655. &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
  656. QLA1280_VERSION);
  657. return bp;
  658. }
  659. /**************************************************************************
  660. * qla1280_queuecommand
  661. * Queue a command to the controller.
  662. *
  663. * Note:
664. * The mid-level driver tries to ensure that queuecommand never gets invoked
  665. * concurrently with itself or the interrupt handler (although the
  666. * interrupt handler may call this routine as part of request-completion
667. * handling). Unfortunately, it sometimes calls the scheduler in interrupt
  668. * context which is a big NO! NO!.
  669. **************************************************************************/
  670. static int
  671. qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
  672. {
  673. struct Scsi_Host *host = cmd->device->host;
  674. struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
  675. struct srb *sp = (struct srb *)CMD_SP(cmd);
  676. int status;
  677. cmd->scsi_done = fn;
  678. sp->cmd = cmd;
  679. sp->flags = 0;
  680. sp->wait = NULL;
  681. CMD_HANDLE(cmd) = (unsigned char *)NULL;
  682. qla1280_print_scsi_cmd(5, cmd);
  683. #ifdef QLA_64BIT_PTR
  684. /*
  685. * Using 64 bit commands if the PCI bridge doesn't support it is a
  686. * bit wasteful, however this should really only happen if one's
  687. * PCI controller is completely broken, like the BCM1250. For
  688. * sane hardware this is not an issue.
  689. */
  690. status = qla1280_64bit_start_scsi(ha, sp);
  691. #else
  692. status = qla1280_32bit_start_scsi(ha, sp);
  693. #endif
  694. return status;
  695. }
  696. enum action {
  697. ABORT_COMMAND,
  698. DEVICE_RESET,
  699. BUS_RESET,
  700. ADAPTER_RESET,
  701. };
  702. static void qla1280_mailbox_timeout(unsigned long __data)
  703. {
  704. struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
  705. struct device_reg __iomem *reg;
  706. reg = ha->iobase;
  707. ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
  708. printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
  709. "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
  710. RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
  711. complete(ha->mailbox_wait);
  712. }
  713. static int
  714. _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
  715. struct completion *wait)
  716. {
  717. int status = FAILED;
  718. struct scsi_cmnd *cmd = sp->cmd;
  719. spin_unlock_irq(ha->host->host_lock);
  720. wait_for_completion_timeout(wait, 4*HZ);
  721. spin_lock_irq(ha->host->host_lock);
  722. sp->wait = NULL;
  723. if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
  724. status = SUCCESS;
  725. (*cmd->scsi_done)(cmd);
  726. }
  727. return status;
  728. }
  729. static int
  730. qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
  731. {
  732. DECLARE_COMPLETION_ONSTACK(wait);
  733. sp->wait = &wait;
  734. return _qla1280_wait_for_single_command(ha, sp, &wait);
  735. }
  736. static int
  737. qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
  738. {
  739. int cnt;
  740. int status;
  741. struct srb *sp;
  742. struct scsi_cmnd *cmd;
  743. status = SUCCESS;
  744. /*
  745. * Wait for all commands with the designated bus/target
  746. * to be completed by the firmware
  747. */
  748. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
  749. sp = ha->outstanding_cmds[cnt];
  750. if (sp) {
  751. cmd = sp->cmd;
  752. if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
  753. continue;
  754. if (target >= 0 && SCSI_TCN_32(cmd) != target)
  755. continue;
  756. status = qla1280_wait_for_single_command(ha, sp);
  757. if (status == FAILED)
  758. break;
  759. }
  760. }
  761. return status;
  762. }
  763. /**************************************************************************
  764. * qla1280_error_action
  765. * The function will attempt to perform a specified error action and
  766. * wait for the results (or time out).
  767. *
  768. * Input:
  769. * cmd = Linux SCSI command packet of the command that cause the
  770. * bus reset.
  771. * action = error action to take (see action_t)
  772. *
  773. * Returns:
  774. * SUCCESS or FAILED
  775. *
  776. **************************************************************************/
  777. static int
  778. qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
  779. {
  780. struct scsi_qla_host *ha;
  781. int bus, target, lun;
  782. struct srb *sp;
  783. int i, found;
  784. int result=FAILED;
  785. int wait_for_bus=-1;
  786. int wait_for_target = -1;
  787. DECLARE_COMPLETION_ONSTACK(wait);
  788. ENTER("qla1280_error_action");
  789. ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
  790. sp = (struct srb *)CMD_SP(cmd);
  791. bus = SCSI_BUS_32(cmd);
  792. target = SCSI_TCN_32(cmd);
  793. lun = SCSI_LUN_32(cmd);
  794. dprintk(4, "error_action %i, istatus 0x%04x\n", action,
  795. RD_REG_WORD(&ha->iobase->istatus));
  796. dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
  797. RD_REG_WORD(&ha->iobase->host_cmd),
  798. RD_REG_WORD(&ha->iobase->ictrl), jiffies);
  799. if (qla1280_verbose)
  800. printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
  801. "Handle=0x%p, action=0x%x\n",
  802. ha->host_no, cmd, CMD_HANDLE(cmd), action);
  803. /*
  804. * Check to see if we have the command in the outstanding_cmds[]
  805. * array. If not then it must have completed before this error
  806. * action was initiated. If the error_action isn't ABORT_COMMAND
  807. * then the driver must proceed with the requested action.
  808. */
  809. found = -1;
  810. for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
  811. if (sp == ha->outstanding_cmds[i]) {
  812. found = i;
  813. sp->wait = &wait; /* we'll wait for it to complete */
  814. break;
  815. }
  816. }
  817. if (found < 0) { /* driver doesn't have command */
  818. result = SUCCESS;
  819. if (qla1280_verbose) {
  820. printk(KERN_INFO
  821. "scsi(%ld:%d:%d:%d): specified command has "
  822. "already completed.\n", ha->host_no, bus,
  823. target, lun);
  824. }
  825. }
  826. switch (action) {
  827. case ABORT_COMMAND:
  828. dprintk(1, "qla1280: RISC aborting command\n");
  829. /*
  830. * The abort might fail due to race when the host_lock
  831. * is released to issue the abort. As such, we
  832. * don't bother to check the return status.
  833. */
  834. if (found >= 0)
  835. qla1280_abort_command(ha, sp, found);
  836. break;
  837. case DEVICE_RESET:
  838. if (qla1280_verbose)
  839. printk(KERN_INFO
  840. "scsi(%ld:%d:%d:%d): Queueing device reset "
  841. "command.\n", ha->host_no, bus, target, lun);
  842. if (qla1280_device_reset(ha, bus, target) == 0) {
  843. /* issued device reset, set wait conditions */
  844. wait_for_bus = bus;
  845. wait_for_target = target;
  846. }
  847. break;
  848. case BUS_RESET:
  849. if (qla1280_verbose)
  850. printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
  851. "reset.\n", ha->host_no, bus);
  852. if (qla1280_bus_reset(ha, bus) == 0) {
  853. /* issued bus reset, set wait conditions */
  854. wait_for_bus = bus;
  855. }
  856. break;
  857. case ADAPTER_RESET:
  858. default:
  859. if (qla1280_verbose) {
  860. printk(KERN_INFO
  861. "scsi(%ld): Issued ADAPTER RESET\n",
  862. ha->host_no);
  863. printk(KERN_INFO "scsi(%ld): I/O processing will "
  864. "continue automatically\n", ha->host_no);
  865. }
  866. ha->flags.reset_active = 1;
  867. if (qla1280_abort_isp(ha) != 0) { /* it's dead */
  868. result = FAILED;
  869. }
  870. ha->flags.reset_active = 0;
  871. }
  872. /*
  873. * At this point, the host_lock has been released and retaken
  874. * by the issuance of the mailbox command.
  875. * Wait for the command passed in by the mid-layer if it
  876. * was found by the driver. It might have been returned
  877. * between eh recovery steps, hence the check of the "found"
  878. * variable.
  879. */
  880. if (found >= 0)
  881. result = _qla1280_wait_for_single_command(ha, sp, &wait);
  882. if (action == ABORT_COMMAND && result != SUCCESS) {
  883. printk(KERN_WARNING
  884. "scsi(%li:%i:%i:%i): "
  885. "Unable to abort command!\n",
  886. ha->host_no, bus, target, lun);
  887. }
  888. /*
  889. * If the command passed in by the mid-layer has been
  890. * returned by the board, then wait for any additional
  891. * commands which are supposed to complete based upon
  892. * the error action.
  893. *
  894. * All commands are unconditionally returned during a
  895. * call to qla1280_abort_isp(), ADAPTER_RESET. No need
  896. * to wait for them.
  897. */
  898. if (result == SUCCESS && wait_for_bus >= 0) {
  899. result = qla1280_wait_for_pending_commands(ha,
  900. wait_for_bus, wait_for_target);
  901. }
  902. dprintk(1, "RESET returning %d\n", result);
  903. LEAVE("qla1280_error_action");
  904. return result;
  905. }
  906. /**************************************************************************
  907. * qla1280_abort
  908. * Abort the specified SCSI command(s).
  909. **************************************************************************/
  910. static int
  911. qla1280_eh_abort(struct scsi_cmnd * cmd)
  912. {
  913. int rc;
  914. spin_lock_irq(cmd->device->host->host_lock);
  915. rc = qla1280_error_action(cmd, ABORT_COMMAND);
  916. spin_unlock_irq(cmd->device->host->host_lock);
  917. return rc;
  918. }
  919. /**************************************************************************
  920. * qla1280_device_reset
  921. * Reset the specified SCSI device
  922. **************************************************************************/
  923. static int
  924. qla1280_eh_device_reset(struct scsi_cmnd *cmd)
  925. {
  926. int rc;
  927. spin_lock_irq(cmd->device->host->host_lock);
  928. rc = qla1280_error_action(cmd, DEVICE_RESET);
  929. spin_unlock_irq(cmd->device->host->host_lock);
  930. return rc;
  931. }
  932. /**************************************************************************
  933. * qla1280_bus_reset
  934. * Reset the specified bus.
  935. **************************************************************************/
  936. static int
  937. qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
  938. {
  939. int rc;
  940. spin_lock_irq(cmd->device->host->host_lock);
  941. rc = qla1280_error_action(cmd, BUS_RESET);
  942. spin_unlock_irq(cmd->device->host->host_lock);
  943. return rc;
  944. }
  945. /**************************************************************************
  946. * qla1280_adapter_reset
  947. * Reset the specified adapter (both channels)
  948. **************************************************************************/
  949. static int
  950. qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
  951. {
  952. int rc;
  953. spin_lock_irq(cmd->device->host->host_lock);
  954. rc = qla1280_error_action(cmd, ADAPTER_RESET);
  955. spin_unlock_irq(cmd->device->host->host_lock);
  956. return rc;
  957. }
  958. static int
  959. qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
  960. sector_t capacity, int geom[])
  961. {
  962. int heads, sectors, cylinders;
  963. heads = 64;
  964. sectors = 32;
  965. cylinders = (unsigned long)capacity / (heads * sectors);
  966. if (cylinders > 1024) {
  967. heads = 255;
  968. sectors = 63;
  969. cylinders = (unsigned long)capacity / (heads * sectors);
  970. /* if (cylinders > 1023)
  971. cylinders = 1023; */
  972. }
  973. geom[0] = heads;
  974. geom[1] = sectors;
  975. geom[2] = cylinders;
  976. return 0;
  977. }
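/*
 * Worked example for the translation above (illustrative numbers): a
 * 4 GiB disk reports capacity = 8388608 sectors.  With 64 heads and
 * 32 sectors that gives 8388608 / (64 * 32) = 4096 cylinders, which is
 * more than 1024, so the code switches to 255 heads and 63 sectors and
 * reports 8388608 / (255 * 63) = 522 cylinders instead.
 */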
  978. /* disable risc and host interrupts */
  979. static inline void
  980. qla1280_disable_intrs(struct scsi_qla_host *ha)
  981. {
  982. WRT_REG_WORD(&ha->iobase->ictrl, 0);
  983. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  984. }
  985. /* enable risc and host interrupts */
  986. static inline void
  987. qla1280_enable_intrs(struct scsi_qla_host *ha)
  988. {
  989. WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
  990. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  991. }
  992. /**************************************************************************
  993. * qla1280_intr_handler
  994. * Handles the H/W interrupt
  995. **************************************************************************/
  996. static irqreturn_t
  997. qla1280_intr_handler(int irq, void *dev_id)
  998. {
  999. struct scsi_qla_host *ha;
  1000. struct device_reg __iomem *reg;
  1001. u16 data;
  1002. int handled = 0;
  1003. ENTER_INTR ("qla1280_intr_handler");
  1004. ha = (struct scsi_qla_host *)dev_id;
  1005. spin_lock(ha->host->host_lock);
  1006. ha->isr_count++;
  1007. reg = ha->iobase;
  1008. qla1280_disable_intrs(ha);
  1009. data = qla1280_debounce_register(&reg->istatus);
  1010. /* Check for pending interrupts. */
  1011. if (data & RISC_INT) {
  1012. qla1280_isr(ha, &ha->done_q);
  1013. handled = 1;
  1014. }
  1015. if (!list_empty(&ha->done_q))
  1016. qla1280_done(ha);
  1017. spin_unlock(ha->host->host_lock);
  1018. qla1280_enable_intrs(ha);
  1019. LEAVE_INTR("qla1280_intr_handler");
  1020. return IRQ_RETVAL(handled);
  1021. }
  1022. static int
  1023. qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
  1024. {
  1025. uint8_t mr;
  1026. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1027. struct nvram *nv;
  1028. int status, lun;
  1029. nv = &ha->nvram;
  1030. mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
  1031. /* Set Target Parameters. */
  1032. mb[0] = MBC_SET_TARGET_PARAMETERS;
  1033. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1034. mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
  1035. mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
  1036. mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
  1037. mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
  1038. mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
  1039. mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
  1040. mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
  1041. mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
  1042. if (IS_ISP1x160(ha)) {
  1043. mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
  1044. mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
  1045. mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
  1046. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
  1047. mr |= BIT_6;
  1048. } else {
  1049. mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
  1050. }
  1051. mb[3] |= nv->bus[bus].target[target].sync_period;
  1052. status = qla1280_mailbox_command(ha, mr, mb);
  1053. /* Set Device Queue Parameters. */
  1054. for (lun = 0; lun < MAX_LUNS; lun++) {
  1055. mb[0] = MBC_SET_DEVICE_QUEUE;
  1056. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1057. mb[1] |= lun;
  1058. mb[2] = nv->bus[bus].max_queue_depth;
  1059. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1060. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1061. }
  1062. if (status)
  1063. printk(KERN_WARNING "scsi(%ld:%i:%i): "
  1064. "qla1280_set_target_parameters() failed\n",
  1065. ha->host_no, bus, target);
  1066. return status;
  1067. }
  1068. /**************************************************************************
  1069. * qla1280_slave_configure
  1070. *
  1071. * Description:
1072. * Determines the queue depth for a given device. There are two ways
1073. * a queue depth can be obtained for a tagged queueing device. One
1074. * way is a compile-time default queue depth; if such a default
1075. * is defined, then it is used
1076. * as the default queue depth. Otherwise, we use either 4 or 8 as the
1077. * default queue depth (dependent on the number of hardware SCBs).
  1078. **************************************************************************/
  1079. static int
  1080. qla1280_slave_configure(struct scsi_device *device)
  1081. {
  1082. struct scsi_qla_host *ha;
  1083. int default_depth = 3;
  1084. int bus = device->channel;
  1085. int target = device->id;
  1086. int status = 0;
  1087. struct nvram *nv;
  1088. unsigned long flags;
  1089. ha = (struct scsi_qla_host *)device->host->hostdata;
  1090. nv = &ha->nvram;
  1091. if (qla1280_check_for_dead_scsi_bus(ha, bus))
  1092. return 1;
  1093. if (device->tagged_supported &&
  1094. (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
  1095. scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
  1096. ha->bus_settings[bus].hiwat);
  1097. } else {
  1098. scsi_adjust_queue_depth(device, 0, default_depth);
  1099. }
  1100. nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
  1101. nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
  1102. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
  1103. if (driver_setup.no_sync ||
  1104. (driver_setup.sync_mask &&
  1105. (~driver_setup.sync_mask & (1 << target))))
  1106. nv->bus[bus].target[target].parameter.enable_sync = 0;
  1107. if (driver_setup.no_wide ||
  1108. (driver_setup.wide_mask &&
  1109. (~driver_setup.wide_mask & (1 << target))))
  1110. nv->bus[bus].target[target].parameter.enable_wide = 0;
  1111. if (IS_ISP1x160(ha)) {
  1112. if (driver_setup.no_ppr ||
  1113. (driver_setup.ppr_mask &&
  1114. (~driver_setup.ppr_mask & (1 << target))))
  1115. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
  1116. }
  1117. spin_lock_irqsave(ha->host->host_lock, flags);
  1118. if (nv->bus[bus].target[target].parameter.enable_sync)
  1119. status = qla1280_set_target_parameters(ha, bus, target);
  1120. qla1280_get_target_parameters(ha, device);
  1121. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1122. return status;
  1123. }
  1124. /*
  1125. * qla1280_done
  1126. * Process completed commands.
  1127. *
  1128. * Input:
  1129. * ha = adapter block pointer.
  1130. */
  1131. static void
  1132. qla1280_done(struct scsi_qla_host *ha)
  1133. {
  1134. struct srb *sp;
  1135. struct list_head *done_q;
  1136. int bus, target, lun;
  1137. struct scsi_cmnd *cmd;
  1138. ENTER("qla1280_done");
  1139. done_q = &ha->done_q;
  1140. while (!list_empty(done_q)) {
  1141. sp = list_entry(done_q->next, struct srb, list);
  1142. list_del(&sp->list);
  1143. cmd = sp->cmd;
  1144. bus = SCSI_BUS_32(cmd);
  1145. target = SCSI_TCN_32(cmd);
  1146. lun = SCSI_LUN_32(cmd);
  1147. switch ((CMD_RESULT(cmd) >> 16)) {
  1148. case DID_RESET:
  1149. /* Issue marker command. */
  1150. if (!ha->flags.abort_isp_active)
  1151. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  1152. break;
  1153. case DID_ABORT:
  1154. sp->flags &= ~SRB_ABORT_PENDING;
  1155. sp->flags |= SRB_ABORTED;
  1156. break;
  1157. default:
  1158. break;
  1159. }
  1160. /* Release memory used for this I/O */
  1161. scsi_dma_unmap(cmd);
  1162. /* Call the mid-level driver interrupt handler */
  1163. ha->actthreads--;
  1164. if (sp->wait == NULL)
  1165. (*(cmd)->scsi_done)(cmd);
  1166. else
  1167. complete(sp->wait);
  1168. }
  1169. LEAVE("qla1280_done");
  1170. }
  1171. /*
  1172. * Translates a ISP error to a Linux SCSI error
  1173. */
  1174. static int
  1175. qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
  1176. {
  1177. int host_status = DID_ERROR;
  1178. uint16_t comp_status = le16_to_cpu(sts->comp_status);
  1179. uint16_t state_flags = le16_to_cpu(sts->state_flags);
  1180. uint32_t residual_length = le32_to_cpu(sts->residual_length);
  1181. uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
  1182. #if DEBUG_QLA1280_INTR
  1183. static char *reason[] = {
  1184. "DID_OK",
  1185. "DID_NO_CONNECT",
  1186. "DID_BUS_BUSY",
  1187. "DID_TIME_OUT",
  1188. "DID_BAD_TARGET",
  1189. "DID_ABORT",
  1190. "DID_PARITY",
  1191. "DID_ERROR",
  1192. "DID_RESET",
  1193. "DID_BAD_INTR"
  1194. };
  1195. #endif /* DEBUG_QLA1280_INTR */
  1196. ENTER("qla1280_return_status");
  1197. #if DEBUG_QLA1280_INTR
  1198. /*
  1199. dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
  1200. comp_status);
  1201. */
  1202. #endif
  1203. switch (comp_status) {
  1204. case CS_COMPLETE:
  1205. host_status = DID_OK;
  1206. break;
  1207. case CS_INCOMPLETE:
  1208. if (!(state_flags & SF_GOT_BUS))
  1209. host_status = DID_NO_CONNECT;
  1210. else if (!(state_flags & SF_GOT_TARGET))
  1211. host_status = DID_BAD_TARGET;
  1212. else if (!(state_flags & SF_SENT_CDB))
  1213. host_status = DID_ERROR;
  1214. else if (!(state_flags & SF_TRANSFERRED_DATA))
  1215. host_status = DID_ERROR;
  1216. else if (!(state_flags & SF_GOT_STATUS))
  1217. host_status = DID_ERROR;
  1218. else if (!(state_flags & SF_GOT_SENSE))
  1219. host_status = DID_ERROR;
  1220. break;
  1221. case CS_RESET:
  1222. host_status = DID_RESET;
  1223. break;
  1224. case CS_ABORTED:
  1225. host_status = DID_ABORT;
  1226. break;
  1227. case CS_TIMEOUT:
  1228. host_status = DID_TIME_OUT;
  1229. break;
  1230. case CS_DATA_OVERRUN:
  1231. dprintk(2, "Data overrun 0x%x\n", residual_length);
  1232. dprintk(2, "qla1280_return_status: response packet data\n");
  1233. qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
  1234. host_status = DID_ERROR;
  1235. break;
  1236. case CS_DATA_UNDERRUN:
  1237. if ((scsi_bufflen(cp) - residual_length) <
  1238. cp->underflow) {
  1239. printk(KERN_WARNING
  1240. "scsi: Underflow detected - retrying "
  1241. "command.\n");
  1242. host_status = DID_ERROR;
  1243. } else {
  1244. scsi_set_resid(cp, residual_length);
  1245. host_status = DID_OK;
  1246. }
  1247. break;
  1248. default:
  1249. host_status = DID_ERROR;
  1250. break;
  1251. }
  1252. #if DEBUG_QLA1280_INTR
  1253. dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
  1254. reason[host_status], scsi_status);
  1255. #endif
  1256. LEAVE("qla1280_return_status");
  1257. return (scsi_status & 0xff) | (host_status << 16);
  1258. }
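/*
 * Illustrative only: the packed value returned above is what ends up in
 * CMD_RESULT(cmd), so callers such as qla1280_done() recover the pieces
 * with the inverse expressions, e.g.
 *
 *	host_status = CMD_RESULT(cmd) >> 16;
 *	scsi_status = CMD_RESULT(cmd) & 0xff;
 */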
  1259. /****************************************************************************/
  1260. /* QLogic ISP1280 Hardware Support Functions. */
  1261. /****************************************************************************/
  1262. /*
  1263. * qla1280_initialize_adapter
  1264. * Initialize board.
  1265. *
  1266. * Input:
  1267. * ha = adapter block pointer.
  1268. *
  1269. * Returns:
  1270. * 0 = success
  1271. */
  1272. static int __devinit
  1273. qla1280_initialize_adapter(struct scsi_qla_host *ha)
  1274. {
  1275. struct device_reg __iomem *reg;
  1276. int status;
  1277. int bus;
  1278. unsigned long flags;
  1279. ENTER("qla1280_initialize_adapter");
  1280. /* Clear adapter flags. */
  1281. ha->flags.online = 0;
  1282. ha->flags.disable_host_adapter = 0;
  1283. ha->flags.reset_active = 0;
  1284. ha->flags.abort_isp_active = 0;
  1285. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  1286. if (ia64_platform_is("sn2")) {
  1287. printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
  1288. "dual channel lockup workaround\n", ha->host_no);
  1289. ha->flags.use_pci_vchannel = 1;
  1290. driver_setup.no_nvram = 1;
  1291. }
  1292. #endif
  1293. /* TODO: implement support for the 1040 nvram format */
  1294. if (IS_ISP1040(ha))
  1295. driver_setup.no_nvram = 1;
  1296. dprintk(1, "Configure PCI space for adapter...\n");
  1297. reg = ha->iobase;
  1298. /* Insure mailbox registers are free. */
  1299. WRT_REG_WORD(&reg->semaphore, 0);
  1300. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  1301. WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
  1302. RD_REG_WORD(&reg->host_cmd);
  1303. if (qla1280_read_nvram(ha)) {
  1304. dprintk(2, "qla1280_initialize_adapter: failed to read "
  1305. "NVRAM\n");
  1306. }
  1307. /*
  1308. * It's necessary to grab the spin here as qla1280_mailbox_command
  1309. * needs to be able to drop the lock unconditionally to wait
  1310. * for completion.
  1311. */
  1312. spin_lock_irqsave(ha->host->host_lock, flags);
  1313. status = qla1280_load_firmware(ha);
  1314. if (status) {
  1315. printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
  1316. ha->host_no);
  1317. goto out;
  1318. }
  1319. /* Setup adapter based on NVRAM parameters. */
  1320. dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
  1321. qla1280_nvram_config(ha);
  1322. if (ha->flags.disable_host_adapter) {
  1323. status = 1;
  1324. goto out;
  1325. }
  1326. status = qla1280_init_rings(ha);
  1327. if (status)
  1328. goto out;
  1329. /* Issue SCSI reset, if we can't reset twice then bus is dead */
  1330. for (bus = 0; bus < ha->ports; bus++) {
  1331. if (!ha->bus_settings[bus].disable_scsi_reset &&
  1332. qla1280_bus_reset(ha, bus) &&
  1333. qla1280_bus_reset(ha, bus))
  1334. ha->bus_settings[bus].scsi_bus_dead = 1;
  1335. }
  1336. ha->flags.online = 1;
  1337. out:
  1338. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1339. if (status)
  1340. dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
  1341. LEAVE("qla1280_initialize_adapter");
  1342. return status;
  1343. }
  1344. /*
  1345. * qla1280_request_firmware
  1346. * Acquire firmware for chip. Retain in memory
  1347. * for error recovery.
  1348. *
  1349. * Input:
  1350. * ha = adapter block pointer.
  1351. *
  1352. * Returns:
  1353. * Pointer to firmware image or an error code
  1354. * cast to pointer via ERR_PTR().
  1355. */
  1356. static const struct firmware *
  1357. qla1280_request_firmware(struct scsi_qla_host *ha)
  1358. {
  1359. const struct firmware *fw;
  1360. int err;
  1361. int index;
  1362. char *fwname;
  1363. spin_unlock_irq(ha->host->host_lock);
  1364. mutex_lock(&qla1280_firmware_mutex);
  1365. index = ql1280_board_tbl[ha->devnum].fw_index;
  1366. fw = qla1280_fw_tbl[index].fw;
  1367. if (fw)
  1368. goto out;
  1369. fwname = qla1280_fw_tbl[index].fwname;
  1370. err = request_firmware(&fw, fwname, &ha->pdev->dev);
  1371. if (err) {
  1372. printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
  1373. fwname, err);
  1374. fw = ERR_PTR(err);
  1375. goto unlock;
  1376. }
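/*
 * Sanity-check the image before caching it: the file is a stream of
 * 16-bit words (so its length must be even), and the loaders below
 * take the RISC load address from word 2 and the code body from
 * byte 6 onward, so anything shorter than 6 bytes cannot be valid.
 */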
  1377. if ((fw->size % 2) || (fw->size < 6)) {
  1378. printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
  1379. fw->size, fwname);
  1380. release_firmware(fw);
  1381. fw = ERR_PTR(-EINVAL);
  1382. goto unlock;
  1383. }
  1384. qla1280_fw_tbl[index].fw = fw;
  1385. out:
  1386. ha->fwver1 = fw->data[0];
  1387. ha->fwver2 = fw->data[1];
  1388. ha->fwver3 = fw->data[2];
  1389. unlock:
  1390. mutex_unlock(&qla1280_firmware_mutex);
  1391. spin_lock_irq(ha->host->host_lock);
  1392. return fw;
  1393. }
  1394. /*
  1395. * Chip diagnostics
  1396. * Test chip for proper operation.
  1397. *
  1398. * Input:
  1399. * ha = adapter block pointer.
  1400. *
  1401. * Returns:
  1402. * 0 = success.
  1403. */
  1404. static int
  1405. qla1280_chip_diag(struct scsi_qla_host *ha)
  1406. {
  1407. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1408. struct device_reg __iomem *reg = ha->iobase;
  1409. int status = 0;
  1410. int cnt;
  1411. uint16_t data;
  1412. dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
  1413. dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
  1414. /* Soft reset chip and wait for it to finish. */
  1415. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  1416. /*
  1417. * We can't do a traditional PCI write flush here by reading
  1418. * back the register. The card will not respond once the reset
  1419. * is in action and we end up with a machine check exception
  1420. * instead. Nothing to do but wait and hope for the best.
  1421. * A portable pci_write_flush(pdev) call would be very useful here.
  1422. */
  1423. udelay(20);
  1424. data = qla1280_debounce_register(&reg->ictrl);
  1425. /*
  1426. * Yet another QLogic gem ;-(
  1427. */
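/*
 * Poll for the reset bit to clear; 1,000,000 iterations of udelay(5)
 * bounds the wait at roughly five seconds before giving up.
 */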
  1428. for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
  1429. udelay(5);
  1430. data = RD_REG_WORD(&reg->ictrl);
  1431. }
  1432. if (!cnt)
  1433. goto fail;
  1434. /* Reset register cleared by chip reset. */
  1435. dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
  1436. WRT_REG_WORD(&reg->cfg_1, 0);
1437. /* Reset RISC and disable BIOS, which
1438. allows the RISC to execute out of RAM. */
  1439. WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
  1440. HC_RELEASE_RISC | HC_DISABLE_BIOS);
  1441. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1442. data = qla1280_debounce_register(&reg->mailbox0);
  1443. /*
  1444. * I *LOVE* this code!
  1445. */
  1446. for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
  1447. udelay(5);
  1448. data = RD_REG_WORD(&reg->mailbox0);
  1449. }
  1450. if (!cnt)
  1451. goto fail;
  1452. /* Check product ID of chip */
  1453. dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
  1454. if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
  1455. (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
  1456. RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
  1457. RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
  1458. RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
  1459. printk(KERN_INFO "qla1280: Wrong product ID = "
  1460. "0x%x,0x%x,0x%x,0x%x\n",
  1461. RD_REG_WORD(&reg->mailbox1),
  1462. RD_REG_WORD(&reg->mailbox2),
  1463. RD_REG_WORD(&reg->mailbox3),
  1464. RD_REG_WORD(&reg->mailbox4));
  1465. goto fail;
  1466. }
  1467. /*
  1468. * Enable ints early!!!
  1469. */
  1470. qla1280_enable_intrs(ha);
  1471. dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
  1472. /* Wrap Incoming Mailboxes Test. */
  1473. mb[0] = MBC_MAILBOX_REGISTER_TEST;
  1474. mb[1] = 0xAAAA;
  1475. mb[2] = 0x5555;
  1476. mb[3] = 0xAA55;
  1477. mb[4] = 0x55AA;
  1478. mb[5] = 0xA5A5;
  1479. mb[6] = 0x5A5A;
  1480. mb[7] = 0x2525;
  1481. status = qla1280_mailbox_command(ha, 0xff, mb);
  1482. if (status)
  1483. goto fail;
  1484. if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
  1485. mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
  1486. mb[7] != 0x2525) {
  1487. printk(KERN_INFO "qla1280: Failed mbox check\n");
  1488. goto fail;
  1489. }
  1490. dprintk(3, "qla1280_chip_diag: exiting normally\n");
  1491. return 0;
  1492. fail:
  1493. dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
  1494. return status;
  1495. }
  1496. static int
  1497. qla1280_load_firmware_pio(struct scsi_qla_host *ha)
  1498. {
  1499. /* enter with host_lock acquired */
  1500. const struct firmware *fw;
  1501. const __le16 *fw_data;
  1502. uint16_t risc_address, risc_code_size;
  1503. uint16_t mb[MAILBOX_REGISTER_COUNT], i;
  1504. int err = 0;
  1505. fw = qla1280_request_firmware(ha);
  1506. if (IS_ERR(fw))
  1507. return PTR_ERR(fw);
  1508. fw_data = (const __le16 *)&fw->data[0];
  1509. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1510. /* Load RISC code. */
  1511. risc_address = ha->fwstart;
  1512. fw_data = (const __le16 *)&fw->data[6];
  1513. risc_code_size = (fw->size - 6) / 2;
  1514. for (i = 0; i < risc_code_size; i++) {
  1515. mb[0] = MBC_WRITE_RAM_WORD;
  1516. mb[1] = risc_address + i;
  1517. mb[2] = __le16_to_cpu(fw_data[i]);
  1518. err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
  1519. if (err) {
  1520. printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
  1521. ha->host_no);
  1522. break;
  1523. }
  1524. }
  1525. return err;
  1526. }
  1527. #define DUMP_IT_BACK 0 /* for debug of RISC loading */
  1528. static int
  1529. qla1280_load_firmware_dma(struct scsi_qla_host *ha)
  1530. {
  1531. /* enter with host_lock acquired */
  1532. const struct firmware *fw;
  1533. const __le16 *fw_data;
  1534. uint16_t risc_address, risc_code_size;
  1535. uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
  1536. int err = 0, num, i;
  1537. #if DUMP_IT_BACK
  1538. uint8_t *sp, *tbuf;
  1539. dma_addr_t p_tbuf;
  1540. tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
  1541. if (!tbuf)
  1542. return -ENOMEM;
  1543. #endif
  1544. fw = qla1280_request_firmware(ha);
  1545. if (IS_ERR(fw))
  1546. return PTR_ERR(fw);
  1547. fw_data = (const __le16 *)&fw->data[0];
  1548. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1549. /* Load RISC code. */
  1550. risc_address = ha->fwstart;
  1551. fw_data = (const __le16 *)&fw->data[6];
  1552. risc_code_size = (fw->size - 6) / 2;
  1553. dprintk(1, "%s: DMA RISC code (%i) words\n",
  1554. __func__, risc_code_size);
  1555. num = 0;
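/*
 * Stage the image through the request ring, which serves as a
 * convenient DMA-able bounce buffer here (the rings are only
 * (re)initialized after the firmware is up), in chunks of at most
 * 1000 words (2000 bytes) per MBC_LOAD_RAM command.
 */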
  1556. while (risc_code_size > 0) {
  1557. int warn __attribute__((unused)) = 0;
  1558. cnt = 2000 >> 1;
  1559. if (cnt > risc_code_size)
  1560. cnt = risc_code_size;
  1561. dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
  1562. "%d,%d(0x%x)\n",
  1563. fw_data, cnt, num, risc_address);
  1564. for(i = 0; i < cnt; i++)
  1565. ((__le16 *)ha->request_ring)[i] = fw_data[i];
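/*
 * The buffer address is split across four mailbox registers:
 * mb[3]/mb[2] carry the low 32 bits (mb[3] holds the least
 * significant 16) and mb[7]/mb[6] the upper 32 bits; mb[1] is the
 * RISC load address and mb[4] the word count.
 */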
  1566. mb[0] = MBC_LOAD_RAM;
  1567. mb[1] = risc_address;
  1568. mb[4] = cnt;
  1569. mb[3] = ha->request_dma & 0xffff;
  1570. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1571. mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
  1572. mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
  1573. dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
  1574. __func__, mb[0],
  1575. (void *)(long)ha->request_dma,
  1576. mb[6], mb[7], mb[2], mb[3]);
  1577. err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
  1578. BIT_1 | BIT_0, mb);
  1579. if (err) {
  1580. printk(KERN_ERR "scsi(%li): Failed to load partial "
  1581. "segment of f\n", ha->host_no);
  1582. goto out;
  1583. }
  1584. #if DUMP_IT_BACK
  1585. mb[0] = MBC_DUMP_RAM;
  1586. mb[1] = risc_address;
  1587. mb[4] = cnt;
  1588. mb[3] = p_tbuf & 0xffff;
  1589. mb[2] = (p_tbuf >> 16) & 0xffff;
  1590. mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
  1591. mb[6] = pci_dma_hi32(p_tbuf) >> 16;
  1592. err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
  1593. BIT_1 | BIT_0, mb);
  1594. if (err) {
  1595. printk(KERN_ERR
  1596. "Failed to dump partial segment of f/w\n");
  1597. goto out;
  1598. }
  1599. sp = (uint8_t *)ha->request_ring;
  1600. for (i = 0; i < (cnt << 1); i++) {
  1601. if (tbuf[i] != sp[i] && warn++ < 10) {
  1602. printk(KERN_ERR "%s: FW compare error @ "
  1603. "byte(0x%x) loop#=%x\n",
  1604. __func__, i, num);
  1605. printk(KERN_ERR "%s: FWbyte=%x "
  1606. "FWfromChip=%x\n",
  1607. __func__, sp[i], tbuf[i]);
  1608. /*break; */
  1609. }
  1610. }
  1611. #endif
  1612. risc_address += cnt;
  1613. risc_code_size = risc_code_size - cnt;
  1614. fw_data = fw_data + cnt;
  1615. num++;
  1616. }
  1617. out:
  1618. #if DUMP_IT_BACK
  1619. pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
  1620. #endif
  1621. return err;
  1622. }
  1623. static int
  1624. qla1280_start_firmware(struct scsi_qla_host *ha)
  1625. {
  1626. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1627. int err;
  1628. dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
  1629. __func__);
  1630. /* Verify checksum of loaded RISC code. */
  1631. mb[0] = MBC_VERIFY_CHECKSUM;
  1632. /* mb[1] = ql12_risc_code_addr01; */
  1633. mb[1] = ha->fwstart;
  1634. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1635. if (err) {
  1636. printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
  1637. return err;
  1638. }
  1639. /* Start firmware execution. */
  1640. dprintk(1, "%s: start firmware running.\n", __func__);
  1641. mb[0] = MBC_EXECUTE_FIRMWARE;
  1642. mb[1] = ha->fwstart;
  1643. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1644. if (err) {
  1645. printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
  1646. ha->host_no);
  1647. }
  1648. return err;
  1649. }
  1650. static int
  1651. qla1280_load_firmware(struct scsi_qla_host *ha)
  1652. {
  1653. /* enter with host_lock taken */
  1654. int err;
  1655. err = qla1280_chip_diag(ha);
  1656. if (err)
  1657. goto out;
  1658. if (IS_ISP1040(ha))
  1659. err = qla1280_load_firmware_pio(ha);
  1660. else
  1661. err = qla1280_load_firmware_dma(ha);
  1662. if (err)
  1663. goto out;
  1664. err = qla1280_start_firmware(ha);
  1665. out:
  1666. return err;
  1667. }
  1668. /*
  1669. * Initialize rings
  1670. *
  1671. * Input:
  1672. * ha = adapter block pointer.
  1673. * ha->request_ring = request ring virtual address
  1674. * ha->response_ring = response ring virtual address
  1675. * ha->request_dma = request ring physical address
  1676. * ha->response_dma = response ring physical address
  1677. *
  1678. * Returns:
  1679. * 0 = success.
  1680. */
  1681. static int
  1682. qla1280_init_rings(struct scsi_qla_host *ha)
  1683. {
  1684. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1685. int status = 0;
  1686. ENTER("qla1280_init_rings");
  1687. /* Clear outstanding commands array. */
  1688. memset(ha->outstanding_cmds, 0,
  1689. sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
  1690. /* Initialize request queue. */
  1691. ha->request_ring_ptr = ha->request_ring;
  1692. ha->req_ring_index = 0;
  1693. ha->req_q_cnt = REQUEST_ENTRY_CNT;
  1694. /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
  1695. mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
  1696. mb[1] = REQUEST_ENTRY_CNT;
  1697. mb[3] = ha->request_dma & 0xffff;
  1698. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1699. mb[4] = 0;
  1700. mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
  1701. mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
  1702. if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
  1703. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1704. &mb[0]))) {
  1705. /* Initialize response queue. */
  1706. ha->response_ring_ptr = ha->response_ring;
  1707. ha->rsp_ring_index = 0;
  1708. /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
  1709. mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
  1710. mb[1] = RESPONSE_ENTRY_CNT;
  1711. mb[3] = ha->response_dma & 0xffff;
  1712. mb[2] = (ha->response_dma >> 16) & 0xffff;
  1713. mb[5] = 0;
  1714. mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
  1715. mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
  1716. status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
  1717. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1718. &mb[0]);
  1719. }
  1720. if (status)
  1721. dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
  1722. LEAVE("qla1280_init_rings");
  1723. return status;
  1724. }
  1725. static void
  1726. qla1280_print_settings(struct nvram *nv)
  1727. {
  1728. dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
  1729. nv->bus[0].config_1.initiator_id);
  1730. dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
  1731. nv->bus[1].config_1.initiator_id);
  1732. dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
  1733. nv->bus[0].bus_reset_delay);
  1734. dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
  1735. nv->bus[1].bus_reset_delay);
  1736. dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
  1737. dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
  1738. dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
  1739. dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
  1740. dprintk(1, "qla1280 : async data setup time[0]=%d\n",
  1741. nv->bus[0].config_2.async_data_setup_time);
  1742. dprintk(1, "qla1280 : async data setup time[1]=%d\n",
  1743. nv->bus[1].config_2.async_data_setup_time);
  1744. dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
  1745. nv->bus[0].config_2.req_ack_active_negation);
  1746. dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
  1747. nv->bus[1].config_2.req_ack_active_negation);
  1748. dprintk(1, "qla1280 : data line active negation[0]=%d\n",
  1749. nv->bus[0].config_2.data_line_active_negation);
  1750. dprintk(1, "qla1280 : data line active negation[1]=%d\n",
  1751. nv->bus[1].config_2.data_line_active_negation);
  1752. dprintk(1, "qla1280 : disable loading risc code=%d\n",
  1753. nv->cntr_flags_1.disable_loading_risc_code);
  1754. dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
  1755. nv->cntr_flags_1.enable_64bit_addressing);
  1756. dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
  1757. nv->bus[0].selection_timeout);
  1758. dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
  1759. nv->bus[1].selection_timeout);
  1760. dprintk(1, "qla1280 : max queue depth[0]=%d\n",
  1761. nv->bus[0].max_queue_depth);
  1762. dprintk(1, "qla1280 : max queue depth[1]=%d\n",
  1763. nv->bus[1].max_queue_depth);
  1764. }
  1765. static void
  1766. qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
  1767. {
  1768. struct nvram *nv = &ha->nvram;
  1769. nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
  1770. nv->bus[bus].target[target].parameter.auto_request_sense = 1;
  1771. nv->bus[bus].target[target].parameter.tag_queuing = 1;
  1772. nv->bus[bus].target[target].parameter.enable_sync = 1;
  1773. #if 1 /* Some SCSI Processors do not seem to like this */
  1774. nv->bus[bus].target[target].parameter.enable_wide = 1;
  1775. #endif
  1776. nv->bus[bus].target[target].execution_throttle =
  1777. nv->bus[bus].max_queue_depth - 1;
  1778. nv->bus[bus].target[target].parameter.parity_checking = 1;
  1779. nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
  1780. if (IS_ISP1x160(ha)) {
  1781. nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
  1782. nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
  1783. nv->bus[bus].target[target].sync_period = 9;
  1784. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
  1785. nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
  1786. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
  1787. } else {
  1788. nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
  1789. nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
  1790. nv->bus[bus].target[target].sync_period = 10;
  1791. }
  1792. }
  1793. static void
  1794. qla1280_set_defaults(struct scsi_qla_host *ha)
  1795. {
  1796. struct nvram *nv = &ha->nvram;
  1797. int bus, target;
  1798. dprintk(1, "Using defaults for NVRAM: \n");
  1799. memset(nv, 0, sizeof(struct nvram));
  1800. /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
  1801. nv->firmware_feature.f.enable_fast_posting = 1;
  1802. nv->firmware_feature.f.disable_synchronous_backoff = 1;
  1803. nv->termination.scsi_bus_0_control = 3;
  1804. nv->termination.scsi_bus_1_control = 3;
  1805. nv->termination.auto_term_support = 1;
  1806. /*
  1807. * Set default FIFO magic - What appropriate values would be here
  1808. * is unknown. This is what I have found testing with 12160s.
  1809. *
  1810. * Now, I would love the magic decoder ring for this one, the
  1811. * header file provided by QLogic seems to be bogus or incomplete
  1812. * at best.
  1813. */
  1814. nv->isp_config.burst_enable = 1;
  1815. if (IS_ISP1040(ha))
  1816. nv->isp_config.fifo_threshold |= 3;
  1817. else
  1818. nv->isp_config.fifo_threshold |= 4;
  1819. if (IS_ISP1x160(ha))
  1820. nv->isp_parameter = 0x01; /* fast memory enable */
  1821. for (bus = 0; bus < MAX_BUSES; bus++) {
  1822. nv->bus[bus].config_1.initiator_id = 7;
  1823. nv->bus[bus].config_2.req_ack_active_negation = 1;
  1824. nv->bus[bus].config_2.data_line_active_negation = 1;
  1825. nv->bus[bus].selection_timeout = 250;
  1826. nv->bus[bus].max_queue_depth = 32;
  1827. if (IS_ISP1040(ha)) {
  1828. nv->bus[bus].bus_reset_delay = 3;
  1829. nv->bus[bus].config_2.async_data_setup_time = 6;
  1830. nv->bus[bus].retry_delay = 1;
  1831. } else {
  1832. nv->bus[bus].bus_reset_delay = 5;
  1833. nv->bus[bus].config_2.async_data_setup_time = 8;
  1834. }
  1835. for (target = 0; target < MAX_TARGETS; target++)
  1836. qla1280_set_target_defaults(ha, bus, target);
  1837. }
  1838. }
  1839. static int
  1840. qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
  1841. {
  1842. struct nvram *nv = &ha->nvram;
  1843. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1844. int status, lun;
  1845. uint16_t flag;
  1846. /* Set Target Parameters. */
  1847. mb[0] = MBC_SET_TARGET_PARAMETERS;
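/*
 * The target ID goes in the upper byte of mb[1]; BIT_7 of that byte
 * selects the second SCSI bus on dual-channel boards.
 */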
  1848. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1849. /*
  1850. * Do not enable sync and ppr for the initial INQUIRY run. We
  1851. * enable this later if we determine the target actually
  1852. * supports it.
  1853. */
  1854. mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
  1855. | TP_WIDE | TP_PARITY | TP_DISCONNECT);
  1856. if (IS_ISP1x160(ha))
  1857. mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
  1858. else
  1859. mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
  1860. mb[3] |= nv->bus[bus].target[target].sync_period;
  1861. status = qla1280_mailbox_command(ha, 0x0f, mb);
  1862. /* Save Tag queuing enable flag. */
  1863. flag = (BIT_0 << target);
  1864. if (nv->bus[bus].target[target].parameter.tag_queuing)
  1865. ha->bus_settings[bus].qtag_enables |= flag;
  1866. /* Save Device enable flag. */
  1867. if (IS_ISP1x160(ha)) {
  1868. if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
  1869. ha->bus_settings[bus].device_enables |= flag;
  1870. ha->bus_settings[bus].lun_disables |= 0;
  1871. } else {
  1872. if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
  1873. ha->bus_settings[bus].device_enables |= flag;
  1874. /* Save LUN disable flag. */
  1875. if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
  1876. ha->bus_settings[bus].lun_disables |= flag;
  1877. }
  1878. /* Set Device Queue Parameters. */
  1879. for (lun = 0; lun < MAX_LUNS; lun++) {
  1880. mb[0] = MBC_SET_DEVICE_QUEUE;
  1881. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1882. mb[1] |= lun;
  1883. mb[2] = nv->bus[bus].max_queue_depth;
  1884. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1885. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1886. }
  1887. return status;
  1888. }
  1889. static int
  1890. qla1280_config_bus(struct scsi_qla_host *ha, int bus)
  1891. {
  1892. struct nvram *nv = &ha->nvram;
  1893. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1894. int target, status;
  1895. /* SCSI Reset Disable. */
  1896. ha->bus_settings[bus].disable_scsi_reset =
  1897. nv->bus[bus].config_1.scsi_reset_disable;
  1898. /* Initiator ID. */
  1899. ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
  1900. mb[0] = MBC_SET_INITIATOR_ID;
  1901. mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
  1902. ha->bus_settings[bus].id;
  1903. status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1904. /* Reset Delay. */
  1905. ha->bus_settings[bus].bus_reset_delay =
  1906. nv->bus[bus].bus_reset_delay;
  1907. /* Command queue depth per device. */
  1908. ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
  1909. /* Set target parameters. */
  1910. for (target = 0; target < MAX_TARGETS; target++)
  1911. status |= qla1280_config_target(ha, bus, target);
  1912. return status;
  1913. }
  1914. static int
  1915. qla1280_nvram_config(struct scsi_qla_host *ha)
  1916. {
  1917. struct device_reg __iomem *reg = ha->iobase;
  1918. struct nvram *nv = &ha->nvram;
  1919. int bus, target, status = 0;
  1920. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1921. ENTER("qla1280_nvram_config");
  1922. if (ha->nvram_valid) {
  1923. /* Always force AUTO sense for LINUX SCSI */
  1924. for (bus = 0; bus < MAX_BUSES; bus++)
  1925. for (target = 0; target < MAX_TARGETS; target++) {
  1926. nv->bus[bus].target[target].parameter.
  1927. auto_request_sense = 1;
  1928. }
  1929. } else {
  1930. qla1280_set_defaults(ha);
  1931. }
  1932. qla1280_print_settings(nv);
  1933. /* Disable RISC load of firmware. */
  1934. ha->flags.disable_risc_code_load =
  1935. nv->cntr_flags_1.disable_loading_risc_code;
  1936. if (IS_ISP1040(ha)) {
  1937. uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
  1938. hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
  1939. cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
  1940. cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
  1941. ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
  1942. /* Busted fifo, says mjacob. */
  1943. if (hwrev != ISP_CFG0_1040A)
  1944. cfg1 |= nv->isp_config.fifo_threshold << 4;
  1945. cfg1 |= nv->isp_config.burst_enable << 2;
  1946. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1947. WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
1948. WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
  1949. } else {
  1950. uint16_t cfg1, term;
  1951. /* Set ISP hardware DMA burst */
  1952. cfg1 = nv->isp_config.fifo_threshold << 4;
  1953. cfg1 |= nv->isp_config.burst_enable << 2;
  1954. /* Enable DMA arbitration on dual channel controllers */
  1955. if (ha->ports > 1)
  1956. cfg1 |= BIT_13;
  1957. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1958. /* Set SCSI termination. */
  1959. WRT_REG_WORD(&reg->gpio_enable,
  1960. BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
  1961. term = nv->termination.scsi_bus_1_control;
  1962. term |= nv->termination.scsi_bus_0_control << 2;
  1963. term |= nv->termination.auto_term_support << 7;
  1964. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1965. WRT_REG_WORD(&reg->gpio_data, term);
  1966. }
  1967. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1968. /* ISP parameter word. */
  1969. mb[0] = MBC_SET_SYSTEM_PARAMETER;
  1970. mb[1] = nv->isp_parameter;
  1971. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1972. if (IS_ISP1x40(ha)) {
  1973. /* clock rate - for qla1240 and older, only */
  1974. mb[0] = MBC_SET_CLOCK_RATE;
  1975. mb[1] = 40;
  1976. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1977. }
  1978. /* Firmware feature word. */
  1979. mb[0] = MBC_SET_FIRMWARE_FEATURES;
  1980. mb[1] = nv->firmware_feature.f.enable_fast_posting;
  1981. mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
  1982. mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
  1983. #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
  1984. if (ia64_platform_is("sn2")) {
  1985. printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
  1986. "workaround\n", ha->host_no);
  1987. mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
  1988. }
  1989. #endif
  1990. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1991. /* Retry count and delay. */
  1992. mb[0] = MBC_SET_RETRY_COUNT;
  1993. mb[1] = nv->bus[0].retry_count;
  1994. mb[2] = nv->bus[0].retry_delay;
  1995. mb[6] = nv->bus[1].retry_count;
  1996. mb[7] = nv->bus[1].retry_delay;
  1997. status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
  1998. BIT_1 | BIT_0, &mb[0]);
  1999. /* ASYNC data setup time. */
  2000. mb[0] = MBC_SET_ASYNC_DATA_SETUP;
  2001. mb[1] = nv->bus[0].config_2.async_data_setup_time;
  2002. mb[2] = nv->bus[1].config_2.async_data_setup_time;
  2003. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2004. /* Active negation states. */
  2005. mb[0] = MBC_SET_ACTIVE_NEGATION;
  2006. mb[1] = 0;
  2007. if (nv->bus[0].config_2.req_ack_active_negation)
  2008. mb[1] |= BIT_5;
  2009. if (nv->bus[0].config_2.data_line_active_negation)
  2010. mb[1] |= BIT_4;
  2011. mb[2] = 0;
  2012. if (nv->bus[1].config_2.req_ack_active_negation)
  2013. mb[2] |= BIT_5;
  2014. if (nv->bus[1].config_2.data_line_active_negation)
  2015. mb[2] |= BIT_4;
  2016. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2017. mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
  2018. mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
  2019. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2020. /* Enable bursting on the command and data DMA channels */
  2021. mb[0] = MBC_SET_PCI_CONTROL;
  2022. mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
  2023. mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
  2024. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2025. mb[0] = MBC_SET_TAG_AGE_LIMIT;
  2026. mb[1] = 8;
  2027. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  2028. /* Selection timeout. */
  2029. mb[0] = MBC_SET_SELECTION_TIMEOUT;
  2030. mb[1] = nv->bus[0].selection_timeout;
  2031. mb[2] = nv->bus[1].selection_timeout;
  2032. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2033. for (bus = 0; bus < ha->ports; bus++)
  2034. status |= qla1280_config_bus(ha, bus);
  2035. if (status)
  2036. dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
  2037. LEAVE("qla1280_nvram_config");
  2038. return status;
  2039. }
  2040. /*
  2041. * Get NVRAM data word
  2042. * Calculates word position in NVRAM and calls request routine to
  2043. * get the word from NVRAM.
  2044. *
  2045. * Input:
  2046. * ha = adapter block pointer.
  2047. * address = NVRAM word address.
  2048. *
  2049. * Returns:
  2050. * data word.
  2051. */
  2052. static uint16_t
  2053. qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
  2054. {
  2055. uint32_t nv_cmd;
  2056. uint16_t data;
  2057. nv_cmd = address << 16;
  2058. nv_cmd |= NV_READ_OP;
  2059. data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
  2060. dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
  2061. "0x%x", data);
  2062. return data;
  2063. }
  2064. /*
  2065. * NVRAM request
  2066. * Sends read command to NVRAM and gets data from NVRAM.
  2067. *
  2068. * Input:
  2069. * ha = adapter block pointer.
  2070. * nv_cmd = Bit 26 = start bit
  2071. * Bit 25, 24 = opcode
  2072. * Bit 23-16 = address
  2073. * Bit 15-0 = write data
  2074. *
  2075. * Returns:
  2076. * data word.
  2077. */
  2078. static uint16_t
  2079. qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
  2080. {
  2081. struct device_reg __iomem *reg = ha->iobase;
  2082. int cnt;
  2083. uint16_t data = 0;
  2084. uint16_t reg_data;
  2085. /* Send command to NVRAM. */
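/*
 * The command is 27 bits wide (start bit in bit 26 down to the data
 * field in bits 15-0); shifting it left by 5 left-justifies the start
 * bit into bit 31 so the loop below can clock out the 11 command bits
 * (start, opcode, address) MSB first by testing BIT_31.
 */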
  2086. nv_cmd <<= 5;
  2087. for (cnt = 0; cnt < 11; cnt++) {
  2088. if (nv_cmd & BIT_31)
  2089. qla1280_nv_write(ha, NV_DATA_OUT);
  2090. else
  2091. qla1280_nv_write(ha, 0);
  2092. nv_cmd <<= 1;
  2093. }
  2094. /* Read data from NVRAM. */
  2095. for (cnt = 0; cnt < 16; cnt++) {
  2096. WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
  2097. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2098. NVRAM_DELAY();
  2099. data <<= 1;
  2100. reg_data = RD_REG_WORD(&reg->nvram);
  2101. if (reg_data & NV_DATA_IN)
  2102. data |= BIT_0;
  2103. WRT_REG_WORD(&reg->nvram, NV_SELECT);
  2104. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2105. NVRAM_DELAY();
  2106. }
  2107. /* Deselect chip. */
  2108. WRT_REG_WORD(&reg->nvram, NV_DESELECT);
  2109. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2110. NVRAM_DELAY();
  2111. return data;
  2112. }
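/*
 * Clock a single bit out to the serial NVRAM: drive the data line
 * with the chip selected, raise NV_CLOCK, then lower it again,
 * flushing the PCI write and delaying around each edge.
 */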
  2113. static void
  2114. qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
  2115. {
  2116. struct device_reg __iomem *reg = ha->iobase;
  2117. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2118. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2119. NVRAM_DELAY();
  2120. WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
  2121. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2122. NVRAM_DELAY();
  2123. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2124. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2125. NVRAM_DELAY();
  2126. }
  2127. /*
  2128. * Mailbox Command
2129. * Issues a mailbox command and waits for completion.
  2130. *
  2131. * Input:
  2132. * ha = adapter block pointer.
  2133. * mr = mailbox registers to load.
  2134. * mb = data pointer for mailbox registers.
  2135. *
  2136. * Output:
  2137. * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
  2138. *
  2139. * Returns:
  2140. * 0 = success
  2141. */
  2142. static int
  2143. qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
  2144. {
  2145. struct device_reg __iomem *reg = ha->iobase;
  2146. int status = 0;
  2147. int cnt;
  2148. uint16_t *optr, *iptr;
  2149. uint16_t __iomem *mptr;
  2150. uint16_t data;
  2151. DECLARE_COMPLETION_ONSTACK(wait);
  2152. struct timer_list timer;
  2153. ENTER("qla1280_mailbox_command");
  2154. if (ha->mailbox_wait) {
  2155. printk(KERN_ERR "Warning mailbox wait already in use!\n");
  2156. }
  2157. ha->mailbox_wait = &wait;
  2158. /*
  2159. * We really should start out by verifying that the mailbox is
2160. * available before we start sending the command data
  2161. */
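/*
 * 'mr' is a bit mask selecting which of the outgoing mailbox
 * registers get loaded from mb[]; e.g. BIT_1 | BIT_0 writes only
 * mb[0] and mb[1], while 0xff loads all eight.
 */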
  2162. /* Load mailbox registers. */
  2163. mptr = (uint16_t __iomem *) &reg->mailbox0;
  2164. iptr = mb;
  2165. for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
  2166. if (mr & BIT_0) {
  2167. WRT_REG_WORD(mptr, (*iptr));
  2168. }
  2169. mr >>= 1;
  2170. mptr++;
  2171. iptr++;
  2172. }
  2173. /* Issue set host interrupt command. */
  2174. /* set up a timer just in case we're really jammed */
  2175. init_timer(&timer);
  2176. timer.expires = jiffies + 20*HZ;
  2177. timer.data = (unsigned long)ha;
  2178. timer.function = qla1280_mailbox_timeout;
  2179. add_timer(&timer);
  2180. spin_unlock_irq(ha->host->host_lock);
  2181. WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
  2182. data = qla1280_debounce_register(&reg->istatus);
  2183. wait_for_completion(&wait);
  2184. del_timer_sync(&timer);
  2185. spin_lock_irq(ha->host->host_lock);
  2186. ha->mailbox_wait = NULL;
  2187. /* Check for mailbox command timeout. */
  2188. if (ha->mailbox_out[0] != MBS_CMD_CMP) {
  2189. printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
  2190. "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
  2191. "0x%04x\n",
  2192. mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
  2193. printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
  2194. RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
  2195. RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
  2196. printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
  2197. RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
  2198. RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
  2199. status = 1;
  2200. }
  2201. /* Load return mailbox registers. */
  2202. optr = mb;
  2203. iptr = (uint16_t *) &ha->mailbox_out[0];
  2204. mr = MAILBOX_REGISTER_COUNT;
  2205. memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
  2206. if (ha->flags.reset_marker)
  2207. qla1280_rst_aen(ha);
  2208. if (status)
  2209. dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
  2210. "0x%x ****\n", mb[0]);
  2211. LEAVE("qla1280_mailbox_command");
  2212. return status;
  2213. }
  2214. /*
  2215. * qla1280_poll
  2216. * Polls ISP for interrupts.
  2217. *
  2218. * Input:
  2219. * ha = adapter block pointer.
  2220. */
  2221. static void
  2222. qla1280_poll(struct scsi_qla_host *ha)
  2223. {
  2224. struct device_reg __iomem *reg = ha->iobase;
  2225. uint16_t data;
  2226. LIST_HEAD(done_q);
  2227. /* ENTER("qla1280_poll"); */
  2228. /* Check for pending interrupts. */
  2229. data = RD_REG_WORD(&reg->istatus);
  2230. if (data & RISC_INT)
  2231. qla1280_isr(ha, &done_q);
  2232. if (!ha->mailbox_wait) {
  2233. if (ha->flags.reset_marker)
  2234. qla1280_rst_aen(ha);
  2235. }
  2236. if (!list_empty(&done_q))
  2237. qla1280_done(ha);
  2238. /* LEAVE("qla1280_poll"); */
  2239. }
  2240. /*
  2241. * qla1280_bus_reset
  2242. * Issue SCSI bus reset.
  2243. *
  2244. * Input:
  2245. * ha = adapter block pointer.
  2246. * bus = SCSI bus number.
  2247. *
  2248. * Returns:
  2249. * 0 = success
  2250. */
  2251. static int
  2252. qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
  2253. {
  2254. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2255. uint16_t reset_delay;
  2256. int status;
  2257. dprintk(3, "qla1280_bus_reset: entered\n");
  2258. if (qla1280_verbose)
  2259. printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
  2260. ha->host_no, bus);
  2261. reset_delay = ha->bus_settings[bus].bus_reset_delay;
  2262. mb[0] = MBC_BUS_RESET;
  2263. mb[1] = reset_delay;
  2264. mb[2] = (uint16_t) bus;
  2265. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2266. if (status) {
  2267. if (ha->bus_settings[bus].failed_reset_count > 2)
  2268. ha->bus_settings[bus].scsi_bus_dead = 1;
  2269. ha->bus_settings[bus].failed_reset_count++;
  2270. } else {
  2271. spin_unlock_irq(ha->host->host_lock);
  2272. ssleep(reset_delay);
  2273. spin_lock_irq(ha->host->host_lock);
  2274. ha->bus_settings[bus].scsi_bus_dead = 0;
  2275. ha->bus_settings[bus].failed_reset_count = 0;
  2276. ha->bus_settings[bus].reset_marker = 0;
  2277. /* Issue marker command. */
  2278. qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
  2279. }
  2280. /*
  2281. * We should probably call qla1280_set_target_parameters()
  2282. * here as well for all devices on the bus.
  2283. */
  2284. if (status)
  2285. dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
  2286. else
  2287. dprintk(3, "qla1280_bus_reset: exiting normally\n");
  2288. return status;
  2289. }
  2290. /*
  2291. * qla1280_device_reset
  2292. * Issue bus device reset message to the target.
  2293. *
  2294. * Input:
  2295. * ha = adapter block pointer.
  2296. * bus = SCSI BUS number.
  2297. * target = SCSI ID.
  2298. *
  2299. * Returns:
  2300. * 0 = success
  2301. */
  2302. static int
  2303. qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
  2304. {
  2305. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2306. int status;
  2307. ENTER("qla1280_device_reset");
  2308. mb[0] = MBC_ABORT_TARGET;
  2309. mb[1] = (bus ? (target | BIT_7) : target) << 8;
  2310. mb[2] = 1;
  2311. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2312. /* Issue marker command. */
  2313. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  2314. if (status)
  2315. dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
  2316. LEAVE("qla1280_device_reset");
  2317. return status;
  2318. }
  2319. /*
  2320. * qla1280_abort_command
  2321. * Abort command aborts a specified IOCB.
  2322. *
  2323. * Input:
  2324. * ha = adapter block pointer.
  2325. * sp = SB structure pointer.
  2326. *
  2327. * Returns:
  2328. * 0 = success
  2329. */
  2330. static int
  2331. qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
  2332. {
  2333. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2334. unsigned int bus, target, lun;
  2335. int status;
  2336. ENTER("qla1280_abort_command");
  2337. bus = SCSI_BUS_32(sp->cmd);
  2338. target = SCSI_TCN_32(sp->cmd);
  2339. lun = SCSI_LUN_32(sp->cmd);
  2340. sp->flags |= SRB_ABORT_PENDING;
  2341. mb[0] = MBC_ABORT_COMMAND;
  2342. mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
  2343. mb[2] = handle >> 16;
  2344. mb[3] = handle & 0xffff;
  2345. status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
  2346. if (status) {
  2347. dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
  2348. sp->flags &= ~SRB_ABORT_PENDING;
  2349. }
  2350. LEAVE("qla1280_abort_command");
  2351. return status;
  2352. }
  2353. /*
  2354. * qla1280_reset_adapter
  2355. * Reset adapter.
  2356. *
  2357. * Input:
  2358. * ha = adapter block pointer.
  2359. */
  2360. static void
  2361. qla1280_reset_adapter(struct scsi_qla_host *ha)
  2362. {
  2363. struct device_reg __iomem *reg = ha->iobase;
  2364. ENTER("qla1280_reset_adapter");
  2365. /* Disable ISP chip */
  2366. ha->flags.online = 0;
  2367. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  2368. WRT_REG_WORD(&reg->host_cmd,
  2369. HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
  2370. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2371. LEAVE("qla1280_reset_adapter");
  2372. }
  2373. /*
  2374. * Issue marker command.
  2375. * Function issues marker IOCB.
  2376. *
  2377. * Input:
  2378. * ha = adapter block pointer.
  2379. * bus = SCSI BUS number
  2380. * id = SCSI ID
  2381. * lun = SCSI LUN
  2382. * type = marker modifier
  2383. */
  2384. static void
  2385. qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
  2386. {
  2387. struct mrk_entry *pkt;
  2388. ENTER("qla1280_marker");
  2389. /* Get request packet. */
  2390. if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
  2391. pkt->entry_type = MARKER_TYPE;
  2392. pkt->lun = (uint8_t) lun;
  2393. pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
  2394. pkt->modifier = type;
  2395. pkt->entry_status = 0;
  2396. /* Issue command to ISP */
  2397. qla1280_isp_cmd(ha);
  2398. }
  2399. LEAVE("qla1280_marker");
  2400. }
  2401. /*
  2402. * qla1280_64bit_start_scsi
  2403. * The start SCSI is responsible for building request packets on
  2404. * request ring and modifying ISP input pointer.
  2405. *
  2406. * Input:
  2407. * ha = adapter block pointer.
  2408. * sp = SB structure pointer.
  2409. *
  2410. * Returns:
  2411. * 0 = success, was able to issue command.
  2412. */
  2413. #ifdef QLA_64BIT_PTR
  2414. static int
  2415. qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
  2416. {
  2417. struct device_reg __iomem *reg = ha->iobase;
  2418. struct scsi_cmnd *cmd = sp->cmd;
  2419. cmd_a64_entry_t *pkt;
  2420. __le32 *dword_ptr;
  2421. dma_addr_t dma_handle;
  2422. int status = 0;
  2423. int cnt;
  2424. int req_cnt;
  2425. int seg_cnt;
  2426. u8 dir;
  2427. ENTER("qla1280_64bit_start_scsi:");
  2428. /* Calculate number of entries and segments required. */
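/*
 * A 64-bit command IOCB holds two data segments and each
 * continuation IOCB holds another five, hence the
 * (seg_cnt - 2) / 5 arithmetic below.
 */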
  2429. req_cnt = 1;
  2430. seg_cnt = scsi_dma_map(cmd);
  2431. if (seg_cnt > 0) {
  2432. if (seg_cnt > 2) {
  2433. req_cnt += (seg_cnt - 2) / 5;
  2434. if ((seg_cnt - 2) % 5)
  2435. req_cnt++;
  2436. }
  2437. } else if (seg_cnt < 0) {
  2438. status = 1;
  2439. goto out;
  2440. }
  2441. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2442. /* Calculate number of free request entries. */
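/*
 * mailbox4 is read back here as the ISP's request-queue out
 * (consumer) pointer; the free count is the distance from that
 * index to our producer index, modulo REQUEST_ENTRY_CNT.
 */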
  2443. cnt = RD_REG_WORD(&reg->mailbox4);
  2444. if (ha->req_ring_index < cnt)
  2445. ha->req_q_cnt = cnt - ha->req_ring_index;
  2446. else
  2447. ha->req_q_cnt =
  2448. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2449. }
  2450. dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
  2451. ha->req_q_cnt, seg_cnt);
  2452. /* If room for request in request ring. */
  2453. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2454. status = SCSI_MLQUEUE_HOST_BUSY;
  2455. dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
  2456. "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
  2457. req_cnt);
  2458. goto out;
  2459. }
  2460. /* Check for room in outstanding command list. */
  2461. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
  2462. ha->outstanding_cmds[cnt] != NULL; cnt++);
  2463. if (cnt >= MAX_OUTSTANDING_COMMANDS) {
  2464. status = SCSI_MLQUEUE_HOST_BUSY;
  2465. dprintk(2, "qla1280_start_scsi: NO ROOM IN "
  2466. "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
  2467. goto out;
  2468. }
  2469. ha->outstanding_cmds[cnt] = sp;
  2470. ha->req_q_cnt -= req_cnt;
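/*
 * Record where the command lives: the IOCB handle below carries the
 * raw index 'cnt', while the value stashed in the scsi_cmnd is
 * offset by one, presumably so that a stored handle of zero can be
 * read as "no outstanding command".
 */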
  2471. CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
  2472. dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
  2473. cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
  2474. dprintk(2, " bus %i, target %i, lun %i\n",
  2475. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2476. qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
  2477. /*
  2478. * Build command packet.
  2479. */
  2480. pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
  2481. pkt->entry_type = COMMAND_A64_TYPE;
  2482. pkt->entry_count = (uint8_t) req_cnt;
  2483. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2484. pkt->entry_status = 0;
  2485. pkt->handle = cpu_to_le32(cnt);
  2486. /* Zero out remaining portion of packet. */
  2487. memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
  2488. /* Set ISP command timeout. */
  2489. pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
  2490. /* Set device target ID and LUN */
  2491. pkt->lun = SCSI_LUN_32(cmd);
  2492. pkt->target = SCSI_BUS_32(cmd) ?
  2493. (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
  2494. /* Enable simple tag queuing if device supports it. */
  2495. if (cmd->device->simple_tags)
  2496. pkt->control_flags |= cpu_to_le16(BIT_3);
  2497. /* Load SCSI command packet. */
  2498. pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
  2499. memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
  2500. /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
  2501. /* Set transfer direction. */
  2502. dir = qla1280_data_direction(cmd);
  2503. pkt->control_flags |= cpu_to_le16(dir);
  2504. /* Set total data segment count. */
  2505. pkt->dseg_count = cpu_to_le16(seg_cnt);
  2506. /*
  2507. * Load data segments.
  2508. */
  2509. if (seg_cnt) { /* If data transfer. */
  2510. struct scatterlist *sg, *s;
  2511. int remseg = seg_cnt;
  2512. sg = scsi_sglist(cmd);
  2513. /* Setup packet address segment pointer. */
  2514. dword_ptr = (u32 *)&pkt->dseg_0_address;
  2515. /* Load command entry data segments. */
  2516. for_each_sg(sg, s, seg_cnt, cnt) {
  2517. if (cnt == 2)
  2518. break;
  2519. dma_handle = sg_dma_address(s);
  2520. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  2521. if (ha->flags.use_pci_vchannel)
  2522. sn_pci_set_vchan(ha->pdev,
  2523. (unsigned long *)&dma_handle,
  2524. SCSI_BUS_32(cmd));
  2525. #endif
  2526. *dword_ptr++ =
  2527. cpu_to_le32(pci_dma_lo32(dma_handle));
  2528. *dword_ptr++ =
  2529. cpu_to_le32(pci_dma_hi32(dma_handle));
  2530. *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
  2531. dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
  2532. cpu_to_le32(pci_dma_hi32(dma_handle)),
  2533. cpu_to_le32(pci_dma_lo32(dma_handle)),
2534. cpu_to_le32(sg_dma_len(s)));
  2535. remseg--;
  2536. }
  2537. dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
  2538. "command packet data - b %i, t %i, l %i \n",
  2539. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
  2540. SCSI_LUN_32(cmd));
  2541. qla1280_dump_buffer(5, (char *)pkt,
  2542. REQUEST_ENTRY_SIZE);
  2543. /*
  2544. * Build continuation packets.
  2545. */
  2546. dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
  2547. "remains\n", seg_cnt);
  2548. while (remseg > 0) {
  2549. /* Update sg start */
  2550. sg = s;
  2551. /* Adjust ring index. */
  2552. ha->req_ring_index++;
  2553. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2554. ha->req_ring_index = 0;
  2555. ha->request_ring_ptr =
  2556. ha->request_ring;
  2557. } else
  2558. ha->request_ring_ptr++;
  2559. pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
  2560. /* Zero out packet. */
  2561. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2562. /* Load packet defaults. */
  2563. ((struct cont_a64_entry *) pkt)->entry_type =
  2564. CONTINUE_A64_TYPE;
  2565. ((struct cont_a64_entry *) pkt)->entry_count = 1;
  2566. ((struct cont_a64_entry *) pkt)->sys_define =
  2567. (uint8_t)ha->req_ring_index;
  2568. /* Setup packet address segment pointer. */
  2569. dword_ptr =
  2570. (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
  2571. /* Load continuation entry data segments. */
  2572. for_each_sg(sg, s, remseg, cnt) {
  2573. if (cnt == 5)
  2574. break;
  2575. dma_handle = sg_dma_address(s);
  2576. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  2577. if (ha->flags.use_pci_vchannel)
  2578. sn_pci_set_vchan(ha->pdev,
  2579. (unsigned long *)&dma_handle,
  2580. SCSI_BUS_32(cmd));
  2581. #endif
  2582. *dword_ptr++ =
  2583. cpu_to_le32(pci_dma_lo32(dma_handle));
  2584. *dword_ptr++ =
  2585. cpu_to_le32(pci_dma_hi32(dma_handle));
  2586. *dword_ptr++ =
  2587. cpu_to_le32(sg_dma_len(s));
  2588. dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
  2589. cpu_to_le32(pci_dma_hi32(dma_handle)),
  2590. cpu_to_le32(pci_dma_lo32(dma_handle)),
  2591. cpu_to_le32(sg_dma_len(s)));
  2592. }
  2593. remseg -= cnt;
  2594. dprintk(5, "qla1280_64bit_start_scsi: "
  2595. "continuation packet data - b %i, t "
  2596. "%i, l %i \n", SCSI_BUS_32(cmd),
  2597. SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2598. qla1280_dump_buffer(5, (char *)pkt,
  2599. REQUEST_ENTRY_SIZE);
  2600. }
  2601. } else { /* No data transfer */
  2602. dprintk(5, "qla1280_64bit_start_scsi: No data, command "
  2603. "packet data - b %i, t %i, l %i \n",
  2604. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2605. qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
  2606. }
  2607. /* Adjust ring index. */
  2608. ha->req_ring_index++;
  2609. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2610. ha->req_ring_index = 0;
  2611. ha->request_ring_ptr = ha->request_ring;
  2612. } else
  2613. ha->request_ring_ptr++;
  2614. /* Set chip new ring index. */
  2615. dprintk(2,
  2616. "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
  2617. sp->flags |= SRB_SENT;
  2618. ha->actthreads++;
  2619. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2620. /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
  2621. mmiowb();
  2622. out:
  2623. if (status)
  2624. dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
  2625. else
  2626. dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
  2627. return status;
  2628. }
  2629. #else /* !QLA_64BIT_PTR */
  2630. /*
  2631. * qla1280_32bit_start_scsi
  2632. * The start SCSI is responsible for building request packets on
  2633. * request ring and modifying ISP input pointer.
  2634. *
  2635. * The Qlogic firmware interface allows every queue slot to have a SCSI
  2636. * command and up to 4 scatter/gather (SG) entries. If we need more
  2637. * than 4 SG entries, then continuation entries are used that can
2638. * hold another 7 entries each. The start routine determines whether
2639. * there are enough empty slots and then builds the combination of
2640. * requests needed to fulfill the OS request.
  2641. *
  2642. * Input:
  2643. * ha = adapter block pointer.
  2644. * sp = SCSI Request Block structure pointer.
  2645. *
  2646. * Returns:
  2647. * 0 = success, was able to issue command.
  2648. */
  2649. static int
  2650. qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
  2651. {
  2652. struct device_reg __iomem *reg = ha->iobase;
  2653. struct scsi_cmnd *cmd = sp->cmd;
  2654. struct cmd_entry *pkt;
  2655. __le32 *dword_ptr;
  2656. int status = 0;
  2657. int cnt;
  2658. int req_cnt;
  2659. int seg_cnt;
  2660. u8 dir;
  2661. ENTER("qla1280_32bit_start_scsi");
  2662. dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
  2663. cmd->cmnd[0]);
  2664. /* Calculate number of entries and segments required. */
  2665. req_cnt = 1;
  2666. seg_cnt = scsi_dma_map(cmd);
2667. if (seg_cnt > 0) {
  2668. /*
  2669. * if greater than four sg entries then we need to allocate
  2670. * continuation entries
  2671. */
  2672. if (seg_cnt > 4) {
  2673. req_cnt += (seg_cnt - 4) / 7;
  2674. if ((seg_cnt - 4) % 7)
  2675. req_cnt++;
  2676. }
  2677. dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
  2678. cmd, seg_cnt, req_cnt);
  2679. } else if (seg_cnt < 0) {
  2680. status = 1;
  2681. goto out;
  2682. }
  2683. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2684. /* Calculate number of free request entries. */
  2685. cnt = RD_REG_WORD(&reg->mailbox4);
  2686. if (ha->req_ring_index < cnt)
  2687. ha->req_q_cnt = cnt - ha->req_ring_index;
  2688. else
  2689. ha->req_q_cnt =
  2690. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2691. }
  2692. dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
  2693. ha->req_q_cnt, seg_cnt);
  2694. /* If room for request in request ring. */
  2695. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2696. status = SCSI_MLQUEUE_HOST_BUSY;
  2697. dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
  2698. "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
  2699. ha->req_q_cnt, req_cnt);
  2700. goto out;
  2701. }
  2702. /* Check for empty slot in outstanding command list. */
  2703. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
  2704. (ha->outstanding_cmds[cnt] != 0); cnt++) ;
  2705. if (cnt >= MAX_OUTSTANDING_COMMANDS) {
  2706. status = SCSI_MLQUEUE_HOST_BUSY;
  2707. dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
  2708. "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
  2709. goto out;
  2710. }
  2711. CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
  2712. ha->outstanding_cmds[cnt] = sp;
  2713. ha->req_q_cnt -= req_cnt;
  2714. /*
  2715. * Build command packet.
  2716. */
  2717. pkt = (struct cmd_entry *) ha->request_ring_ptr;
  2718. pkt->entry_type = COMMAND_TYPE;
  2719. pkt->entry_count = (uint8_t) req_cnt;
  2720. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2721. pkt->entry_status = 0;
  2722. pkt->handle = cpu_to_le32(cnt);
  2723. /* Zero out remaining portion of packet. */
  2724. memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
  2725. /* Set ISP command timeout. */
  2726. pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
  2727. /* Set device target ID and LUN */
  2728. pkt->lun = SCSI_LUN_32(cmd);
  2729. pkt->target = SCSI_BUS_32(cmd) ?
  2730. (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
  2731. /* Enable simple tag queuing if device supports it. */
  2732. if (cmd->device->simple_tags)
  2733. pkt->control_flags |= cpu_to_le16(BIT_3);
  2734. /* Load SCSI command packet. */
  2735. pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
  2736. memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
  2737. /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
  2738. /* Set transfer direction. */
  2739. dir = qla1280_data_direction(cmd);
  2740. pkt->control_flags |= cpu_to_le16(dir);
  2741. /* Set total data segment count. */
  2742. pkt->dseg_count = cpu_to_le16(seg_cnt);
  2743. /*
  2744. * Load data segments.
  2745. */
  2746. if (seg_cnt) {
  2747. struct scatterlist *sg, *s;
  2748. int remseg = seg_cnt;
  2749. sg = scsi_sglist(cmd);
  2750. /* Setup packet address segment pointer. */
  2751. dword_ptr = &pkt->dseg_0_address;
  2752. dprintk(3, "Building S/G data segments..\n");
  2753. qla1280_dump_buffer(1, (char *)sg, 4 * 16);
  2754. /* Load command entry data segments. */
  2755. for_each_sg(sg, s, seg_cnt, cnt) {
  2756. if (cnt == 4)
  2757. break;
  2758. *dword_ptr++ =
  2759. cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
  2760. *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
  2761. dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
  2762. (pci_dma_lo32(sg_dma_address(s))),
  2763. (sg_dma_len(s)));
  2764. remseg--;
  2765. }
  2766. /*
  2767. * Build continuation packets.
  2768. */
  2769. dprintk(3, "S/G Building Continuation"
  2770. "...seg_cnt=0x%x remains\n", seg_cnt);
  2771. while (remseg > 0) {
  2772. /* Continue from end point */
  2773. sg = s;
  2774. /* Adjust ring index. */
  2775. ha->req_ring_index++;
  2776. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2777. ha->req_ring_index = 0;
  2778. ha->request_ring_ptr =
  2779. ha->request_ring;
  2780. } else
  2781. ha->request_ring_ptr++;
  2782. pkt = (struct cmd_entry *)ha->request_ring_ptr;
  2783. /* Zero out packet. */
  2784. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2785. /* Load packet defaults. */
  2786. ((struct cont_entry *) pkt)->
  2787. entry_type = CONTINUE_TYPE;
  2788. ((struct cont_entry *) pkt)->entry_count = 1;
  2789. ((struct cont_entry *) pkt)->sys_define =
  2790. (uint8_t) ha->req_ring_index;
  2791. /* Setup packet address segment pointer. */
  2792. dword_ptr =
  2793. &((struct cont_entry *) pkt)->dseg_0_address;
  2794. /* Load continuation entry data segments. */
  2795. for_each_sg(sg, s, remseg, cnt) {
  2796. if (cnt == 7)
  2797. break;
  2798. *dword_ptr++ =
  2799. cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
  2800. *dword_ptr++ =
  2801. cpu_to_le32(sg_dma_len(s));
  2802. dprintk(1,
  2803. "S/G Segment Cont. phys_addr=0x%x, "
  2804. "len=0x%x\n",
  2805. cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
  2806. cpu_to_le32(sg_dma_len(s)));
  2807. }
  2808. remseg -= cnt;
  2809. dprintk(5, "qla1280_32bit_start_scsi: "
  2810. "continuation packet data - "
  2811. "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
  2812. SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2813. qla1280_dump_buffer(5, (char *)pkt,
  2814. REQUEST_ENTRY_SIZE);
  2815. }
  2816. } else { /* No data transfer at all */
  2817. dprintk(5, "qla1280_32bit_start_scsi: No data, command "
  2818. "packet data - \n");
  2819. qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
  2820. }
  2821. dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
  2822. qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
  2823. REQUEST_ENTRY_SIZE);
  2824. /* Adjust ring index. */
  2825. ha->req_ring_index++;
  2826. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2827. ha->req_ring_index = 0;
  2828. ha->request_ring_ptr = ha->request_ring;
  2829. } else
  2830. ha->request_ring_ptr++;
  2831. /* Set chip new ring index. */
  2832. dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
  2833. "for pending command\n");
  2834. sp->flags |= SRB_SENT;
  2835. ha->actthreads++;
  2836. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2837. /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
  2838. mmiowb();
  2839. out:
  2840. if (status)
  2841. dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
  2842. LEAVE("qla1280_32bit_start_scsi");
  2843. return status;
  2844. }
  2845. #endif
  2846. /*
  2847. * qla1280_req_pkt
  2848. * Function is responsible for locking ring and
  2849. * getting a zeroed out request packet.
  2850. *
  2851. * Input:
  2852. * ha = adapter block pointer.
  2853. *
  2854. * Returns:
  2855. * 0 = failed to get slot.
  2856. */
  2857. static request_t *
  2858. qla1280_req_pkt(struct scsi_qla_host *ha)
  2859. {
  2860. struct device_reg __iomem *reg = ha->iobase;
  2861. request_t *pkt = NULL;
  2862. int cnt;
  2863. uint32_t timer;
  2864. ENTER("qla1280_req_pkt");
  2865. /*
  2866. * This can be called from interrupt context, damn it!!!
  2867. */
  2868. /* Wait for 30 seconds for slot. */
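/* 15,000,000 iterations of (at least) udelay(2) gives the 30 seconds. */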
  2869. for (timer = 15000000; timer; timer--) {
  2870. if (ha->req_q_cnt > 0) {
  2871. /* Calculate number of free request entries. */
  2872. cnt = RD_REG_WORD(&reg->mailbox4);
  2873. if (ha->req_ring_index < cnt)
  2874. ha->req_q_cnt = cnt - ha->req_ring_index;
  2875. else
  2876. ha->req_q_cnt =
  2877. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2878. }
  2879. /* Found empty request ring slot? */
  2880. if (ha->req_q_cnt > 0) {
  2881. ha->req_q_cnt--;
  2882. pkt = ha->request_ring_ptr;
  2883. /* Zero out packet. */
  2884. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2885. /*
  2886. * How can this be right when we have a ring
  2887. * size of 512???
  2888. */
  2889. /* Set system defined field. */
  2890. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2891. /* Set entry count. */
  2892. pkt->entry_count = 1;
  2893. break;
  2894. }
  2895. udelay(2); /* 10 */
  2896. /* Check for pending interrupts. */
  2897. qla1280_poll(ha);
  2898. }
  2899. if (!pkt)
  2900. dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
  2901. else
  2902. dprintk(3, "qla1280_req_pkt: exiting normally\n");
  2903. return pkt;
  2904. }
  2905. /*
  2906. * qla1280_isp_cmd
2907. * Advances the request ring in-pointer and notifies the ISP by
2908. * writing the new index to mailbox4.
  2909. *
  2910. * Input:
  2911. * ha = adapter block pointer.
  2912. */
  2913. static void
  2914. qla1280_isp_cmd(struct scsi_qla_host *ha)
  2915. {
  2916. struct device_reg __iomem *reg = ha->iobase;
  2917. ENTER("qla1280_isp_cmd");
  2918. dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
  2919. qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
  2920. REQUEST_ENTRY_SIZE);
  2921. /* Adjust ring index. */
  2922. ha->req_ring_index++;
  2923. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2924. ha->req_ring_index = 0;
  2925. ha->request_ring_ptr = ha->request_ring;
  2926. } else
  2927. ha->request_ring_ptr++;
  2928. /*
  2929. * Update request index to mailbox4 (Request Queue In).
  2930. * The mmiowb() ensures that this write is ordered with writes by other
  2931. * CPUs. Without the mmiowb(), it is possible for the following:
  2932. * CPUA posts write of index 5 to mailbox4
  2933. * CPUA releases host lock
  2934. * CPUB acquires host lock
  2935. * CPUB posts write of index 6 to mailbox4
  2936. * On PCI bus, order reverses and write of 6 posts, then index 5,
  2937. * causing chip to issue full queue of stale commands
  2938. * The mmiowb() prevents future writes from crossing the barrier.
  2939. * See Documentation/DocBook/deviceiobook.tmpl for more information.
  2940. */
  2941. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2942. mmiowb();
  2943. LEAVE("qla1280_isp_cmd");
  2944. }
  2945. /****************************************************************************/
  2946. /* Interrupt Service Routine. */
  2947. /****************************************************************************/
  2948. /****************************************************************************
  2949. * qla1280_isr
  2950. * Calls I/O done on command completion.
  2951. *
  2952. * Input:
  2953. * ha = adapter block pointer.
  2954. * done_q = done queue.
  2955. ****************************************************************************/
  2956. static void
  2957. qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
  2958. {
  2959. struct device_reg __iomem *reg = ha->iobase;
  2960. struct response *pkt;
  2961. struct srb *sp = NULL;
  2962. uint16_t mailbox[MAILBOX_REGISTER_COUNT];
  2963. uint16_t *wptr;
  2964. uint32_t index;
  2965. u16 istatus;
  2966. ENTER("qla1280_isr");
  2967. istatus = RD_REG_WORD(&reg->istatus);
  2968. if (!(istatus & (RISC_INT | PCI_INT)))
  2969. return;
  2970. /* Save mailbox register 5 */
  2971. mailbox[5] = RD_REG_WORD(&reg->mailbox5);
  2972. /* Check for mailbox interrupt. */
  2973. mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
  2974. if (mailbox[0] & BIT_0) {
  2975. /* Get mailbox data. */
  2976. /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
  2977. wptr = &mailbox[0];
  2978. *wptr++ = RD_REG_WORD(&reg->mailbox0);
  2979. *wptr++ = RD_REG_WORD(&reg->mailbox1);
  2980. *wptr = RD_REG_WORD(&reg->mailbox2);
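/*
 * Fast-path SCSI completions only need mailboxes 1 and 2, which carry
 * the command handle.  For every other event the remaining mailboxes
 * 3, 4, 6 and 7 are fetched as well; mailbox 5 was already latched
 * above from reg->mailbox5.
 */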
  2981. if (mailbox[0] != MBA_SCSI_COMPLETION) {
  2982. wptr++;
  2983. *wptr++ = RD_REG_WORD(&reg->mailbox3);
  2984. *wptr++ = RD_REG_WORD(&reg->mailbox4);
  2985. wptr++;
  2986. *wptr++ = RD_REG_WORD(&reg->mailbox6);
  2987. *wptr = RD_REG_WORD(&reg->mailbox7);
  2988. }
  2989. /* Release mailbox registers. */
  2990. WRT_REG_WORD(&reg->semaphore, 0);
  2991. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  2992. dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
  2993. mailbox[0]);
  2994. /* Handle asynchronous event */
  2995. switch (mailbox[0]) {
  2996. case MBA_SCSI_COMPLETION: /* Response completion */
  2997. dprintk(5, "qla1280_isr: mailbox SCSI response "
  2998. "completion\n");
  2999. if (ha->flags.online) {
  3000. /* Get outstanding command index. */
  3001. index = mailbox[2] << 16 | mailbox[1];
  3002. /* Validate handle. */
  3003. if (index < MAX_OUTSTANDING_COMMANDS)
  3004. sp = ha->outstanding_cmds[index];
  3005. else
  3006. sp = NULL;
  3007. if (sp) {
  3008. /* Free outstanding command slot. */
  3009. ha->outstanding_cmds[index] = NULL;
  3010. /* Save ISP completion status */
  3011. CMD_RESULT(sp->cmd) = 0;
  3012. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  3013. /* Place block on done queue */
  3014. list_add_tail(&sp->list, done_q);
  3015. } else {
  3016. /*
  3017. * If we get here we have a real problem!
  3018. */
  3019. printk(KERN_WARNING
  3020. "qla1280: ISP invalid handle\n");
  3021. }
  3022. }
  3023. break;
  3024. case MBA_BUS_RESET: /* SCSI Bus Reset */
  3025. ha->flags.reset_marker = 1;
  3026. index = mailbox[6] & BIT_0;
  3027. ha->bus_settings[index].reset_marker = 1;
  3028. printk(KERN_DEBUG "qla1280_isr(): index %i "
  3029. "asynchronous BUS_RESET\n", index);
  3030. break;
  3031. case MBA_SYSTEM_ERR: /* System Error */
  3032. printk(KERN_WARNING
  3033. "qla1280: ISP System Error - mbx1=%xh, mbx2="
  3034. "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
  3035. mailbox[3]);
  3036. break;
  3037. case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
  3038. printk(KERN_WARNING
  3039. "qla1280: ISP Request Transfer Error\n");
  3040. break;
  3041. case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
  3042. printk(KERN_WARNING
  3043. "qla1280: ISP Response Transfer Error\n");
  3044. break;
  3045. case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
  3046. dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
  3047. break;
  3048. case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
  3049. dprintk(2,
  3050. "qla1280_isr: asynchronous TIMEOUT_RESET\n");
  3051. break;
  3052. case MBA_DEVICE_RESET: /* Bus Device Reset */
  3053. printk(KERN_INFO "qla1280_isr(): asynchronous "
  3054. "BUS_DEVICE_RESET\n");
  3055. ha->flags.reset_marker = 1;
  3056. index = mailbox[6] & BIT_0;
  3057. ha->bus_settings[index].reset_marker = 1;
  3058. break;
  3059. case MBA_BUS_MODE_CHANGE:
  3060. dprintk(2,
  3061. "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
  3062. break;
  3063. default:
  3064. /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
  3065. if (mailbox[0] < MBA_ASYNC_EVENT) {
  3066. wptr = &mailbox[0];
  3067. memcpy((uint16_t *) ha->mailbox_out, wptr,
  3068. MAILBOX_REGISTER_COUNT *
  3069. sizeof(uint16_t));
  3070. if(ha->mailbox_wait != NULL)
  3071. complete(ha->mailbox_wait);
  3072. }
  3073. break;
  3074. }
  3075. } else {
  3076. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  3077. }
  3078. /*
  3079. * We will receive interrupts during mailbox testing prior to
  3080. * the card being marked online, hence the double check.
  3081. */
  3082. if (!(ha->flags.online && !ha->mailbox_wait)) {
  3083. dprintk(2, "qla1280_isr: Response pointer Error\n");
  3084. goto out;
  3085. }
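/*
 * mailbox[5] holds the response-queue in-pointer reported by the ISP.
 * Ignore out-of-range values, then walk the response ring until our
 * rsp_ring_index catches up with it.
 */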
  3086. if (mailbox[5] >= RESPONSE_ENTRY_CNT)
  3087. goto out;
  3088. while (ha->rsp_ring_index != mailbox[5]) {
  3089. pkt = ha->response_ring_ptr;
  3090. dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
  3091. " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
  3092. dprintk(5,"qla1280_isr: response packet data\n");
  3093. qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
  3094. if (pkt->entry_type == STATUS_TYPE) {
  3095. if ((le16_to_cpu(pkt->scsi_status) & 0xff)
  3096. || pkt->comp_status || pkt->entry_status) {
  3097. dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
  3098. "0x%x mailbox[5] = 0x%x, comp_status "
  3099. "= 0x%x, scsi_status = 0x%x\n",
  3100. ha->rsp_ring_index, mailbox[5],
  3101. le16_to_cpu(pkt->comp_status),
  3102. le16_to_cpu(pkt->scsi_status));
  3103. }
  3104. } else {
  3105. dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
  3106. "0x%x, mailbox[5] = 0x%x\n",
  3107. ha->rsp_ring_index, mailbox[5]);
  3108. dprintk(2, "qla1280_isr: response packet data\n");
  3109. qla1280_dump_buffer(2, (char *)pkt,
  3110. RESPONSE_ENTRY_SIZE);
  3111. }
  3112. if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
  3113. dprintk(2, "status: Cmd %p, handle %i\n",
  3114. ha->outstanding_cmds[pkt->handle]->cmd,
  3115. pkt->handle);
  3116. if (pkt->entry_type == STATUS_TYPE)
  3117. qla1280_status_entry(ha, pkt, done_q);
  3118. else
  3119. qla1280_error_entry(ha, pkt, done_q);
  3120. /* Adjust ring index. */
  3121. ha->rsp_ring_index++;
  3122. if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
  3123. ha->rsp_ring_index = 0;
  3124. ha->response_ring_ptr = ha->response_ring;
  3125. } else
  3126. ha->response_ring_ptr++;
  3127. WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
  3128. }
  3129. }
  3130. out:
  3131. LEAVE("qla1280_isr");
  3132. }
  3133. /*
  3134. * qla1280_rst_aen
  3135. * Processes asynchronous reset.
  3136. *
  3137. * Input:
  3138. * ha = adapter block pointer.
  3139. */
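/*
 * After an asynchronous bus or device reset the ISP expects a marker
 * IOCB before new commands are started, so MK_SYNC_ALL is sent for
 * every bus whose reset_marker flag was set by the interrupt handler.
 */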
  3140. static void
  3141. qla1280_rst_aen(struct scsi_qla_host *ha)
  3142. {
  3143. uint8_t bus;
  3144. ENTER("qla1280_rst_aen");
  3145. if (ha->flags.online && !ha->flags.reset_active &&
  3146. !ha->flags.abort_isp_active) {
  3147. ha->flags.reset_active = 1;
  3148. while (ha->flags.reset_marker) {
  3149. /* Issue marker command. */
  3150. ha->flags.reset_marker = 0;
  3151. for (bus = 0; bus < ha->ports &&
  3152. !ha->flags.reset_marker; bus++) {
  3153. if (ha->bus_settings[bus].reset_marker) {
  3154. ha->bus_settings[bus].reset_marker = 0;
  3155. qla1280_marker(ha, bus, 0, 0,
  3156. MK_SYNC_ALL);
  3157. }
  3158. }
  3159. }
  3160. }
  3161. LEAVE("qla1280_rst_aen");
  3162. }
  3163. /*
  3164. * qla1280_status_entry
  3165. * Processes received ISP status entry.
  3166. *
  3167. * Input:
  3168. * ha = adapter block pointer.
  3169. * pkt = entry pointer.
  3170. * done_q = done queue.
  3171. */
  3172. static void
  3173. qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
  3174. struct list_head *done_q)
  3175. {
  3176. unsigned int bus, target, lun;
  3177. int sense_sz;
  3178. struct srb *sp;
  3179. struct scsi_cmnd *cmd;
  3180. uint32_t handle = le32_to_cpu(pkt->handle);
  3181. uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
  3182. uint16_t comp_status = le16_to_cpu(pkt->comp_status);
  3183. ENTER("qla1280_status_entry");
  3184. /* Validate handle. */
  3185. if (handle < MAX_OUTSTANDING_COMMANDS)
  3186. sp = ha->outstanding_cmds[handle];
  3187. else
  3188. sp = NULL;
  3189. if (!sp) {
  3190. printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
  3191. goto out;
  3192. }
  3193. /* Free outstanding command slot. */
  3194. ha->outstanding_cmds[handle] = NULL;
  3195. cmd = sp->cmd;
  3196. /* Generate LU queue on cntrl, target, LUN */
  3197. bus = SCSI_BUS_32(cmd);
  3198. target = SCSI_TCN_32(cmd);
  3199. lun = SCSI_LUN_32(cmd);
  3200. if (comp_status || scsi_status) {
  3201. dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
  3202. "0x%x, handle = 0x%x\n", comp_status,
  3203. scsi_status, handle);
  3204. }
  3205. /* Target busy or queue full */
  3206. if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
  3207. (scsi_status & 0xFF) == SAM_STAT_BUSY) {
  3208. CMD_RESULT(cmd) = scsi_status & 0xff;
  3209. } else {
  3210. /* Save ISP completion status */
  3211. CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
  3212. if (scsi_status & SAM_STAT_CHECK_CONDITION) {
  3213. if (comp_status != CS_ARS_FAILED) {
  3214. uint16_t req_sense_length =
  3215. le16_to_cpu(pkt->req_sense_length);
  3216. if (req_sense_length < CMD_SNSLEN(cmd))
  3217. sense_sz = req_sense_length;
  3218. else
  3219. /*
  3220. * scsi_cmnd->sense_buffer is
  3221. * 64 bytes, why only copy 63?
  3222. * This looks wrong! /Jes
  3223. */
  3224. sense_sz = CMD_SNSLEN(cmd) - 1;
  3225. memcpy(cmd->sense_buffer,
  3226. &pkt->req_sense_data, sense_sz);
  3227. } else
  3228. sense_sz = 0;
  3229. memset(cmd->sense_buffer + sense_sz, 0,
  3230. SCSI_SENSE_BUFFERSIZE - sense_sz);
  3231. dprintk(2, "qla1280_status_entry: Check "
  3232. "condition Sense data, b %i, t %i, "
  3233. "l %i\n", bus, target, lun);
  3234. if (sense_sz)
  3235. qla1280_dump_buffer(2,
  3236. (char *)cmd->sense_buffer,
  3237. sense_sz);
  3238. }
  3239. }
  3240. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  3241. /* Place command on done queue. */
  3242. list_add_tail(&sp->list, done_q);
  3243. out:
  3244. LEAVE("qla1280_status_entry");
  3245. }
  3246. /*
  3247. * qla1280_error_entry
  3248. * Processes error entry.
  3249. *
  3250. * Input:
  3251. * ha = adapter block pointer.
  3252. * pkt = entry pointer.
  3253. * done_q = done queue.
  3254. */
  3255. static void
  3256. qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
  3257. struct list_head *done_q)
  3258. {
  3259. struct srb *sp;
  3260. uint32_t handle = le32_to_cpu(pkt->handle);
  3261. ENTER("qla1280_error_entry");
  3262. if (pkt->entry_status & BIT_3)
  3263. dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
  3264. else if (pkt->entry_status & BIT_2)
  3265. dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
  3266. else if (pkt->entry_status & BIT_1)
  3267. dprintk(2, "qla1280_error_entry: FULL flag error\n");
  3268. else
  3269. dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
  3270. /* Validate handle. */
  3271. if (handle < MAX_OUTSTANDING_COMMANDS)
  3272. sp = ha->outstanding_cmds[handle];
  3273. else
  3274. sp = NULL;
  3275. if (sp) {
  3276. /* Free outstanding command slot. */
  3277. ha->outstanding_cmds[handle] = NULL;
  3278. /* Bad payload or header */
  3279. if (pkt->entry_status & (BIT_3 + BIT_2)) {
  3280. /* Bad payload or header, set error status. */
  3281. /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
  3282. CMD_RESULT(sp->cmd) = DID_ERROR << 16;
  3283. } else if (pkt->entry_status & BIT_1) { /* FULL flag */
  3284. CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
  3285. } else {
  3286. /* Set error status. */
  3287. CMD_RESULT(sp->cmd) = DID_ERROR << 16;
  3288. }
  3289. CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
  3290. /* Place command on done queue. */
  3291. list_add_tail(&sp->list, done_q);
  3292. }
  3293. #ifdef QLA_64BIT_PTR
  3294. else if (pkt->entry_type == COMMAND_A64_TYPE) {
3295. printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
  3296. }
  3297. #endif
  3298. LEAVE("qla1280_error_entry");
  3299. }
  3300. /*
  3301. * qla1280_abort_isp
  3302. * Resets ISP and aborts all outstanding commands.
  3303. *
  3304. * Input:
  3305. * ha = adapter block pointer.
  3306. *
  3307. * Returns:
  3308. * 0 = success
  3309. */
  3310. static int
  3311. qla1280_abort_isp(struct scsi_qla_host *ha)
  3312. {
  3313. struct device_reg __iomem *reg = ha->iobase;
  3314. struct srb *sp;
  3315. int status = 0;
  3316. int cnt;
  3317. int bus;
  3318. ENTER("qla1280_abort_isp");
  3319. if (ha->flags.abort_isp_active || !ha->flags.online)
  3320. goto out;
  3321. ha->flags.abort_isp_active = 1;
  3322. /* Disable ISP interrupts. */
  3323. qla1280_disable_intrs(ha);
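/*
 * Pause the RISC; the read of id_l flushes the posted PCI write before
 * the outstanding commands are dequeued below.
 */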
  3324. WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
  3325. RD_REG_WORD(&reg->id_l);
  3326. printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
  3327. ha->host_no);
  3328. /* Dequeue all commands in outstanding command list. */
  3329. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
  3330. struct scsi_cmnd *cmd;
  3331. sp = ha->outstanding_cmds[cnt];
  3332. if (sp) {
  3333. cmd = sp->cmd;
  3334. CMD_RESULT(cmd) = DID_RESET << 16;
  3335. CMD_HANDLE(cmd) = COMPLETED_HANDLE;
  3336. ha->outstanding_cmds[cnt] = NULL;
  3337. list_add_tail(&sp->list, &ha->done_q);
  3338. }
  3339. }
  3340. qla1280_done(ha);
  3341. status = qla1280_load_firmware(ha);
  3342. if (status)
  3343. goto out;
  3344. /* Setup adapter based on NVRAM parameters. */
  3345. qla1280_nvram_config (ha);
  3346. status = qla1280_init_rings(ha);
  3347. if (status)
  3348. goto out;
  3349. /* Issue SCSI reset. */
  3350. for (bus = 0; bus < ha->ports; bus++)
  3351. qla1280_bus_reset(ha, bus);
  3352. ha->flags.abort_isp_active = 0;
  3353. out:
  3354. if (status) {
  3355. printk(KERN_WARNING
  3356. "qla1280: ISP error recovery failed, board disabled");
  3357. qla1280_reset_adapter(ha);
  3358. dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
  3359. }
  3360. LEAVE("qla1280_abort_isp");
  3361. return status;
  3362. }
  3363. /*
  3364. * qla1280_debounce_register
  3365. * Debounce register.
  3366. *
  3367. * Input:
  3368. * port = register address.
  3369. *
  3370. * Returns:
  3371. * register value.
  3372. */
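/*
 * The register can change underneath us while the RISC updates it, so
 * keep reading until two consecutive reads return the same value.
 */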
  3373. static u16
  3374. qla1280_debounce_register(volatile u16 __iomem * addr)
  3375. {
  3376. volatile u16 ret;
  3377. volatile u16 ret2;
  3378. ret = RD_REG_WORD(addr);
  3379. ret2 = RD_REG_WORD(addr);
  3380. if (ret == ret2)
  3381. return ret;
  3382. do {
  3383. cpu_relax();
  3384. ret = RD_REG_WORD(addr);
  3385. ret2 = RD_REG_WORD(addr);
  3386. } while (ret != ret2);
  3387. return ret;
  3388. }
  3389. /************************************************************************
  3390. * qla1280_check_for_dead_scsi_bus *
  3391. * *
  3392. * This routine checks for a dead SCSI bus *
  3393. ************************************************************************/
  3394. #define SET_SXP_BANK 0x0100
  3395. #define SCSI_PHASE_INVALID 0x87FF
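/*
 * Only a bus already flagged scsi_bus_dead is re-checked here: with the
 * RISC paused, cfg_1 is temporarily switched to the SXP register bank
 * so the SCSI control pins can be sampled, and a reading of
 * SCSI_PHASE_INVALID (0x87FF) keeps the bus marked dead.
 */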
  3396. static int
  3397. qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
  3398. {
  3399. uint16_t config_reg, scsi_control;
  3400. struct device_reg __iomem *reg = ha->iobase;
  3401. if (ha->bus_settings[bus].scsi_bus_dead) {
  3402. WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
  3403. config_reg = RD_REG_WORD(&reg->cfg_1);
  3404. WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
  3405. scsi_control = RD_REG_WORD(&reg->scsiControlPins);
  3406. WRT_REG_WORD(&reg->cfg_1, config_reg);
  3407. WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
  3408. if (scsi_control == SCSI_PHASE_INVALID) {
  3409. ha->bus_settings[bus].scsi_bus_dead = 1;
  3410. return 1; /* bus is dead */
  3411. } else {
  3412. ha->bus_settings[bus].scsi_bus_dead = 0;
  3413. ha->bus_settings[bus].failed_reset_count = 0;
  3414. }
  3415. }
  3416. return 0; /* bus is not dead */
  3417. }
  3418. static void
  3419. qla1280_get_target_parameters(struct scsi_qla_host *ha,
  3420. struct scsi_device *device)
  3421. {
  3422. uint16_t mb[MAILBOX_REGISTER_COUNT];
  3423. int bus, target, lun;
  3424. bus = device->channel;
  3425. target = device->id;
  3426. lun = device->lun;
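/*
 * MBC_GET_TARGET_PARAMETERS takes the target number in the upper byte
 * of mailbox 1, with BIT_7 selecting the second SCSI bus.
 */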
  3427. mb[0] = MBC_GET_TARGET_PARAMETERS;
  3428. mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
  3429. mb[1] <<= 8;
  3430. qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
  3431. &mb[0]);
  3432. printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
  3433. if (mb[3] != 0) {
  3434. printk(" Sync: period %d, offset %d",
  3435. (mb[3] & 0xff), (mb[3] >> 8));
  3436. if (mb[2] & BIT_13)
  3437. printk(", Wide");
  3438. if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
  3439. printk(", DT");
  3440. } else
  3441. printk(" Async");
  3442. if (device->simple_tags)
  3443. printk(", Tagged queuing: depth %d", device->queue_depth);
  3444. printk("\n");
  3445. }
  3446. #if DEBUG_QLA1280
  3447. static void
  3448. __qla1280_dump_buffer(char *b, int size)
  3449. {
  3450. int cnt;
  3451. u8 c;
  3452. printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
  3453. "Bh Ch Dh Eh Fh\n");
  3454. printk(KERN_DEBUG "---------------------------------------------"
  3455. "------------------\n");
  3456. for (cnt = 0; cnt < size;) {
  3457. c = *b++;
  3458. printk("0x%02x", c);
  3459. cnt++;
  3460. if (!(cnt % 16))
  3461. printk("\n");
  3462. else
  3463. printk(" ");
  3464. }
  3465. if (cnt % 16)
  3466. printk("\n");
  3467. }
  3468. /**************************************************************************
  3469. * ql1280_print_scsi_cmd
  3470. *
  3471. **************************************************************************/
  3472. static void
  3473. __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
  3474. {
  3475. struct scsi_qla_host *ha;
  3476. struct Scsi_Host *host = CMD_HOST(cmd);
  3477. struct srb *sp;
  3478. /* struct scatterlist *sg; */
  3479. int i;
  3480. ha = (struct scsi_qla_host *)host->hostdata;
  3481. sp = (struct srb *)CMD_SP(cmd);
  3482. printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
  3483. printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
  3484. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
  3485. CMD_CDBLEN(cmd));
  3486. printk(" CDB = ");
  3487. for (i = 0; i < cmd->cmd_len; i++) {
  3488. printk("0x%02x ", cmd->cmnd[i]);
  3489. }
  3490. printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
  3491. printk(" request buffer=0x%p, request buffer len=0x%x\n",
  3492. scsi_sglist(cmd), scsi_bufflen(cmd));
  3493. /* if (cmd->use_sg)
  3494. {
  3495. sg = (struct scatterlist *) cmd->request_buffer;
  3496. printk(" SG buffer: \n");
  3497. qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
  3498. } */
  3499. printk(" tag=%d, transfersize=0x%x \n",
  3500. cmd->tag, cmd->transfersize);
  3501. printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd));
  3502. printk(" underflow size = 0x%x, direction=0x%x\n",
  3503. cmd->underflow, cmd->sc_data_direction);
  3504. }
  3505. /**************************************************************************
  3506. * ql1280_dump_device
  3507. *
  3508. **************************************************************************/
  3509. static void
  3510. ql1280_dump_device(struct scsi_qla_host *ha)
  3511. {
  3512. struct scsi_cmnd *cp;
  3513. struct srb *sp;
  3514. int i;
  3515. printk(KERN_DEBUG "Outstanding Commands on controller:\n");
  3516. for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
  3517. if ((sp = ha->outstanding_cmds[i]) == NULL)
  3518. continue;
  3519. if ((cp = sp->cmd) == NULL)
  3520. continue;
  3521. qla1280_print_scsi_cmd(1, cp);
  3522. }
  3523. }
  3524. #endif
  3525. enum tokens {
  3526. TOKEN_NVRAM,
  3527. TOKEN_SYNC,
  3528. TOKEN_WIDE,
  3529. TOKEN_PPR,
  3530. TOKEN_VERBOSE,
  3531. TOKEN_DEBUG,
  3532. };
  3533. struct setup_tokens {
  3534. char *token;
  3535. int val;
  3536. };
  3537. static struct setup_tokens setup_token[] __initdata =
  3538. {
  3539. { "nvram", TOKEN_NVRAM },
  3540. { "sync", TOKEN_SYNC },
  3541. { "wide", TOKEN_WIDE },
  3542. { "ppr", TOKEN_PPR },
  3543. { "verbose", TOKEN_VERBOSE },
  3544. { "debug", TOKEN_DEBUG },
  3545. };
  3546. /**************************************************************************
  3547. * qla1280_setup
  3548. *
  3549. * Handle boot parameters. This really needs to be changed so one
3550. * can specify per-adapter parameters.
  3551. **************************************************************************/
  3552. static int __init
  3553. qla1280_setup(char *s)
  3554. {
  3555. char *cp, *ptr;
  3556. unsigned long val;
  3557. int toke;
  3558. cp = s;
  3559. while (cp && (ptr = strchr(cp, ':'))) {
  3560. ptr++;
  3561. if (!strcmp(ptr, "yes")) {
  3562. val = 0x10000;
  3563. ptr += 3;
  3564. } else if (!strcmp(ptr, "no")) {
  3565. val = 0;
  3566. ptr += 2;
  3567. } else
  3568. val = simple_strtoul(ptr, &ptr, 0);
  3569. switch ((toke = qla1280_get_token(cp))) {
  3570. case TOKEN_NVRAM:
  3571. if (!val)
  3572. driver_setup.no_nvram = 1;
  3573. break;
  3574. case TOKEN_SYNC:
  3575. if (!val)
  3576. driver_setup.no_sync = 1;
  3577. else if (val != 0x10000)
  3578. driver_setup.sync_mask = val;
  3579. break;
  3580. case TOKEN_WIDE:
  3581. if (!val)
  3582. driver_setup.no_wide = 1;
  3583. else if (val != 0x10000)
  3584. driver_setup.wide_mask = val;
  3585. break;
  3586. case TOKEN_PPR:
  3587. if (!val)
  3588. driver_setup.no_ppr = 1;
  3589. else if (val != 0x10000)
  3590. driver_setup.ppr_mask = val;
  3591. break;
  3592. case TOKEN_VERBOSE:
  3593. qla1280_verbose = val;
  3594. break;
  3595. default:
  3596. printk(KERN_INFO "qla1280: unknown boot option %s\n",
  3597. cp);
  3598. }
  3599. cp = strchr(ptr, ';');
  3600. if (cp)
  3601. cp++;
  3602. else {
  3603. break;
  3604. }
  3605. }
  3606. return 1;
  3607. }
  3608. static int __init
  3609. qla1280_get_token(char *str)
  3610. {
  3611. char *sep;
  3612. long ret = -1;
  3613. int i;
  3614. sep = strchr(str, ':');
  3615. if (sep) {
  3616. for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
  3617. if (!strncmp(setup_token[i].token, str, (sep - str))) {
  3618. ret = setup_token[i].val;
  3619. break;
  3620. }
  3621. }
  3622. }
  3623. return ret;
  3624. }
  3625. static struct scsi_host_template qla1280_driver_template = {
  3626. .module = THIS_MODULE,
  3627. .proc_name = "qla1280",
  3628. .name = "Qlogic ISP 1280/12160",
  3629. .info = qla1280_info,
  3630. .slave_configure = qla1280_slave_configure,
  3631. .queuecommand = qla1280_queuecommand,
  3632. .eh_abort_handler = qla1280_eh_abort,
  3633. .eh_device_reset_handler= qla1280_eh_device_reset,
  3634. .eh_bus_reset_handler = qla1280_eh_bus_reset,
  3635. .eh_host_reset_handler = qla1280_eh_adapter_reset,
  3636. .bios_param = qla1280_biosparam,
  3637. .can_queue = 0xfffff,
  3638. .this_id = -1,
  3639. .sg_tablesize = SG_ALL,
  3640. .cmd_per_lun = 1,
  3641. .use_clustering = ENABLE_CLUSTERING,
  3642. };
  3643. static int __devinit
  3644. qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  3645. {
  3646. int devnum = id->driver_data;
  3647. struct qla_boards *bdp = &ql1280_board_tbl[devnum];
  3648. struct Scsi_Host *host;
  3649. struct scsi_qla_host *ha;
  3650. int error = -ENODEV;
  3651. /* Bypass all AMI SUBSYS VENDOR IDs */
  3652. if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
  3653. printk(KERN_INFO
  3654. "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
  3655. goto error;
  3656. }
  3657. printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
  3658. bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
  3659. if (pci_enable_device(pdev)) {
  3660. printk(KERN_WARNING
  3661. "qla1280: Failed to enabled pci device, aborting.\n");
  3662. goto error;
  3663. }
  3664. pci_set_master(pdev);
  3665. error = -ENOMEM;
  3666. host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
  3667. if (!host) {
  3668. printk(KERN_WARNING
  3669. "qla1280: Failed to register host, aborting.\n");
  3670. goto error_disable_device;
  3671. }
  3672. ha = (struct scsi_qla_host *)host->hostdata;
  3673. memset(ha, 0, sizeof(struct scsi_qla_host));
  3674. ha->pdev = pdev;
  3675. ha->devnum = devnum; /* specifies microcode load address */
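/*
 * Prefer 64-bit DMA addressing when built with QLA_64BIT_PTR, falling
 * back to a 32-bit mask if the platform cannot provide it; otherwise a
 * 32-bit mask is mandatory.
 */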
  3676. #ifdef QLA_64BIT_PTR
  3677. if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
  3678. if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
  3679. printk(KERN_WARNING "scsi(%li): Unable to set a "
  3680. "suitable DMA mask - aborting\n", ha->host_no);
  3681. error = -ENODEV;
  3682. goto error_put_host;
  3683. }
  3684. } else
  3685. dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
  3686. ha->host_no);
  3687. #else
  3688. if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
  3689. printk(KERN_WARNING "scsi(%li): Unable to set a "
  3690. "suitable DMA mask - aborting\n", ha->host_no);
  3691. error = -ENODEV;
  3692. goto error_put_host;
  3693. }
  3694. #endif
  3695. ha->request_ring = pci_alloc_consistent(ha->pdev,
  3696. ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
  3697. &ha->request_dma);
  3698. if (!ha->request_ring) {
  3699. printk(KERN_INFO "qla1280: Failed to get request memory\n");
  3700. goto error_put_host;
  3701. }
  3702. ha->response_ring = pci_alloc_consistent(ha->pdev,
  3703. ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
  3704. &ha->response_dma);
  3705. if (!ha->response_ring) {
  3706. printk(KERN_INFO "qla1280: Failed to get response memory\n");
  3707. goto error_free_request_ring;
  3708. }
  3709. ha->ports = bdp->numPorts;
  3710. ha->host = host;
  3711. ha->host_no = host->host_no;
  3712. host->irq = pdev->irq;
  3713. host->max_channel = bdp->numPorts - 1;
  3714. host->max_lun = MAX_LUNS - 1;
  3715. host->max_id = MAX_TARGETS;
  3716. host->max_sectors = 1024;
  3717. host->unique_id = host->host_no;
  3718. error = -ENODEV;
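/*
 * MEMORY_MAPPED_IO selects between mapping BAR 1 for memory-mapped
 * register access and reserving the BAR 0 I/O port range.
 */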
  3719. #if MEMORY_MAPPED_IO
  3720. ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
  3721. if (!ha->mmpbase) {
  3722. printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
  3723. goto error_free_response_ring;
  3724. }
  3725. host->base = (unsigned long)ha->mmpbase;
  3726. ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
  3727. #else
  3728. host->io_port = pci_resource_start(ha->pdev, 0);
  3729. if (!request_region(host->io_port, 0xff, "qla1280")) {
  3730. printk(KERN_INFO "qla1280: Failed to reserve i/o region "
  3731. "0x%04lx-0x%04lx - already in use\n",
  3732. host->io_port, host->io_port + 0xff);
  3733. goto error_free_response_ring;
  3734. }
  3735. ha->iobase = (struct device_reg *)host->io_port;
  3736. #endif
  3737. INIT_LIST_HEAD(&ha->done_q);
  3738. /* Disable ISP interrupts. */
  3739. qla1280_disable_intrs(ha);
  3740. if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
  3741. "qla1280", ha)) {
  3742. printk("qla1280 : Failed to reserve interrupt %d already "
  3743. "in use\n", pdev->irq);
  3744. goto error_release_region;
  3745. }
3746. /* load the F/W, read parameters, and init the H/W */
  3747. if (qla1280_initialize_adapter(ha)) {
  3748. printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
  3749. goto error_free_irq;
  3750. }
  3751. /* set our host ID (need to do something about our two IDs) */
  3752. host->this_id = ha->bus_settings[0].id;
  3753. pci_set_drvdata(pdev, host);
  3754. error = scsi_add_host(host, &pdev->dev);
  3755. if (error)
  3756. goto error_disable_adapter;
  3757. scsi_scan_host(host);
  3758. return 0;
  3759. error_disable_adapter:
  3760. qla1280_disable_intrs(ha);
  3761. error_free_irq:
  3762. free_irq(pdev->irq, ha);
  3763. error_release_region:
  3764. #if MEMORY_MAPPED_IO
  3765. iounmap(ha->mmpbase);
  3766. #else
  3767. release_region(host->io_port, 0xff);
  3768. #endif
  3769. error_free_response_ring:
  3770. pci_free_consistent(ha->pdev,
  3771. ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
  3772. ha->response_ring, ha->response_dma);
  3773. error_free_request_ring:
  3774. pci_free_consistent(ha->pdev,
  3775. ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
  3776. ha->request_ring, ha->request_dma);
  3777. error_put_host:
  3778. scsi_host_put(host);
  3779. error_disable_device:
  3780. pci_disable_device(pdev);
  3781. error:
  3782. return error;
  3783. }
  3784. static void __devexit
  3785. qla1280_remove_one(struct pci_dev *pdev)
  3786. {
  3787. struct Scsi_Host *host = pci_get_drvdata(pdev);
  3788. struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
  3789. scsi_remove_host(host);
  3790. qla1280_disable_intrs(ha);
  3791. free_irq(pdev->irq, ha);
  3792. #if MEMORY_MAPPED_IO
  3793. iounmap(ha->mmpbase);
  3794. #else
  3795. release_region(host->io_port, 0xff);
  3796. #endif
  3797. pci_free_consistent(ha->pdev,
  3798. ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
  3799. ha->request_ring, ha->request_dma);
  3800. pci_free_consistent(ha->pdev,
  3801. ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
  3802. ha->response_ring, ha->response_dma);
  3803. pci_disable_device(pdev);
  3804. scsi_host_put(host);
  3805. }
  3806. static struct pci_driver qla1280_pci_driver = {
  3807. .name = "qla1280",
  3808. .id_table = qla1280_pci_tbl,
  3809. .probe = qla1280_probe_one,
  3810. .remove = __devexit_p(qla1280_remove_one),
  3811. };
  3812. static int __init
  3813. qla1280_init(void)
  3814. {
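/*
 * The per-command struct srb is stored in the scsi_cmnd's SCp area
 * (see CMD_SP), so it must not outgrow struct scsi_pointer - hence
 * the size check below.
 */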
  3815. if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
  3816. printk(KERN_WARNING
  3817. "qla1280: struct srb too big, aborting\n");
  3818. return -EINVAL;
  3819. }
  3820. #ifdef MODULE
  3821. /*
3822. * If we are called as a module, the qla1280 pointer may be non-NULL,
3823. * pointing to our bootup string, just like on the lilo command
3824. * line. If it is not NULL, process the config string with
3825. * qla1280_setup().
3826. *
3827. * Boot time options
3828. * To add options at boot time, add a line to your lilo.conf file like:
3829. * append="qla1280=verbose:1;nvram:0"
3830. * Options are 'token:value' pairs separated by ';'; the tokens handled
3831. * by qla1280_setup() are nvram, sync, wide, ppr and verbose.
  3832. */
  3833. if (qla1280)
  3834. qla1280_setup(qla1280);
  3835. #endif
  3836. return pci_register_driver(&qla1280_pci_driver);
  3837. }
  3838. static void __exit
  3839. qla1280_exit(void)
  3840. {
  3841. int i;
  3842. pci_unregister_driver(&qla1280_pci_driver);
  3843. /* release any allocated firmware images */
  3844. for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
  3845. if (qla1280_fw_tbl[i].fw) {
  3846. release_firmware(qla1280_fw_tbl[i].fw);
  3847. qla1280_fw_tbl[i].fw = NULL;
  3848. }
  3849. }
  3850. }
  3851. module_init(qla1280_init);
  3852. module_exit(qla1280_exit);
  3853. MODULE_AUTHOR("Qlogic & Jes Sorensen");
  3854. MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
  3855. MODULE_LICENSE("GPL");
  3856. MODULE_FIRMWARE("qlogic/1040.bin");
  3857. MODULE_FIRMWARE("qlogic/1280.bin");
  3858. MODULE_FIRMWARE("qlogic/12160.bin");
  3859. MODULE_VERSION(QLA1280_VERSION);
  3860. /*
  3861. * Overrides for Emacs so that we almost follow Linus's tabbing style.
  3862. * Emacs will notice this stuff at the end of the file and automatically
  3863. * adjust the settings for this buffer only. This must remain at the end
  3864. * of the file.
  3865. * ---------------------------------------------------------------------------
  3866. * Local variables:
  3867. * c-basic-offset: 8
  3868. * tab-width: 8
  3869. * End:
  3870. */