/drivers/scsi/dc395x.c

http://github.com/mirrors/linux · C · 4757 lines · 3213 code · 594 blank · 950 comment · 492 complexity · c369baa06d6fcb78d1cc634b9caa8afb MD5 · raw file

Large files are truncated; click here to view the full file

  1. /*
  2. * dc395x.c
  3. *
  4. * Device Driver for Tekram DC395(U/UW/F), DC315(U)
  5. * PCI SCSI Bus Master Host Adapter
  6. * (SCSI chip set used Tekram ASIC TRM-S1040)
  7. *
  8. * Authors:
  9. * C.L. Huang <ching@tekram.com.tw>
  10. * Erich Chen <erich@tekram.com.tw>
  11. * (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
  12. *
  13. * Kurt Garloff <garloff@suse.de>
  14. * (C) 1999-2000 Kurt Garloff
  15. *
  16. * Oliver Neukum <oliver@neukum.name>
  17. * Ali Akcaagac <aliakc@web.de>
  18. * Jamie Lenehan <lenehan@twibble.org>
  19. * (C) 2003
  20. *
  21. * License: GNU GPL
  22. *
  23. *************************************************************************
  24. *
  25. * Redistribution and use in source and binary forms, with or without
  26. * modification, are permitted provided that the following conditions
  27. * are met:
  28. * 1. Redistributions of source code must retain the above copyright
  29. * notice, this list of conditions and the following disclaimer.
  30. * 2. Redistributions in binary form must reproduce the above copyright
  31. * notice, this list of conditions and the following disclaimer in the
  32. * documentation and/or other materials provided with the distribution.
  33. * 3. The name of the author may not be used to endorse or promote products
  34. * derived from this software without specific prior written permission.
  35. *
  36. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  37. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  38. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  39. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  40. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  41. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  42. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  43. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  44. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  45. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  46. *
  47. ************************************************************************
  48. */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <linux/slab.h>
  61. #include <asm/io.h>
  62. #include <scsi/scsi.h>
  63. #include <scsi/scsi_cmnd.h>
  64. #include <scsi/scsi_device.h>
  65. #include <scsi/scsi_host.h>
  66. #include "dc395x.h"
  67. #define DC395X_NAME "dc395x"
  68. #define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
  69. #define DC395X_VERSION "v2.05, 2004/03/08"
  70. /*---------------------------------------------------------------------------
  71. Features
  72. ---------------------------------------------------------------------------*/
  73. /*
  74. * Set to disable parts of the driver
  75. */
  76. /*#define DC395x_NO_DISCONNECT*/
  77. /*#define DC395x_NO_TAGQ*/
  78. /*#define DC395x_NO_SYNC*/
  79. /*#define DC395x_NO_WIDE*/
  80. /*---------------------------------------------------------------------------
  81. Debugging
  82. ---------------------------------------------------------------------------*/
  83. /*
  84. * Types of debugging that can be enabled and disabled
  85. */
  86. #define DBG_KG 0x0001
  87. #define DBG_0 0x0002
  88. #define DBG_1 0x0004
  89. #define DBG_SG 0x0020
  90. #define DBG_FIFO 0x0040
  91. #define DBG_PIO 0x0080
  92. /*
  93. * Set the set of things to output debugging for.
  94. * Undefine to remove all debugging
  95. */
  96. /*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
  97. /*#define DEBUG_MASK DBG_0*/
  98. /*
  99. * Output a kernel message at the specified level and append the
  100. * driver name and a ": " to the start of the message
  101. */
  102. #define dprintkl(level, format, arg...) \
  103. printk(level DC395X_NAME ": " format , ## arg)
  104. #ifdef DEBUG_MASK
  105. /*
  106. * print a debug message - this is formatted with KERN_DEBUG, then the
  107. * driver name followed by a ": " and then the message is output.
  108. * This also checks that the specified debug level is enabled before
  109. * outputting the message
  110. */
  111. #define dprintkdbg(type, format, arg...) \
  112. do { \
  113. if ((type) & (DEBUG_MASK)) \
  114. dprintkl(KERN_DEBUG , format , ## arg); \
  115. } while (0)
  116. /*
  117. * Check if the specified type of debugging is enabled
  118. */
  119. #define debug_enabled(type) ((DEBUG_MASK) & (type))
  120. #else
  121. /*
  122. * No debugging. Do nothing
  123. */
  124. #define dprintkdbg(type, format, arg...) \
  125. do {} while (0)
  126. #define debug_enabled(type) (0)
  127. #endif
  128. #ifndef PCI_VENDOR_ID_TEKRAM
  129. #define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
  130. #endif
  131. #ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
  132. #define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */
  133. #endif
  134. #define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
  135. #define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
  136. #define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
  137. #define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
  138. #define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
  139. #define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
  140. #define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
  141. #define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
  142. /* cmd->result */
  143. #define RES_TARGET 0x000000FF /* Target State */
  144. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  145. #define RES_ENDMSG 0x0000FF00 /* End Message */
  146. #define RES_DID 0x00FF0000 /* DID_ codes */
  147. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  148. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  149. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  150. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  151. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  152. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  153. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  154. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  155. #define TAG_NONE 255
/*
 * srb->segment_x is the hw sg list. It is always allocated as a
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)

/* One hardware scatter/gather element as the TRM-S1040 DMA engine reads it. */
struct SGentry {
	u32 address;		/* bus! address (32-bit; the chip cannot address above 4 GiB) */
	u32 length;		/* length of this segment in bytes */
};
/* The SEEPROM structure for TRM_S1040 */
/* Per-target settings as stored in the adapter's serial EEPROM. */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 (NTC_DO_* flags) */
	u8 period;		/* Target period (index into clock_period[]) */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};
/*
 * In-memory image of the 128-byte TRM-S1040 serial EEPROM contents.
 * Field comments give the byte offsets within the EEPROM.
 */
struct NvRamType {
	u8 sub_vendor_id[2];	/* 0,1 Sub Vendor ID */
	u8 sub_sys_id[2];	/* 2,3 Sub System ID */
	u8 sub_class;		/* 4 Sub Class */
	u8 vendor_id[2];	/* 5,6 Vendor ID */
	u8 device_id[2];	/* 7,8 Device ID */
	u8 reserved;		/* 9 Reserved */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
	/** 10,11,12,13
	 ** 14,15,16,17
	 ** ....
	 ** ....
	 ** 70,71,72,73
	 */
	u8 scsi_id;		/* 74 Host Adapter SCSI ID */
	u8 channel_cfg;		/* 75 Channel configuration */
	u8 delay_time;		/* 76 Power on delay time */
	u8 max_tag;		/* 77 Maximum tags (as 1<<max_tag) */
	u8 reserved0;		/* 78 */
	u8 boot_target;		/* 79 */
	u8 boot_lun;		/* 80 */
	u8 reserved1;		/* 81 */
	u16 reserved2[22];	/* 82,..125 */
	u16 cksum;		/* 126,127 */
};
/*
 * Driver-private state for a single SCSI request: the hw sg list,
 * transfer bookkeeping and the message/phase state machine fields.
 */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;	/* device this request targets */
	struct scsi_cmnd *cmd;		/* mid-layer command backing this srb */
	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	dma_addr_t sg_bus_addr;		/* Bus address of sg list (ie, of segment_x) */
	u8 sg_count;			/* No of HW sg entries for this request */
	u8 sg_index;			/* Index of HW sg entry for this request */
	size_t total_xfer_length;	/* Total number of bytes remaining to be transferred */
	size_t request_length;		/* Total number of bytes in this request */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	size_t xferred;			/* Saved copy of total_xfer_length */
	u16 state;			/* SRB_* state machine flags */
	u8 msgin_buf[6];		/* buffer for incoming SCSI messages */
	u8 msgout_buf[6];		/* buffer for outgoing SCSI messages */
	u8 adapter_status;
	u8 target_status;
	u8 msg_count;			/* valid bytes in msgout_buf */
	u8 end_message;
	u8 tag_number;			/* assigned queue tag, TAG_NONE (255) if untagged */
	u8 status;
	u8 retry_count;
	u8 flag;
	u8 scsi_phase;			/* current bus phase, PH_BUS_FREE initially */
};
/*
 * Per-device (target/LUN) control block: request queues plus the
 * negotiated sync/wide transfer parameters for the device.
 */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;	/* owning adapter */
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */
	struct ScsiReqBlk *active_srb;	/* srb currently on the bus, if any */
	u32 tag_mask;			/* bitmap of queue tags in use */
	u16 max_command;		/* max simultaneously outstanding commands */
	u8 target_id;			/* SCSI Target ID (SCSI Only) */
	u8 target_lun;			/* SCSI Log. Unit (SCSI Only) */
	u8 identify_msg;
	u8 dev_mode;			/* NTC_DO_* flags from eeprom/cmdline */
	u8 inquiry7;			/* To store Inquiry flags */
	u8 sync_mode;			/* 0:async mode */
	u8 min_nego_period;		/* for nego. */
	u8 sync_period;			/* for reg. */
	u8 sync_offset;			/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;
	u8 init_tcq_flag;		/* tagged-command-queueing init state */
};
/*
 * Per-adapter control block, stored in the Scsi_Host's hostdata.
 * Holds the io-port base, the device (dcb) list with its round-robin
 * scheduling pointer, the free srb pool and the eeprom settings.
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;
	unsigned long io_port_base;	/* base for the DC395x_read/write macros */
	unsigned long io_port_len;
	struct list_head dcb_list;	/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* next dcb to service (round robin) */
	struct DeviceCtlBlk *active_dcb;	/* dcb currently owning the bus */
	struct list_head srb_free_list;	/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;	/* points at the embedded 'srb' below */
	struct timer_list waiting_timer;	/* retries waiting_process_next() */
	struct timer_list selto_timer;	/* selection timeout timer */
	unsigned long last_reset;	/* jiffies-based post-reset quiet deadline */
	u16 srb_count;
	u8 sel_timeout;
	unsigned int irq_level;
	u8 tag_max_num;
	u8 acb_flag;			/* RESET_* state flags */
	u8 gmode2;
	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;
	u8 dcb_map[DC395x_MAX_SCSI_ID];	/* per-target bitmap of present LUNs */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];	/* dcb lookup by id/lun */
	struct pci_dev *dev;
	u8 msg_len;
	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];	/* backing store for srb_free_list */
	struct ScsiReqBlk srb;		/* spare srb used for request_sense etc. */
	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
  282. /*---------------------------------------------------------------------------
  283. Forward declarations
  284. ---------------------------------------------------------------------------*/
  285. static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  286. u16 *pscsi_status);
  287. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  288. u16 *pscsi_status);
  289. static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  290. u16 *pscsi_status);
  291. static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  292. u16 *pscsi_status);
  293. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  294. u16 *pscsi_status);
  295. static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  296. u16 *pscsi_status);
  297. static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  298. u16 *pscsi_status);
  299. static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  300. u16 *pscsi_status);
  301. static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  302. u16 *pscsi_status);
  303. static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  304. u16 *pscsi_status);
  305. static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  306. u16 *pscsi_status);
  307. static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  308. u16 *pscsi_status);
  309. static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  310. u16 *pscsi_status);
  311. static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  312. u16 *pscsi_status);
  313. static void set_basic_config(struct AdapterCtlBlk *acb);
  314. static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
  315. struct ScsiReqBlk *srb);
  316. static void reset_scsi_bus(struct AdapterCtlBlk *acb);
  317. static void data_io_transfer(struct AdapterCtlBlk *acb,
  318. struct ScsiReqBlk *srb, u16 io_dir);
  319. static void disconnect(struct AdapterCtlBlk *acb);
  320. static void reselect(struct AdapterCtlBlk *acb);
  321. static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  322. struct ScsiReqBlk *srb);
  323. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  324. struct ScsiReqBlk *srb);
  325. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  326. struct ScsiReqBlk *srb);
  327. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
  328. struct scsi_cmnd *cmd, u8 force);
  329. static void scsi_reset_detect(struct AdapterCtlBlk *acb);
  330. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
  331. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  332. struct ScsiReqBlk *srb);
  333. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  334. struct ScsiReqBlk *srb);
  335. static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  336. struct ScsiReqBlk *srb);
  337. static void set_xfer_rate(struct AdapterCtlBlk *acb,
  338. struct DeviceCtlBlk *dcb);
  339. static void waiting_timeout(struct timer_list *t);
  340. /*---------------------------------------------------------------------------
  341. Static Data
  342. ---------------------------------------------------------------------------*/
/* NOTE(review): shared scratch for sync negotiation elsewhere in the file —
 * confirm against the msgin handlers before relying on its meaning. */
static u16 current_sync_offset = 0;

/*
 * Jump tables of bus-phase handlers, indexed by the 3-bit SCSI phase
 * code (0-7). Entries are stored as void * and cast to the handler
 * type at the call site. The phase0 table presumably handles leaving
 * a phase and phase1 entering one — verify at the interrupt handler.
 */
static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop0,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};
static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop1,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};
  364. /*
  365. *Fast20: 000 50ns, 20.0 MHz
  366. * 001 75ns, 13.3 MHz
  367. * 010 100ns, 10.0 MHz
  368. * 011 125ns, 8.0 MHz
  369. * 100 150ns, 6.6 MHz
  370. * 101 175ns, 5.7 MHz
  371. * 110 200ns, 5.0 MHz
  372. * 111 250ns, 4.0 MHz
  373. *
  374. *Fast40(LVDS): 000 25ns, 40.0 MHz
  375. * 001 50ns, 20.0 MHz
  376. * 010 75ns, 13.3 MHz
  377. * 011 100ns, 10.0 MHz
  378. * 100 125ns, 8.0 MHz
  379. * 101 150ns, 6.6 MHz
  380. * 110 175ns, 5.7 MHz
  381. * 111 200ns, 5.0 MHz
  382. */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
/* Both tables are indexed by the 3-bit speed code from the eeprom
 * (see the Fast20/Fast40 table above); clock_speed[] is in 0.1 MHz. */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  387. /*---------------------------------------------------------------------------
  388. Configuration
  389. ---------------------------------------------------------------------------*/
  390. /*
  391. * Module/boot parameters currently effect *all* instances of the
  392. * card in the system.
  393. */
  394. /*
  395. * Command line parameters are stored in a structure below.
  396. * These are the indexes into the structure for the various
  397. * command line options.
  398. */
  399. #define CFG_ADAPTER_ID 0
  400. #define CFG_MAX_SPEED 1
  401. #define CFG_DEV_MODE 2
  402. #define CFG_ADAPTER_MODE 3
  403. #define CFG_TAGS 4
  404. #define CFG_RESET_DELAY 5
  405. #define CFG_NUM 6 /* number of configuration items */
  406. /*
  407. * Value used to indicate that a command line override
  408. * hasn't been used to modify the value.
  409. */
  410. #define CFG_PARAM_UNSET -1
/*
 * Hold command line parameters.
 */
/* One entry per CFG_* index in cfg_data[] below. */
struct ParameterData {
	int value;		/* value of this setting */
	int min;		/* minimum value */
	int max;		/* maximum value */
	int def;		/* default value (used when value is out of range) */
	int safe;		/* safe value (used when the 'safe' option is set) */
};
/*
 * Command line / module parameter storage, indexed by the CFG_*
 * constants. Each entry starts as CFG_PARAM_UNSET; module_param
 * writes into .value, then fix_settings() clamps out-of-range values
 * back to .def and set_safe_settings() may force .safe.
 */
static struct ParameterData cfg_data[] = {
	{ /* adapter id */
		CFG_PARAM_UNSET,
		0,
		15,
		7,
		7
	},
	{ /* max speed */
		CFG_PARAM_UNSET,
		0,
		7,
		1,	/* 13.3Mhz */
		4,	/* 6.7Mhz */
	},
	{ /* dev mode */
		CFG_PARAM_UNSET,
		0,
		0x3f,
		NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
			NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
			NTC_DO_SEND_START,
		NTC_DO_PARITY_CHK | NTC_DO_SEND_START
	},
	{ /* adapter mode */
		CFG_PARAM_UNSET,
		0,
		0x2f,
		NAC_SCANLUN |
			NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
			/*| NAC_ACTIVE_NEG*/,
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
	},
	{ /* tags */
		CFG_PARAM_UNSET,
		0,
		5,
		3,	/* 16 tags (??) */
		2,
	},
	{ /* reset delay */
		CFG_PARAM_UNSET,
		0,
		180,
		1,	/* 1 second */
		10,	/* 10 seconds */
	}
};
/*
 * Safe settings. If set to zero the BIOS/default values with
 * command line overrides will be used. If set to 1 then safe and
 * slow settings will be used.
 */
static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");

/* The remaining parameters write straight into cfg_data[].value. */
module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");

module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");

module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
MODULE_PARM_DESC(dev_mode, "Device mode.");

module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
MODULE_PARM_DESC(adapter_mode, "Adapter mode.");

module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");

module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  489. /**
  490. * set_safe_settings - if the use_safe_settings option is set then
  491. * set all values to the safe and slow values.
  492. **/
  493. static void set_safe_settings(void)
  494. {
  495. if (use_safe_settings)
  496. {
  497. int i;
  498. dprintkl(KERN_INFO, "Using safe settings.\n");
  499. for (i = 0; i < CFG_NUM; i++)
  500. {
  501. cfg_data[i].value = cfg_data[i].safe;
  502. }
  503. }
  504. }
  505. /**
  506. * fix_settings - reset any boot parameters which are out of range
  507. * back to the default values.
  508. **/
  509. static void fix_settings(void)
  510. {
  511. int i;
  512. dprintkdbg(DBG_1,
  513. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  514. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  515. cfg_data[CFG_ADAPTER_ID].value,
  516. cfg_data[CFG_MAX_SPEED].value,
  517. cfg_data[CFG_DEV_MODE].value,
  518. cfg_data[CFG_ADAPTER_MODE].value,
  519. cfg_data[CFG_TAGS].value,
  520. cfg_data[CFG_RESET_DELAY].value);
  521. for (i = 0; i < CFG_NUM; i++)
  522. {
  523. if (cfg_data[i].value < cfg_data[i].min
  524. || cfg_data[i].value > cfg_data[i].max)
  525. cfg_data[i].value = cfg_data[i].def;
  526. }
  527. }
/*
 * Mapping from the eeprom delay index value (index into this array)
 * to the number of actual seconds that the delay should be for.
 */
static char eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };	/* seconds per eeprom index 0..7 */
  534. /**
  535. * eeprom_index_to_delay - Take the eeprom delay setting and convert it
  536. * into a number of seconds.
  537. *
  538. * @eeprom: The eeprom structure in which we find the delay index to map.
  539. **/
  540. static void eeprom_index_to_delay(struct NvRamType *eeprom)
  541. {
  542. eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
  543. }
  544. /**
  545. * delay_to_eeprom_index - Take a delay in seconds and return the
  546. * closest eeprom index which will delay for at least that amount of
  547. * seconds.
  548. *
  549. * @delay: The delay, in seconds, to find the eeprom index for.
  550. **/
  551. static int delay_to_eeprom_index(int delay)
  552. {
  553. u8 idx = 0;
  554. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  555. idx++;
  556. return idx;
  557. }
  558. /**
  559. * eeprom_override - Override the eeprom settings, in the provided
  560. * eeprom structure, with values that have been set on the command
  561. * line.
  562. *
  563. * @eeprom: The eeprom data to override with command line options.
  564. **/
  565. static void eeprom_override(struct NvRamType *eeprom)
  566. {
  567. u8 id;
  568. /* Adapter Settings */
  569. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  570. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  571. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  572. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  573. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  574. eeprom->delay_time = delay_to_eeprom_index(
  575. cfg_data[CFG_RESET_DELAY].value);
  576. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  577. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  578. /* Device Settings */
  579. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  580. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  581. eeprom->target[id].cfg0 =
  582. (u8)cfg_data[CFG_DEV_MODE].value;
  583. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  584. eeprom->target[id].period =
  585. (u8)cfg_data[CFG_MAX_SPEED].value;
  586. }
  587. }
  588. /*---------------------------------------------------------------------------
  589. ---------------------------------------------------------------------------*/
  590. static unsigned int list_size(struct list_head *head)
  591. {
  592. unsigned int count = 0;
  593. struct list_head *pos;
  594. list_for_each(pos, head)
  595. count++;
  596. return count;
  597. }
  598. static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
  599. struct DeviceCtlBlk *pos)
  600. {
  601. int use_next = 0;
  602. struct DeviceCtlBlk* next = NULL;
  603. struct DeviceCtlBlk* i;
  604. if (list_empty(head))
  605. return NULL;
  606. /* find supplied dcb and then select the next one */
  607. list_for_each_entry(i, head, list)
  608. if (use_next) {
  609. next = i;
  610. break;
  611. } else if (i == pos) {
  612. use_next = 1;
  613. }
  614. /* if no next one take the head one (ie, wraparound) */
  615. if (!next)
  616. list_for_each_entry(i, head, list) {
  617. next = i;
  618. break;
  619. }
  620. return next;
  621. }
  622. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  623. {
  624. if (srb->tag_number < 255) {
  625. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  626. srb->tag_number = 255;
  627. }
  628. }
  629. /* Find cmd in SRB list */
  630. static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  631. struct list_head *head)
  632. {
  633. struct ScsiReqBlk *i;
  634. list_for_each_entry(i, head, list)
  635. if (i->cmd == cmd)
  636. return i;
  637. return NULL;
  638. }
/*
 * Sets the timer to wake us up. Arms waiting_timer to re-run
 * waiting_process_next() after roughly @to jiffies; does nothing if
 * the timer is already pending.
 */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	if (timer_pending(&acb->waiting_timer))
		return;
	/*
	 * NOTE(review): last_reset appears to hold a deadline in the
	 * future after a bus reset; this keeps the timer from firing
	 * before (last_reset - HZ/2) — confirm against the reset path.
	 */
	if (time_before(jiffies + to, acb->last_reset - HZ / 2))
		acb->waiting_timer.expires =
		    acb->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/*
 * Send the next command from the waiting list to the bus.
 * Round-robins over the dcb list starting at acb->dcb_run_robin,
 * launching at most one waiting srb. If start_scsi() fails the srb
 * stays queued and the waiting timer is armed for a retry.
 */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	/* nothing to do while a device owns the bus or a reset is in flight */
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the loop so we need to do the wraparound manually.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* Make sure the next call starts at another device ... */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* nothing waiting, or device at its command limit:
			 * move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);
			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				list_move(&srb->list, &pos->srb_going_list);
			else
				/* bus was busy; retry shortly via the timer */
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
  706. /* Wake up waiting queue */
  707. static void waiting_timeout(struct timer_list *t)
  708. {
  709. unsigned long flags;
  710. struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
  711. dprintkdbg(DBG_1,
  712. "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
  713. DC395x_LOCK_IO(acb->scsi_host, flags);
  714. waiting_process_next(acb);
  715. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  716. }
  717. /* Get the DCB for a given ID/LUN combination */
  718. static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
  719. {
  720. return acb->children[id][lun];
  721. }
  722. /* Send SCSI Request Block (srb) to adapter (acb) */
  723. static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  724. {
  725. struct DeviceCtlBlk *dcb = srb->dcb;
  726. if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
  727. acb->active_dcb ||
  728. (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
  729. list_add_tail(&srb->list, &dcb->srb_waiting_list);
  730. waiting_process_next(acb);
  731. return;
  732. }
  733. if (!start_scsi(acb, dcb, srb)) {
  734. list_add_tail(&srb->list, &dcb->srb_going_list);
  735. } else {
  736. list_add(&srb->list, &dcb->srb_waiting_list);
  737. waiting_set_timer(acb, HZ / 50);
  738. }
  739. }
  740. /* Prepare SRB for being sent to Device DCB w/ command *cmd */
  741. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  742. struct ScsiReqBlk *srb)
  743. {
  744. int nseg;
  745. enum dma_data_direction dir = cmd->sc_data_direction;
  746. dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
  747. cmd, dcb->target_id, dcb->target_lun);
  748. srb->dcb = dcb;
  749. srb->cmd = cmd;
  750. srb->sg_count = 0;
  751. srb->total_xfer_length = 0;
  752. srb->sg_bus_addr = 0;
  753. srb->sg_index = 0;
  754. srb->adapter_status = 0;
  755. srb->target_status = 0;
  756. srb->msg_count = 0;
  757. srb->status = 0;
  758. srb->flag = 0;
  759. srb->state = 0;
  760. srb->retry_count = 0;
  761. srb->tag_number = TAG_NONE;
  762. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  763. srb->end_message = 0;
  764. nseg = scsi_dma_map(cmd);
  765. BUG_ON(nseg < 0);
  766. if (dir == PCI_DMA_NONE || !nseg) {
  767. dprintkdbg(DBG_0,
  768. "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
  769. cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
  770. srb->segment_x[0].address);
  771. } else {
  772. int i;
  773. u32 reqlen = scsi_bufflen(cmd);
  774. struct scatterlist *sg;
  775. struct SGentry *sgp = srb->segment_x;
  776. srb->sg_count = nseg;
  777. dprintkdbg(DBG_0,
  778. "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
  779. reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
  780. srb->sg_count);
  781. scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
  782. u32 busaddr = (u32)sg_dma_address(sg);
  783. u32 seglen = (u32)sg->length;
  784. sgp[i].address = busaddr;
  785. sgp[i].length = seglen;
  786. srb->total_xfer_length += seglen;
  787. }
  788. sgp += srb->sg_count - 1;
  789. /*
  790. * adjust last page if too big as it is allocated
  791. * on even page boundaries
  792. */
  793. if (srb->total_xfer_length > reqlen) {
  794. sgp->length -= (srb->total_xfer_length - reqlen);
  795. srb->total_xfer_length = reqlen;
  796. }
  797. /* Fixup for WIDE padding - make sure length is even */
  798. if (dcb->sync_period & WIDE_SYNC &&
  799. srb->total_xfer_length % 2) {
  800. srb->total_xfer_length++;
  801. sgp->length++;
  802. }
  803. srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
  804. srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
  805. dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
  806. srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
  807. }
  808. srb->request_length = srb->total_xfer_length;
  809. }
  810. /**
  811. * dc395x_queue_command - queue scsi command passed from the mid
  812. * layer, invoke 'done' on completion
  813. *
  814. * @cmd: pointer to scsi command object
  815. * @done: function pointer to be invoked on completion
  816. *
  817. * Returns 1 if the adapter (host) is busy, else returns 0. One
  818. * reason for an adapter to be busy is that the number
  819. * of outstanding queued commands is already equal to
  820. * struct Scsi_Host::can_queue .
  821. *
  822. * Required: if struct Scsi_Host::can_queue is ever non-zero
  823. * then this function is required.
  824. *
  825. * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
  826. * and is expected to be held on return.
  827. *
  828. **/
  829. static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
  830. {
  831. struct DeviceCtlBlk *dcb;
  832. struct ScsiReqBlk *srb;
  833. struct AdapterCtlBlk *acb =
  834. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  835. dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
  836. cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
  837. /* Assume BAD_TARGET; will be cleared later */
  838. cmd->result = DID_BAD_TARGET << 16;
  839. /* ignore invalid targets */
  840. if (cmd->device->id >= acb->scsi_host->max_id ||
  841. cmd->device->lun >= acb->scsi_host->max_lun ||
  842. cmd->device->lun >31) {
  843. goto complete;
  844. }
  845. /* does the specified lun on the specified device exist */
  846. if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
  847. dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
  848. cmd->device->id, (u8)cmd->device->lun);
  849. goto complete;
  850. }
  851. /* do we have a DCB for the device */
  852. dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
  853. if (!dcb) {
  854. /* should never happen */
  855. dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
  856. cmd->device->id, (u8)cmd->device->lun);
  857. goto complete;
  858. }
  859. /* set callback and clear result in the command */
  860. cmd->scsi_done = done;
  861. cmd->result = 0;
  862. srb = list_first_entry_or_null(&acb->srb_free_list,
  863. struct ScsiReqBlk, list);
  864. if (!srb) {
  865. /*
  866. * Return 1 since we are unable to queue this command at this
  867. * point in time.
  868. */
  869. dprintkdbg(DBG_0, "queue_command: No free srb's\n");
  870. return 1;
  871. }
  872. list_del(&srb->list);
  873. build_srb(cmd, dcb, srb);
  874. if (!list_empty(&dcb->srb_waiting_list)) {
  875. /* append to waiting queue */
  876. list_add_tail(&srb->list, &dcb->srb_waiting_list);
  877. waiting_process_next(acb);
  878. } else {
  879. /* process immediately */
  880. send_srb(acb, srb);
  881. }
  882. dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
  883. return 0;
  884. complete:
  885. /*
  886. * Complete the command immediatey, and then return 0 to
  887. * indicate that we have handled the command. This is usually
  888. * done when the commad is for things like non existent
  889. * devices.
  890. */
  891. done(cmd);
  892. return 0;
  893. }
  894. static DEF_SCSI_QCMD(dc395x_queue_command)
/*
 * dump_register_info - log a debugging snapshot: the active srb/dcb state
 * (if any), the TRM-S1040 SCSI core registers, the DMA engine registers
 * and the PCI status word.
 *
 * @acb: adapter to dump (always required)
 * @dcb: device to report; falls back to acb->active_dcb when NULL
 * @srb: request to report; falls back to dcb->active_srb when NULL
 */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		/* An srb with no attached command indicates internal corruption */
		if (!srb->cmd)
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
				"cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				(u8)srb->cmd->device->lun);
		printk("  sglist=%p cnt=%i idx=%i len=%zu\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk("  state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI core register block */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA engine register block */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* General control/status registers plus the PCI status word read above */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
  959. static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
  960. {
  961. #if debug_enabled(DBG_FIFO)
  962. u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
  963. u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
  964. if (!(fifocnt & 0x40))
  965. dprintkdbg(DBG_FIFO,
  966. "clear_fifo: (%i bytes) on phase %02x in %s\n",
  967. fifocnt & 0x3f, lines, txt);
  968. #endif
  969. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
  970. }
  971. static void reset_dev_param(struct AdapterCtlBlk *acb)
  972. {
  973. struct DeviceCtlBlk *dcb;
  974. struct NvRamType *eeprom = &acb->eeprom;
  975. dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
  976. list_for_each_entry(dcb, &acb->dcb_list, list) {
  977. u8 period_index;
  978. dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
  979. dcb->sync_period = 0;
  980. dcb->sync_offset = 0;
  981. dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
  982. period_index = eeprom->target[dcb->target_id].period & 0x07;
  983. dcb->min_nego_period = clock_period[period_index];
  984. if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
  985. || !(acb->config & HCC_WIDE_CARD))
  986. dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
  987. }
  988. }
/*
 * perform a hard reset on the SCSI bus
 * @cmd - some command for this host (for fetching hooks)
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 *
 * NOTE(review): as written this path always returns SUCCESS.
 * Caller must hold the host lock (see dc395x_eh_bus_reset below).
 * The register write sequence is order-critical: disable interrupts,
 * reset the SCSI and DMA modules, reset the bus, then reinitialize.
 */
static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd);

	/* Stop the retry timer; we are about to restart everything. */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/*
	 * disable interrupt
	 */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	reset_scsi_bus(acb);
	udelay(500);

	/* We may be in serious trouble. Wait some seconds */
	acb->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	/*
	 * re-enable interrupt
	 */
	/* Clear SCSI FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* Delete pending IRQ */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);

	/* Forget negotiation state and fail back all in-flight commands. */
	reset_dev_param(acb);
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;
	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE ,RESET_DEV */
	waiting_process_next(acb);

	return SUCCESS;
}
  1032. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1033. {
  1034. int rc;
  1035. spin_lock_irq(cmd->device->host->host_lock);
  1036. rc = __dc395x_eh_bus_reset(cmd);
  1037. spin_unlock_irq(cmd->device->host->host_lock);
  1038. return rc;
  1039. }
  1040. /*
  1041. * abort an errant SCSI command
  1042. * @cmd - command to be aborted
  1043. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1044. */
  1045. static int dc395x_eh_abort(struct scsi_cmnd *cmd)
  1046. {
  1047. /*
  1048. * Look into our command queues: If it has not been sent already,
  1049. * we remove it and return success. Otherwise fail.
  1050. */
  1051. struct AdapterCtlBlk *acb =
  1052. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  1053. struct DeviceCtlBlk *dcb;
  1054. struct ScsiReqBlk *srb;
  1055. dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
  1056. cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
  1057. dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
  1058. if (!dcb) {
  1059. dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
  1060. return FAILED;
  1061. }
  1062. srb = find_cmd(cmd, &dcb->srb_waiting_list);
  1063. if (srb) {
  1064. list_del(&srb->list);
  1065. pci_unmap_srb_sense(acb, srb);
  1066. pci_unmap_srb(acb, srb);
  1067. free_tag(dcb, srb);
  1068. list_add_tail(&srb->list, &acb->srb_free_list);
  1069. dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
  1070. cmd->result = DID_ABORT << 16;
  1071. return SUCCESS;
  1072. }
  1073. srb = find_cmd(cmd, &dcb->srb_going_list);
  1074. if (srb) {
  1075. dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
  1076. /* XXX: Should abort the command here */
  1077. } else {
  1078. dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
  1079. }
  1080. return FAILED;
  1081. }
/* SDTR */
/*
 * build_sdtr - append an extended SDTR (synchronous data transfer
 * request) message to the srb's message-out buffer and mark the srb
 * as performing sync negotiation.  Bails out if anything beyond the
 * IDENTIFY byte already occupies the buffer.
 */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	/* Sync nego disabled in config: request async (offset 0, 200ns). */
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;	/* 200ns in 4ns units */
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;

	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 3;		/* length */
	*ptr++ = EXTENDED_SDTR;	/* (01h) */
	*ptr++ = dcb->min_nego_period;	/* Transfer period (in 4ns) */
	*ptr++ = dcb->sync_offset;	/* REQ/ACK offset (max. REQ/ACK dist) */
	srb->msg_count += 5;
	srb->state |= SRB_DO_SYNC_NEGO;
}
  1107. /* WDTR */
  1108. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1109. struct ScsiReqBlk *srb)
  1110. {
  1111. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1112. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1113. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1114. if (srb->msg_count > 1) {
  1115. dprintkl(KERN_INFO,
  1116. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1117. srb->msg_count, srb->msgout_buf[0],
  1118. srb->msgout_buf[1]);
  1119. return;
  1120. }
  1121. *ptr++ = MSG_EXTENDED; /* (01h) */
  1122. *ptr++ = 2; /* length */
  1123. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1124. *ptr++ = wide;
  1125. srb->msg_count += 4;
  1126. srb->state |= SRB_DO_WIDE_NEGO;
  1127. }
#if 0
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
/* NOTE(review): this workaround is compiled out (#if 0) and kept only
 * for reference; start_scsi() has a commented-out call site. */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Don't expire before half a second after the last bus reset. */
	if (time_before
	    (jiffies + HZ, acb->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;	/* fetched but unused below */
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
  1163. static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
  1164. struct ScsiReqBlk* srb)
  1165. {
  1166. u16 s_stat2, return_code;
  1167. u8 s_stat, scsicommand, i, identify_message;
  1168. u8 *ptr;
  1169. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
  1170. dcb->target_id, dcb->target_lun, srb);
  1171. srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
  1172. s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
  1173. s_stat2 = 0;
  1174. s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
  1175. #if 1
  1176. if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
  1177. dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
  1178. s_stat, s_stat2);
  1179. /*
  1180. * Try anyway?
  1181. *
  1182. * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
  1183. * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
  1184. * (This is likely to be a bug in the hardware. Obviously, most people
  1185. * only have one initiator per SCSI bus.)
  1186. * Instead let this fail and have the timer make sure the command is
  1187. * tried again after a short time
  1188. */
  1189. /*selto_timer (acb); */
  1190. return 1;
  1191. }
  1192. #endif
  1193. if (acb->active_dcb) {
  1194. dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
  1195. "command while another command (0x%p) is active.",
  1196. srb->cmd,
  1197. acb->active_dcb->active_srb ?
  1198. acb->active_dcb->active_srb->cmd : 0);
  1199. return 1;
  1200. }
  1201. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1202. dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
  1203. return 1;
  1204. }
  1205. /* Allow starting of SCSI commands half a second before we allow the mid-level
  1206. * to queue them again after a reset */
  1207. if (time_before(jiffies, acb->last_reset - HZ / 2)) {
  1208. dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
  1209. return 1;
  1210. }
  1211. /* Flush FIFO */
  1212. clear_fifo(acb, "start_scsi");
  1213. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  1214. DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
  1215. DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
  1216. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
  1217. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1218. identify_message = dcb->identify_msg;
  1219. /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
  1220. /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
  1221. if (srb->flag & AUTO_REQSENSE)
  1222. identify_message &= 0xBF;
  1223. if (((srb->cmd->cmnd[0] == INQUIRY)
  1224. || (srb->cmd->cmnd[0] == REQUEST_SENSE)
  1225. || (srb->flag & AUTO_REQSENSE))
  1226. && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
  1227. && !(dcb->sync_mode & WIDE_NEGO_DONE))
  1228. || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
  1229. && !(dcb->sync_mode & SYNC_NEGO_DONE)))
  1230. && (dcb->target_lun == 0)) {
  1231. srb->msgout_buf[0] = identify_message;
  1232. srb->msg_count = 1;
  1233. scsicommand = SCMD_SEL_ATNSTOP;
  1234. srb->state = SRB_MSGOUT;
  1235. #ifndef SYNC_FIRST
  1236. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1237. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1238. build_wdtr(acb, dcb, srb);
  1239. goto no_cmd;
  1240. }
  1241. #endif
  1242. if (dcb->sync_mode & SYNC_NEGO_ENABLE
  1243. && dcb->inquiry7 & SCSI_INQ_SYNC) {
  1244. build_sdtr(acb, dcb, srb);
  1245. goto no_cmd;
  1246. }
  1247. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1248. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1249. build_wdtr(acb, dcb, srb);
  1250. goto no_cmd;
  1251. }
  1252. srb->msg_count = 0;
  1253. }
  1254. /* Send identify message */
  1255. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
  1256. scsicommand = SCMD_SEL_ATN;
  1257. srb->state = SRB_START_;
  1258. #ifndef DC395x_NO_TAGQ
  1259. if ((dcb->sync_mode & EN_TAG_QUEUEING)
  1260. && (identify_message & 0xC0)) {
  1261. /* Send Tag message */
  1262. u32 tag_mask = 1;
  1263. u8 tag_number = 0;
  1264. while (tag_mask & dcb->tag_mask
  1265. && tag_number < dcb->max_command) {
  1266. tag_mask = tag_mask << 1;
  1267. tag_number++;
  1268. }
  1269. if (tag_number >= dcb->max_command) {
  1270. dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
  1271. "Out of tags target=<%02i-%i>)\n",
  1272. srb->cmd, srb->cmd->device->id,
  1273. (u8)srb->cmd->device->lun);
  1274. srb->state = SRB_READY;
  1275. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1276. DO_HWRESELECT);
  1277. return 1;
  1278. }
  1279. /* Send Tag id */
  1280. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
  1281. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
  1282. dcb->tag_mask |= tag_mask;
  1283. srb->tag_number = tag_number;
  1284. scsicommand = SCMD_SEL_ATN3;
  1285. srb->state = SRB_START_;
  1286. }
  1287. #endif
  1288. /*polling:*/
  1289. /* Send CDB ..command block ......... */
  1290. dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
  1291. srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
  1292. srb->cmd->cmnd[0], srb->tag_number);
  1293. if (srb->flag & AUTO_REQSENSE) {
  1294. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
  1295. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
  1296. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1297. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1298. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
  1299. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1300. } else {
  1301. ptr = (u8 *)srb->cmd->cmnd;
  1302. for (i = 0; i < srb->cmd->cmd_len; i++)
  1303. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
  1304. }
  1305. no_cmd:
  1306. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1307. DO_HWRESELECT | DO_DATALATCH);
  1308. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1309. /*
  1310. * If start_scsi return 1:
  1311. * we caught an interrupt (must be reset or reselection ... )
  1312. * : Let's process it first!
  1313. */
  1314. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
  1315. srb->cmd, dcb->target_id, dcb->target_lun);
  1316. srb->state = SRB_READY;
  1317. free_tag(dcb, srb);
  1318. srb->msg_count = 0;
  1319. return_code = 1;
  1320. /* This IRQ should NOT get lost, as we did not acknowledge it */
  1321. } else {
  1322. /*
  1323. * If start_scsi returns 0:
  1324. * we know that the SCSI processor is free
  1325. */
  1326. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1327. dcb->active_srb = srb;
  1328. acb->active_dcb = dcb;
  1329. return_code = 0;
  1330. /* it's important for atn stop */
  1331. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1332. DO_DATALATCH | DO_HWRESELECT);
  1333. /* SCSI command */
  1334. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
  1335. }
  1336. return return_code;
  1337. }
/* Assert ATN so the target enters message-out phase, and mark the srb
 * as having a message to send.  Two-statement macro: only safe where a
 * compound statement is allowed. */
#define DC395x_ENABLE_MSGOUT \
	DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
	srb->state |= SRB_MSGOUT

/* abort command */
/*
 * enable_msgout_abort - queue a single ABORT message for @srb and
 * raise ATN so it is sent on the next message-out phase.
 */
static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = ABORT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;	/* redundant: already set by the macro */
}
/**
 * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
 * have been triggered for this card.
 *
 * @acb: a pointer to the adapter control block
 * @scsi_status: the status return when we checked the card
 **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */

	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* Bus-level events are handled first and exclusively. */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}

	/* Normal phase processing for the active command. */
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}

		/* software sequential machine */
		phase = (u16)srb->scsi_phase;

		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0, phase:0 */
		/* data_in_phase0, phase:1 */
		/* command_phase0, phase:2 */
		/* status_phase0, phase:3 */
		/* nop0, phase:4 PH_BUS_FREE .. initial phase */
		/* nop0, phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0, phase:6 */
		/* msgin_phase0, phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * if there were any exception occurred scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1, phase:0 */
		/* data_in_phase1, phase:1 */
		/* command_phase1, phase:2 */
		/* status_phase1, phase:3 */
		/* nop1, phase:4 PH_BUS_FREE .. initial phase */
		/* nop1, phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1, phase:6 */
		/* msgin_phase1, phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
  1452. static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
  1453. {
  1454. struct AdapterCtlBlk *acb = dev_id;
  1455. u16 scsi_status;
  1456. u8 dma_status;
  1457. irqreturn_t handled = IRQ_NONE;
  1458. /*
  1459. * Check for pending interrupt
  1460. */
  1461. scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
  1462. dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
  1463. if (scsi_status & SCSIINTERRUPT) {
  1464. /* interrupt pending - let's process it! */
  1465. dc395x_handle_interrupt(acb, scsi_status);
  1466. handled = IRQ_HANDLED;
  1467. }
  1468. else if (dma_status & 0x20) {
  1469. /* Error from the DMA engine */
  1470. dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
  1471. #if 0
  1472. dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
  1473. if (acb->active_dcb) {
  1474. acb->active_dcb-> flag |= ABORT_DEV_;
  1475. if (acb->active_dcb->active_srb)
  1476. enable_msgout_abort(a