PageRenderTime 104ms CodeModel.GetById 18ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/scsi/dc395x.c

http://github.com/mirrors/linux
C | 4757 lines | 3213 code | 594 blank | 950 comment | 492 complexity | c369baa06d6fcb78d1cc634b9caa8afb MD5 | raw file
Possible License(s): AGPL-1.0, GPL-2.0, LGPL-2.0
  1. /*
  2. * dc395x.c
  3. *
  4. * Device Driver for Tekram DC395(U/UW/F), DC315(U)
  5. * PCI SCSI Bus Master Host Adapter
  6. * (SCSI chip set used Tekram ASIC TRM-S1040)
  7. *
  8. * Authors:
  9. * C.L. Huang <ching@tekram.com.tw>
  10. * Erich Chen <erich@tekram.com.tw>
  11. * (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
  12. *
  13. * Kurt Garloff <garloff@suse.de>
  14. * (C) 1999-2000 Kurt Garloff
  15. *
  16. * Oliver Neukum <oliver@neukum.name>
  17. * Ali Akcaagac <aliakc@web.de>
  18. * Jamie Lenehan <lenehan@twibble.org>
  19. * (C) 2003
  20. *
  21. * License: GNU GPL
  22. *
  23. *************************************************************************
  24. *
  25. * Redistribution and use in source and binary forms, with or without
  26. * modification, are permitted provided that the following conditions
  27. * are met:
  28. * 1. Redistributions of source code must retain the above copyright
  29. * notice, this list of conditions and the following disclaimer.
  30. * 2. Redistributions in binary form must reproduce the above copyright
  31. * notice, this list of conditions and the following disclaimer in the
  32. * documentation and/or other materials provided with the distribution.
  33. * 3. The name of the author may not be used to endorse or promote products
  34. * derived from this software without specific prior written permission.
  35. *
  36. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  37. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  38. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  39. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  40. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  41. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  42. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  43. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  44. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  45. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  46. *
  47. ************************************************************************
  48. */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <linux/slab.h>
  61. #include <asm/io.h>
  62. #include <scsi/scsi.h>
  63. #include <scsi/scsi_cmnd.h>
  64. #include <scsi/scsi_device.h>
  65. #include <scsi/scsi_host.h>
  66. #include "dc395x.h"
#define DC395X_NAME	"dc395x"
#define DC395X_BANNER	"Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION	"v2.05, 2004/03/08"

/*---------------------------------------------------------------------------
                                  Features
 ---------------------------------------------------------------------------*/
/*
 * Set to disable parts of the driver
 */
/*#define DC395x_NO_DISCONNECT*/
/*#define DC395x_NO_TAGQ*/
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/

/*---------------------------------------------------------------------------
                                  Debugging
 ---------------------------------------------------------------------------*/
/*
 * Types of debugging that can be enabled and disabled
 */
#define DBG_KG		0x0001
#define DBG_0		0x0002
#define DBG_1		0x0004
#define DBG_SG		0x0020
#define DBG_FIFO	0x0040
#define DBG_PIO		0x0080

/*
 * Set set of things to output debugging for.
 * Undefine to remove all debugging
 */
/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
/*#define DEBUG_MASK	DBG_0*/

/*
 * Output a kernel message at the specified level and append the
 * driver name and a ": " to the start of the message
 */
#define dprintkl(level, format, arg...)  \
    printk(level DC395X_NAME ": " format , ## arg)

#ifdef DEBUG_MASK
/*
 * print a debug message - this is formatted with KERN_DEBUG, then the
 * driver name followed by a ": " and then the message is output.
 * This also checks that the specified debug level is enabled before
 * outputting the message
 */
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

/*
 * Check if the specified type of debugging is enabled
 */
#define debug_enabled(type)	((DEBUG_MASK) & (type))

#else
/*
 * No debugging. Do nothing
 */
#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type)	(0)

#endif

#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM                    0x1DE1	/* Vendor ID    */
#endif
#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
#define PCI_DEVICE_ID_TEKRAM_TRMS1040           0x0391	/* Device ID    */
#endif

/* Host-lock helpers; dev is the struct Scsi_Host for this adapter */
#define DC395x_LOCK_IO(dev,flags)		spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
#define DC395x_UNLOCK_IO(dev,flags)		spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)

/* Port I/O accessors relative to the adapter's I/O base */
#define DC395x_read8(acb,address)		(u8)(inb(acb->io_port_base + (address)))
#define DC395x_read16(acb,address)		(u16)(inw(acb->io_port_base + (address)))
#define DC395x_read32(acb,address)		(u32)(inl(acb->io_port_base + (address)))
#define DC395x_write8(acb,address,value)	outb((value), acb->io_port_base + (address))
#define DC395x_write16(acb,address,value)	outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value)	outl((value), acb->io_port_base + (address))

/* cmd->result layout: DRIVER_ code | DID_ code | end message | target status */
#define RES_TARGET		0x000000FF	/* Target State */
#define RES_TARGET_LNX  STATUS_MASK	/* Only official ... */
#define RES_ENDMSG		0x0000FF00	/* End Message */
#define RES_DID			0x00FF0000	/* DID_ codes */
#define RES_DRV			0xFF000000	/* DRIVER_ codes */

#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)

#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }

/* tag_number value meaning "no tag assigned" */
#define TAG_NONE 255

/*
 * srb->segement_x is the hw sg list. It is always allocated as a
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
/* One hardware scatter/gather list entry (32-bit bus address + length) */
struct SGentry {
	u32 address;		/* bus! address */
	u32 length;
};

/* The SEEPROM structure for TRM_S1040 */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0  */
	u8 period;		/* Target period                */
	u8 cfg2;		/* Target configuration byte 2  */
	u8 cfg3;		/* Target configuration byte 3  */
};

/* Full 128-byte SEEPROM image; the numeric comments are byte offsets */
struct NvRamType {
	u8 sub_vendor_id[2];	/* 0,1  Sub Vendor ID   */
	u8 sub_sys_id[2];	/* 2,3  Sub System ID   */
	u8 sub_class;		/* 4    Sub Class       */
	u8 vendor_id[2];	/* 5,6  Vendor ID       */
	u8 device_id[2];	/* 7,8  Device ID       */
	u8 reserved;		/* 9    Reserved        */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
				/** 10,11,12,13
				 ** 14,15,16,17
				 ** ....
				 ** ....
				 ** 70,71,72,73
				 */
	u8 scsi_id;		/* 74 Host Adapter SCSI ID      */
	u8 channel_cfg;		/* 75 Channel configuration     */
	u8 delay_time;		/* 76 Power on delay time       */
	u8 max_tag;		/* 77 Maximum tags              */
	u8 reserved0;		/* 78  */
	u8 boot_target;		/* 79  */
	u8 boot_lun;		/* 80  */
	u8 reserved1;		/* 81  */
	u16 reserved2[22];	/* 82,..125 */
	u16 cksum;		/* 126,127  */
};
/* Per-command request block: driver-side state for one scsi_cmnd */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;
	struct scsi_cmnd *cmd;

	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	dma_addr_t sg_bus_addr;	        /* Bus address of sg list (ie, of segment_x) */

	u8 sg_count;			/* No of HW sg entries for this request */
	u8 sg_index;			/* Index of HW sg entry for this request */
	size_t total_xfer_length;	/* Total number of bytes remaining to be transferred */
	size_t request_length;		/* Total number of bytes in this request */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	size_t xferred;		        /* Saved copy of total_xfer_length */

	u16 state;

	u8 msgin_buf[6];
	u8 msgout_buf[6];

	u8 adapter_status;
	u8 target_status;
	u8 msg_count;
	u8 end_message;

	u8 tag_number;			/* TAG_NONE (255) when untagged */
	u8 status;
	u8 retry_count;
	u8 flag;

	u8 scsi_phase;
};
/* Per-target/LUN device control block */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */

	struct ScsiReqBlk *active_srb;
	u32 tag_mask;			/* bit i set => tag i in use */

	u16 max_command;		/* max outstanding cmds before queueing */

	u8 target_id;		/* SCSI Target ID  (SCSI Only) */
	u8 target_lun;		/* SCSI Log.  Unit (SCSI Only) */
	u8 identify_msg;
	u8 dev_mode;

	u8 inquiry7;		/* To store Inquiry flags */
	u8 sync_mode;		/* 0:async mode */
	u8 min_nego_period;	/* for nego. */
	u8 sync_period;		/* for reg.  */
	u8 sync_offset;		/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;

	u8 init_tcq_flag;
};
/* Per-HBA adapter control block, stored in Scsi_Host::hostdata */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;

	unsigned long io_port_base;
	unsigned long io_port_len;

	struct list_head dcb_list;		/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* round-robin scheduling cursor */
	struct DeviceCtlBlk *active_dcb;

	struct list_head srb_free_list;		/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;
	struct timer_list waiting_timer;
	struct timer_list selto_timer;

	unsigned long last_reset;		/* jiffies of last bus reset */

	u16 srb_count;

	u8 sel_timeout;

	unsigned int irq_level;
	u8 tag_max_num;
	u8 acb_flag;
	u8 gmode2;

	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;

	u8 dcb_map[DC395x_MAX_SCSI_ID];		/* per-id bitmap of present luns */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];

	struct pci_dev *dev;

	u8 msg_len;

	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
	struct ScsiReqBlk srb;

	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
/*---------------------------------------------------------------------------
                            Forward declarations
 ---------------------------------------------------------------------------*/
/* The phase0/phase1 handlers below form the two jump tables indexed by
 * SCSI bus phase (see dc395x_scsi_phase0/1 further down). */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status);
static void set_basic_config(struct AdapterCtlBlk *acb);
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void reset_scsi_bus(struct AdapterCtlBlk *acb);
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir);
static void disconnect(struct AdapterCtlBlk *acb);
static void reselect(struct AdapterCtlBlk *acb);
static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
		struct scsi_cmnd *cmd, u8 force);
static void scsi_reset_detect(struct AdapterCtlBlk *acb);
static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb);
static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb);
static void set_xfer_rate(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb);
static void waiting_timeout(struct timer_list *t);
/*---------------------------------------------------------------------------
                                 Static Data
 ---------------------------------------------------------------------------*/
/* NOTE(review): current_sync_offset is not referenced anywhere in this
 * chunk — presumably used further down the file; verify before removing. */
static u16 current_sync_offset = 0;

/* Jump tables of SCSI phase handlers, indexed by bus phase 0-7.  Entries
 * are the phase handler functions declared above; they are stored as
 * void * so both tables share one array type and are cast at the call
 * site (outside this chunk). */
static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop0,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};

static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop1,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};

/*
 *Fast20:	000	 50ns, 20.0 MHz
 *		001	 75ns, 13.3 MHz
 *		010	100ns, 10.0 MHz
 *		011	125ns,  8.0 MHz
 *		100	150ns,  6.6 MHz
 *		101	175ns,  5.7 MHz
 *		110	200ns,  5.0 MHz
 *		111	250ns,  4.0 MHz
 *
 *Fast40(LVDS):	000	 25ns, 40.0 MHz
 *		001	 50ns, 20.0 MHz
 *		010	 75ns, 13.3 MHz
 *		011	100ns, 10.0 MHz
 *		100	125ns,  8.0 MHz
 *		101	150ns,  6.6 MHz
 *		110	175ns,  5.7 MHz
 *		111	200ns,  5.0 MHz
 */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
/*---------------------------------------------------------------------------
                                Configuration
 ---------------------------------------------------------------------------*/
/*
 * Module/boot parameters currently effect *all* instances of the
 * card in the system.
 */
/*
 * Command line parameters are stored in a structure below.
 * These are the index's into the structure for the various
 * command line options.
 */
#define CFG_ADAPTER_ID		0
#define CFG_MAX_SPEED		1
#define CFG_DEV_MODE		2
#define CFG_ADAPTER_MODE	3
#define CFG_TAGS		4
#define CFG_RESET_DELAY		5

#define CFG_NUM			6	/* number of configuration items */

/*
 * Value used to indicate that a command line override
 * hasn't been used to modify the value.
 */
#define CFG_PARAM_UNSET -1

/*
 * Hold command line parameters.
 */
struct ParameterData {
	int value;		/* value of this setting */
	int min;		/* minimum value */
	int max;		/* maximum value */
	int def;		/* default value */
	int safe;		/* safe value */
};

/* One entry per CFG_* index above, in that order */
static struct ParameterData cfg_data[] = {
	{ /* adapter id */
		CFG_PARAM_UNSET,
		0,
		15,
		7,
		7
	},
	{ /* max speed */
		CFG_PARAM_UNSET,
		0,
		7,
		1,	/* 13.3Mhz */
		4,	/*  6.7Hmz */
	},
	{ /* dev mode */
		CFG_PARAM_UNSET,
		0,
		0x3f,
		NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
			NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
			NTC_DO_SEND_START,
		NTC_DO_PARITY_CHK | NTC_DO_SEND_START
	},
	{ /* adapter mode */
		CFG_PARAM_UNSET,
		0,
		0x2f,
		NAC_SCANLUN |
			NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
			/*| NAC_ACTIVE_NEG*/,
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
	},
	{ /* tags */
		CFG_PARAM_UNSET,
		0,
		5,
		3,	/* 16 tags (??) */
		2,
	},
	{ /* reset delay */
		CFG_PARAM_UNSET,
		0,
		180,
		1,	/* 1 second */
		10,	/* 10 seconds */
	}
};

/*
 * Safe settings. If set to zero the BIOS/default values with
 * command line overrides will be used. If set to 1 then safe and
 * slow settings will be used.
 */
static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");

module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");

module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");

module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
MODULE_PARM_DESC(dev_mode, "Device mode.");

module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
MODULE_PARM_DESC(adapter_mode, "Adapter mode.");

module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");

module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  489. /**
  490. * set_safe_settings - if the use_safe_settings option is set then
  491. * set all values to the safe and slow values.
  492. **/
  493. static void set_safe_settings(void)
  494. {
  495. if (use_safe_settings)
  496. {
  497. int i;
  498. dprintkl(KERN_INFO, "Using safe settings.\n");
  499. for (i = 0; i < CFG_NUM; i++)
  500. {
  501. cfg_data[i].value = cfg_data[i].safe;
  502. }
  503. }
  504. }
  505. /**
  506. * fix_settings - reset any boot parameters which are out of range
  507. * back to the default values.
  508. **/
  509. static void fix_settings(void)
  510. {
  511. int i;
  512. dprintkdbg(DBG_1,
  513. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  514. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  515. cfg_data[CFG_ADAPTER_ID].value,
  516. cfg_data[CFG_MAX_SPEED].value,
  517. cfg_data[CFG_DEV_MODE].value,
  518. cfg_data[CFG_ADAPTER_MODE].value,
  519. cfg_data[CFG_TAGS].value,
  520. cfg_data[CFG_RESET_DELAY].value);
  521. for (i = 0; i < CFG_NUM; i++)
  522. {
  523. if (cfg_data[i].value < cfg_data[i].min
  524. || cfg_data[i].value > cfg_data[i].max)
  525. cfg_data[i].value = cfg_data[i].def;
  526. }
  527. }
/*
 * Mapping from the eeprom delay index value (index into this array)
 * to the number of actual seconds that the delay should be for.
 */
static char eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };


/**
 * eeprom_index_to_delay - Take the eeprom delay setting and convert it
 * into a number of seconds.
 *
 * Replaces eeprom->delay_time (an index 0-7) in place with the
 * corresponding delay in seconds from the map above.
 *
 * @eeprom: The eeprom structure in which we find the delay index to map.
 **/
static void eeprom_index_to_delay(struct NvRamType *eeprom)
{
	eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
  544. /**
  545. * delay_to_eeprom_index - Take a delay in seconds and return the
  546. * closest eeprom index which will delay for at least that amount of
  547. * seconds.
  548. *
  549. * @delay: The delay, in seconds, to find the eeprom index for.
  550. **/
  551. static int delay_to_eeprom_index(int delay)
  552. {
  553. u8 idx = 0;
  554. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  555. idx++;
  556. return idx;
  557. }
  558. /**
  559. * eeprom_override - Override the eeprom settings, in the provided
  560. * eeprom structure, with values that have been set on the command
  561. * line.
  562. *
  563. * @eeprom: The eeprom data to override with command line options.
  564. **/
  565. static void eeprom_override(struct NvRamType *eeprom)
  566. {
  567. u8 id;
  568. /* Adapter Settings */
  569. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  570. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  571. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  572. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  573. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  574. eeprom->delay_time = delay_to_eeprom_index(
  575. cfg_data[CFG_RESET_DELAY].value);
  576. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  577. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  578. /* Device Settings */
  579. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  580. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  581. eeprom->target[id].cfg0 =
  582. (u8)cfg_data[CFG_DEV_MODE].value;
  583. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  584. eeprom->target[id].period =
  585. (u8)cfg_data[CFG_MAX_SPEED].value;
  586. }
  587. }
  588. /*---------------------------------------------------------------------------
  589. ---------------------------------------------------------------------------*/
  590. static unsigned int list_size(struct list_head *head)
  591. {
  592. unsigned int count = 0;
  593. struct list_head *pos;
  594. list_for_each(pos, head)
  595. count++;
  596. return count;
  597. }
  598. static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
  599. struct DeviceCtlBlk *pos)
  600. {
  601. int use_next = 0;
  602. struct DeviceCtlBlk* next = NULL;
  603. struct DeviceCtlBlk* i;
  604. if (list_empty(head))
  605. return NULL;
  606. /* find supplied dcb and then select the next one */
  607. list_for_each_entry(i, head, list)
  608. if (use_next) {
  609. next = i;
  610. break;
  611. } else if (i == pos) {
  612. use_next = 1;
  613. }
  614. /* if no next one take the head one (ie, wraparound) */
  615. if (!next)
  616. list_for_each_entry(i, head, list) {
  617. next = i;
  618. break;
  619. }
  620. return next;
  621. }
  622. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  623. {
  624. if (srb->tag_number < 255) {
  625. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  626. srb->tag_number = 255;
  627. }
  628. }
  629. /* Find cmd in SRB list */
  630. static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  631. struct list_head *head)
  632. {
  633. struct ScsiReqBlk *i;
  634. list_for_each_entry(i, head, list)
  635. if (i->cmd == cmd)
  636. return i;
  637. return NULL;
  638. }
  639. /* Sets the timer to wake us up */
  640. static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
  641. {
  642. if (timer_pending(&acb->waiting_timer))
  643. return;
  644. if (time_before(jiffies + to, acb->last_reset - HZ / 2))
  645. acb->waiting_timer.expires =
  646. acb->last_reset - HZ / 2 + 1;
  647. else
  648. acb->waiting_timer.expires = jiffies + to + 1;
  649. add_timer(&acb->waiting_timer);
  650. }
/* Send the next command from the waiting list to the bus.
 *
 * Walks the dcb list round-robin starting at acb->dcb_run_robin and
 * starts the first waiting srb of the first dcb that has capacity
 * (its going list shorter than max_command).  Does nothing while a
 * device is active on the bus or a reset is in progress. */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	/* we are about to process the queue now; no need for the timer */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the loop so we need to manually do this.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* Make sure the next another device gets scheduled ... */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				list_move(&srb->list, &pos->srb_going_list);
			else
				/* bus busy: back off and retry via timer */
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
  706. /* Wake up waiting queue */
  707. static void waiting_timeout(struct timer_list *t)
  708. {
  709. unsigned long flags;
  710. struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
  711. dprintkdbg(DBG_1,
  712. "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
  713. DC395x_LOCK_IO(acb->scsi_host, flags);
  714. waiting_process_next(acb);
  715. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  716. }
/* Get the DCB for a given ID/LUN combination.  Returns NULL when no
 * device control block has been registered in acb->children for that
 * slot. */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	return acb->children[id][lun];
}
  722. /* Send SCSI Request Block (srb) to adapter (acb) */
  723. static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  724. {
  725. struct DeviceCtlBlk *dcb = srb->dcb;
  726. if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
  727. acb->active_dcb ||
  728. (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
  729. list_add_tail(&srb->list, &dcb->srb_waiting_list);
  730. waiting_process_next(acb);
  731. return;
  732. }
  733. if (!start_scsi(acb, dcb, srb)) {
  734. list_add_tail(&srb->list, &dcb->srb_going_list);
  735. } else {
  736. list_add(&srb->list, &dcb->srb_waiting_list);
  737. waiting_set_timer(acb, HZ / 50);
  738. }
  739. }
  740. /* Prepare SRB for being sent to Device DCB w/ command *cmd */
  741. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  742. struct ScsiReqBlk *srb)
  743. {
  744. int nseg;
  745. enum dma_data_direction dir = cmd->sc_data_direction;
  746. dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
  747. cmd, dcb->target_id, dcb->target_lun);
  748. srb->dcb = dcb;
  749. srb->cmd = cmd;
  750. srb->sg_count = 0;
  751. srb->total_xfer_length = 0;
  752. srb->sg_bus_addr = 0;
  753. srb->sg_index = 0;
  754. srb->adapter_status = 0;
  755. srb->target_status = 0;
  756. srb->msg_count = 0;
  757. srb->status = 0;
  758. srb->flag = 0;
  759. srb->state = 0;
  760. srb->retry_count = 0;
  761. srb->tag_number = TAG_NONE;
  762. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  763. srb->end_message = 0;
  764. nseg = scsi_dma_map(cmd);
  765. BUG_ON(nseg < 0);
  766. if (dir == PCI_DMA_NONE || !nseg) {
  767. dprintkdbg(DBG_0,
  768. "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
  769. cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
  770. srb->segment_x[0].address);
  771. } else {
  772. int i;
  773. u32 reqlen = scsi_bufflen(cmd);
  774. struct scatterlist *sg;
  775. struct SGentry *sgp = srb->segment_x;
  776. srb->sg_count = nseg;
  777. dprintkdbg(DBG_0,
  778. "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
  779. reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
  780. srb->sg_count);
  781. scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
  782. u32 busaddr = (u32)sg_dma_address(sg);
  783. u32 seglen = (u32)sg->length;
  784. sgp[i].address = busaddr;
  785. sgp[i].length = seglen;
  786. srb->total_xfer_length += seglen;
  787. }
  788. sgp += srb->sg_count - 1;
  789. /*
  790. * adjust last page if too big as it is allocated
  791. * on even page boundaries
  792. */
  793. if (srb->total_xfer_length > reqlen) {
  794. sgp->length -= (srb->total_xfer_length - reqlen);
  795. srb->total_xfer_length = reqlen;
  796. }
  797. /* Fixup for WIDE padding - make sure length is even */
  798. if (dcb->sync_period & WIDE_SYNC &&
  799. srb->total_xfer_length % 2) {
  800. srb->total_xfer_length++;
  801. sgp->length++;
  802. }
  803. srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
  804. srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
  805. dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
  806. srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
  807. }
  808. srb->request_length = srb->total_xfer_length;
  809. }
  810. /**
  811. * dc395x_queue_command - queue scsi command passed from the mid
  812. * layer, invoke 'done' on completion
  813. *
  814. * @cmd: pointer to scsi command object
  815. * @done: function pointer to be invoked on completion
  816. *
  817. * Returns 1 if the adapter (host) is busy, else returns 0. One
  818. * reason for an adapter to be busy is that the number
  819. * of outstanding queued commands is already equal to
  820. * struct Scsi_Host::can_queue .
  821. *
  822. * Required: if struct Scsi_Host::can_queue is ever non-zero
  823. * then this function is required.
  824. *
  825. * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
  826. * and is expected to be held on return.
  827. *
  828. **/
/*
 * Queue one SCSI command handed down from the midlayer.  Validates the
 * target/lun, locates the matching device control block, pulls a free
 * SRB off the adapter's free list and either appends it behind already
 * waiting commands or starts it immediately.
 *
 * Returns 1 when no free SRB is available (host busy, midlayer will
 * retry), 0 otherwise.  Invalid/unknown targets are completed at once
 * with DID_BAD_TARGET.  Caller holds the host lock (via DEF_SCSI_QCMD).
 */
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);

	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;

	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun > 31) {
		goto complete;
	}

	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen: dcb_map said the lun exists */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;

	srb = list_first_entry_or_null(&acb->srb_free_list,
			struct ScsiReqBlk, list);
	if (!srb) {
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}
	list_del(&srb->list);

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue to preserve ordering */
		list_add_tail(&srb->list, &dcb->srb_waiting_list);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
	return 0;

complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  894. static DEF_SCSI_QCMD(dc395x_queue_command)
/*
 * Dump diagnostic state: the active SRB/DCB (falling back to the
 * adapter's currently active ones when @dcb/@srb are NULL), the chip's
 * SCSI and DMA register sets, the general-purpose registers and the
 * PCI status word.  Debug-path only; note the TRM_S1040_SCSI_INTSTATUS
 * read below — elsewhere in this driver that read acknowledges a
 * pending IRQ, so this dump is not entirely side-effect free.
 */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			/* SRB without a command attached should not happen */
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
				 "cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				(u8)srb->cmd->device->lun);
		printk("  sglist=%p cnt=%i idx=%i len=%zu\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk("  state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI core registers */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA engine registers */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* general-purpose registers and PCI status */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/*
 * Flush the chip's SCSI FIFO.  With DBG_FIFO debugging compiled in,
 * first report how many bytes are being discarded and during which bus
 * phase; @txt identifies the call site in the debug output.
 */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* 0x40 apparently flags an empty FIFO — only log when data present;
	 * low 6 bits hold the byte count (TODO confirm vs. TRM-S1040 docs) */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
  971. static void reset_dev_param(struct AdapterCtlBlk *acb)
  972. {
  973. struct DeviceCtlBlk *dcb;
  974. struct NvRamType *eeprom = &acb->eeprom;
  975. dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
  976. list_for_each_entry(dcb, &acb->dcb_list, list) {
  977. u8 period_index;
  978. dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
  979. dcb->sync_period = 0;
  980. dcb->sync_offset = 0;
  981. dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
  982. period_index = eeprom->target[dcb->target_id].period & 0x07;
  983. dcb->min_nego_period = clock_period[period_index];
  984. if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
  985. || !(acb->config & HCC_WIDE_CARD))
  986. dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
  987. }
  988. }
  989. /*
  990. * perform a hard reset on the SCSI bus
  991. * @cmd - some command for this host (for fetching hooks)
  992. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  993. */
  994. static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  995. {
  996. struct AdapterCtlBlk *acb =
  997. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  998. dprintkl(KERN_INFO,
  999. "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
  1000. cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
  1001. if (timer_pending(&acb->waiting_timer))
  1002. del_timer(&acb->waiting_timer);
  1003. /*
  1004. * disable interrupt
  1005. */
  1006. DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
  1007. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
  1008. DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
  1009. DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
  1010. reset_scsi_bus(acb);
  1011. udelay(500);
  1012. /* We may be in serious trouble. Wait some seconds */
  1013. acb->last_reset =
  1014. jiffies + 3 * HZ / 2 +
  1015. HZ * acb->eeprom.delay_time;
  1016. /*
  1017. * re-enable interrupt
  1018. */
  1019. /* Clear SCSI FIFO */
  1020. DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
  1021. clear_fifo(acb, "eh_bus_reset");
  1022. /* Delete pending IRQ */
  1023. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  1024. set_basic_config(acb);
  1025. reset_dev_param(acb);
  1026. doing_srb_done(acb, DID_RESET, cmd, 0);
  1027. acb->active_dcb = NULL;
  1028. acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE ,RESET_DEV */
  1029. waiting_process_next(acb);
  1030. return SUCCESS;
  1031. }
  1032. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1033. {
  1034. int rc;
  1035. spin_lock_irq(cmd->device->host->host_lock);
  1036. rc = __dc395x_eh_bus_reset(cmd);
  1037. spin_unlock_irq(cmd->device->host->host_lock);
  1038. return rc;
  1039. }
  1040. /*
  1041. * abort an errant SCSI command
  1042. * @cmd - command to be aborted
  1043. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1044. */
  1045. static int dc395x_eh_abort(struct scsi_cmnd *cmd)
  1046. {
  1047. /*
  1048. * Look into our command queues: If it has not been sent already,
  1049. * we remove it and return success. Otherwise fail.
  1050. */
  1051. struct AdapterCtlBlk *acb =
  1052. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  1053. struct DeviceCtlBlk *dcb;
  1054. struct ScsiReqBlk *srb;
  1055. dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
  1056. cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
  1057. dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
  1058. if (!dcb) {
  1059. dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
  1060. return FAILED;
  1061. }
  1062. srb = find_cmd(cmd, &dcb->srb_waiting_list);
  1063. if (srb) {
  1064. list_del(&srb->list);
  1065. pci_unmap_srb_sense(acb, srb);
  1066. pci_unmap_srb(acb, srb);
  1067. free_tag(dcb, srb);
  1068. list_add_tail(&srb->list, &acb->srb_free_list);
  1069. dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
  1070. cmd->result = DID_ABORT << 16;
  1071. return SUCCESS;
  1072. }
  1073. srb = find_cmd(cmd, &dcb->srb_going_list);
  1074. if (srb) {
  1075. dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
  1076. /* XXX: Should abort the command here */
  1077. } else {
  1078. dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
  1079. }
  1080. return FAILED;
  1081. }
/*
 * build_sdtr - append an extended SDTR (synchronous data transfer
 * request) message to the SRB's message-out buffer.
 *
 * If sync negotiation is disabled for the device, offer offset 0 and a
 * 200ns period; otherwise ensure a non-zero offset.  Bails out if the
 * buffer already holds more than the identify message.
 */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;	/* 200ns in 4ns units */
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;

	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 3;		/* length */
	*ptr++ = EXTENDED_SDTR;	/* (01h) */
	*ptr++ = dcb->min_nego_period;	/* Transfer period (in 4ns) */
	*ptr++ = dcb->sync_offset;	/* Transfer offset (max. REQ/ACK dist) */
	srb->msg_count += 5;
	srb->state |= SRB_DO_SYNC_NEGO;
}
/*
 * build_wdtr - append an extended WDTR (wide data transfer request)
 * message to the SRB's message-out buffer.
 *
 * Offers 16-bit wide (width code 1) only when both the device config
 * and the card support it, otherwise 8-bit (0).  Bails out if the
 * buffer already holds more than the identify message.
 */
static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
		   (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 2;		/* length */
	*ptr++ = EXTENDED_WDTR;	/* (03h) */
	*ptr++ = wide;
	srb->msg_count += 4;
	srb->state |= SRB_DO_WIDE_NEGO;
}
#if 0
/*
 * NOTE: this whole region is compiled out (#if 0).  It is a proposed
 * workaround kept for reference; start_scsi() has a commented-out call
 * to selto_timer().
 */
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* don't fire inside the post-reset grace period */
	if (time_before
	    (jiffies + HZ, acb->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}

/* Timer callback: treat the missed selection timeout as a disconnect */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
  1163. static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
  1164. struct ScsiReqBlk* srb)
  1165. {
  1166. u16 s_stat2, return_code;
  1167. u8 s_stat, scsicommand, i, identify_message;
  1168. u8 *ptr;
  1169. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
  1170. dcb->target_id, dcb->target_lun, srb);
  1171. srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
  1172. s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
  1173. s_stat2 = 0;
  1174. s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
  1175. #if 1
  1176. if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
  1177. dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
  1178. s_stat, s_stat2);
  1179. /*
  1180. * Try anyway?
  1181. *
  1182. * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
  1183. * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
  1184. * (This is likely to be a bug in the hardware. Obviously, most people
  1185. * only have one initiator per SCSI bus.)
  1186. * Instead let this fail and have the timer make sure the command is
  1187. * tried again after a short time
  1188. */
  1189. /*selto_timer (acb); */
  1190. return 1;
  1191. }
  1192. #endif
  1193. if (acb->active_dcb) {
  1194. dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
  1195. "command while another command (0x%p) is active.",
  1196. srb->cmd,
  1197. acb->active_dcb->active_srb ?
  1198. acb->active_dcb->active_srb->cmd : 0);
  1199. return 1;
  1200. }
  1201. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1202. dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
  1203. return 1;
  1204. }
  1205. /* Allow starting of SCSI commands half a second before we allow the mid-level
  1206. * to queue them again after a reset */
  1207. if (time_before(jiffies, acb->last_reset - HZ / 2)) {
  1208. dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
  1209. return 1;
  1210. }
  1211. /* Flush FIFO */
  1212. clear_fifo(acb, "start_scsi");
  1213. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  1214. DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
  1215. DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
  1216. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
  1217. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1218. identify_message = dcb->identify_msg;
  1219. /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
  1220. /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
  1221. if (srb->flag & AUTO_REQSENSE)
  1222. identify_message &= 0xBF;
  1223. if (((srb->cmd->cmnd[0] == INQUIRY)
  1224. || (srb->cmd->cmnd[0] == REQUEST_SENSE)
  1225. || (srb->flag & AUTO_REQSENSE))
  1226. && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
  1227. && !(dcb->sync_mode & WIDE_NEGO_DONE))
  1228. || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
  1229. && !(dcb->sync_mode & SYNC_NEGO_DONE)))
  1230. && (dcb->target_lun == 0)) {
  1231. srb->msgout_buf[0] = identify_message;
  1232. srb->msg_count = 1;
  1233. scsicommand = SCMD_SEL_ATNSTOP;
  1234. srb->state = SRB_MSGOUT;
  1235. #ifndef SYNC_FIRST
  1236. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1237. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1238. build_wdtr(acb, dcb, srb);
  1239. goto no_cmd;
  1240. }
  1241. #endif
  1242. if (dcb->sync_mode & SYNC_NEGO_ENABLE
  1243. && dcb->inquiry7 & SCSI_INQ_SYNC) {
  1244. build_sdtr(acb, dcb, srb);
  1245. goto no_cmd;
  1246. }
  1247. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1248. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1249. build_wdtr(acb, dcb, srb);
  1250. goto no_cmd;
  1251. }
  1252. srb->msg_count = 0;
  1253. }
  1254. /* Send identify message */
  1255. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
  1256. scsicommand = SCMD_SEL_ATN;
  1257. srb->state = SRB_START_;
  1258. #ifndef DC395x_NO_TAGQ
  1259. if ((dcb->sync_mode & EN_TAG_QUEUEING)
  1260. && (identify_message & 0xC0)) {
  1261. /* Send Tag message */
  1262. u32 tag_mask = 1;
  1263. u8 tag_number = 0;
  1264. while (tag_mask & dcb->tag_mask
  1265. && tag_number < dcb->max_command) {
  1266. tag_mask = tag_mask << 1;
  1267. tag_number++;
  1268. }
  1269. if (tag_number >= dcb->max_command) {
  1270. dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
  1271. "Out of tags target=<%02i-%i>)\n",
  1272. srb->cmd, srb->cmd->device->id,
  1273. (u8)srb->cmd->device->lun);
  1274. srb->state = SRB_READY;
  1275. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1276. DO_HWRESELECT);
  1277. return 1;
  1278. }
  1279. /* Send Tag id */
  1280. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
  1281. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
  1282. dcb->tag_mask |= tag_mask;
  1283. srb->tag_number = tag_number;
  1284. scsicommand = SCMD_SEL_ATN3;
  1285. srb->state = SRB_START_;
  1286. }
  1287. #endif
  1288. /*polling:*/
  1289. /* Send CDB ..command block ......... */
  1290. dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
  1291. srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
  1292. srb->cmd->cmnd[0], srb->tag_number);
  1293. if (srb->flag & AUTO_REQSENSE) {
  1294. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
  1295. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
  1296. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1297. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1298. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
  1299. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1300. } else {
  1301. ptr = (u8 *)srb->cmd->cmnd;
  1302. for (i = 0; i < srb->cmd->cmd_len; i++)
  1303. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
  1304. }
  1305. no_cmd:
  1306. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1307. DO_HWRESELECT | DO_DATALATCH);
  1308. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1309. /*
  1310. * If start_scsi return 1:
  1311. * we caught an interrupt (must be reset or reselection ... )
  1312. * : Let's process it first!
  1313. */
  1314. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
  1315. srb->cmd, dcb->target_id, dcb->target_lun);
  1316. srb->state = SRB_READY;
  1317. free_tag(dcb, srb);
  1318. srb->msg_count = 0;
  1319. return_code = 1;
  1320. /* This IRQ should NOT get lost, as we did not acknowledge it */
  1321. } else {
  1322. /*
  1323. * If start_scsi returns 0:
  1324. * we know that the SCSI processor is free
  1325. */
  1326. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1327. dcb->active_srb = srb;
  1328. acb->active_dcb = dcb;
  1329. return_code = 0;
  1330. /* it's important for atn stop */
  1331. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1332. DO_DATALATCH | DO_HWRESELECT);
  1333. /* SCSI command */
  1334. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
  1335. }
  1336. return return_code;
  1337. }
  1338. #define DC395x_ENABLE_MSGOUT \
  1339. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1340. srb->state |= SRB_MSGOUT
  1341. /* abort command */
  1342. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  1343. struct ScsiReqBlk *srb)
  1344. {
  1345. srb->msgout_buf[0] = ABORT;
  1346. srb->msg_count = 1;
  1347. DC395x_ENABLE_MSGOUT;
  1348. srb->state &= ~SRB_MSGIN;
  1349. srb->state |= SRB_MSGOUT;
  1350. }
  1351. /**
  1352. * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
  1353. * have been triggered for this card.
  1354. *
  1355. * @acb: a pointer to the adpter control block
  1356. * @scsi_status: the status return when we checked the card
  1357. **/
  1358. static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
  1359. u16 scsi_status)
  1360. {
  1361. struct DeviceCtlBlk *dcb;
  1362. struct ScsiReqBlk *srb;
  1363. u16 phase;
  1364. u8 scsi_intstatus;
  1365. unsigned long flags;
  1366. void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
  1367. u16 *);
  1368. DC395x_LOCK_IO(acb->scsi_host, flags);
  1369. /* This acknowledges the IRQ */
  1370. scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  1371. if ((scsi_status & 0x2007) == 0x2002)
  1372. dprintkl(KERN_DEBUG,
  1373. "COP after COP completed? %04x\n", scsi_status);
  1374. if (debug_enabled(DBG_KG)) {
  1375. if (scsi_intstatus & INT_SELTIMEOUT)
  1376. dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
  1377. }
  1378. /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
  1379. if (timer_pending(&acb->selto_timer))
  1380. del_timer(&acb->selto_timer);
  1381. if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
  1382. disconnect(acb); /* bus free interrupt */
  1383. goto out_unlock;
  1384. }
  1385. if (scsi_intstatus & INT_RESELECTED) {
  1386. reselect(acb);
  1387. goto out_unlock;
  1388. }
  1389. if (scsi_intstatus & INT_SELECT) {
  1390. dprintkl(KERN_INFO, "Host does not support target mode!\n");
  1391. goto out_unlock;
  1392. }
  1393. if (scsi_intstatus & INT_SCSIRESET) {
  1394. scsi_reset_detect(acb);
  1395. goto out_unlock;
  1396. }
  1397. if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
  1398. dcb = acb->active_dcb;
  1399. if (!dcb) {
  1400. dprintkl(KERN_DEBUG,
  1401. "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
  1402. scsi_status, scsi_intstatus);
  1403. goto out_unlock;
  1404. }
  1405. srb = dcb->active_srb;
  1406. if (dcb->flag & ABORT_DEV_) {
  1407. dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
  1408. enable_msgout_abort(acb, srb);
  1409. }
  1410. /* software sequential machine */
  1411. phase = (u16)srb->scsi_phase;
  1412. /*
  1413. * 62037 or 62137
  1414. * call dc395x_scsi_phase0[]... "phase entry"
  1415. * handle every phase before start transfer
  1416. */
  1417. /* data_out_phase0, phase:0 */
  1418. /* data_in_phase0, phase:1 */
  1419. /* command_phase0, phase:2 */
  1420. /* status_phase0, phase:3 */
  1421. /* nop0, phase:4 PH_BUS_FREE .. initial phase */
  1422. /* nop0, phase:5 PH_BUS_FREE .. initial phase */
  1423. /* msgout_phase0, phase:6 */
  1424. /* msgin_phase0, phase:7 */
  1425. dc395x_statev = dc395x_scsi_phase0[phase];
  1426. dc395x_statev(acb, srb, &scsi_status);
  1427. /*
  1428. * if there were any exception occurred scsi_status
  1429. * will be modify to bus free phase new scsi_status
  1430. * transfer out from ... previous dc395x_statev
  1431. */
  1432. srb->scsi_phase = scsi_status & PHASEMASK;
  1433. phase = (u16)scsi_status & PHASEMASK;
  1434. /*
  1435. * call dc395x_scsi_phase1[]... "phase entry" handle
  1436. * every phase to do transfer
  1437. */
  1438. /* data_out_phase1, phase:0 */
  1439. /* data_in_phase1, phase:1 */
  1440. /* command_phase1, phase:2 */
  1441. /* status_phase1, phase:3 */
  1442. /* nop1, phase:4 PH_BUS_FREE .. initial phase */
  1443. /* nop1, phase:5 PH_BUS_FREE .. initial phase */
  1444. /* msgout_phase1, phase:6 */
  1445. /* msgin_phase1, phase:7 */
  1446. dc395x_statev = dc395x_scsi_phase1[phase];
  1447. dc395x_statev(acb, srb, &scsi_status);
  1448. }
  1449. out_unlock:
  1450. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  1451. }
/*
 * Top-level (possibly shared) IRQ handler.  Checks whether the chip
 * actually raised the interrupt: SCSI interrupts are dispatched to
 * dc395x_handle_interrupt(); DMA-engine errors (status bit 0x20) are
 * currently only logged, not recovered.
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
{
	struct AdapterCtlBlk *acb = dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/*
	 * Check for pending interrupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interrupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* dead store; apparently guards against accidental use of acb
		 * after deciding to ignore the error — TODO confirm intent */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}
	return handled;
}
  1487. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1488. u16 *pscsi_status)
  1489. {
  1490. dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
  1491. if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
  1492. *pscsi_status = PH_BUS_FREE; /*.. initial phase */
  1493. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
  1494. srb->state &= ~SRB_MSGOUT;
  1495. }
/*
 * msgout_phase1 - "enter MESSAGE OUT phase" handler
 *
 * Pushes the queued message bytes (or a NOP if none are queued) into
 * the SCSI FIFO and starts the FIFO-out transfer.  If the queued
 * message was an ABORT, the SRB state switches to SRB_ABORT_SENT so
 * msgout_phase0 can finish the transaction.
 */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);

	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (0x%p) Phase unexpected\n",
			srb->cmd);	/* So what ? */
	}
	if (!srb->msg_count) {
		/* nothing queued: answer the target with a NOP message */
		dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
			srb->cmd);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	if (srb->msgout_buf[0] == MSG_ABORT)
		srb->state = SRB_ABORT_SENT;

	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
/*
 * command_phase0 - "leave COMMAND phase" handler: nothing to collect,
 * just latch data (important for atn stop).
 */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * command_phase1 - "enter COMMAND phase" handler
 *
 * Loads the CDB into the SCSI FIFO — either the midlayer-supplied
 * command bytes, or a hand-built REQUEST SENSE CDB when this SRB is an
 * internal auto-request-sense — then starts the FIFO-out transfer.
 */
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb;
	u8 *ptr;
	u16 i;
	dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);

	clear_fifo(acb, "command_phase1");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
	if (!(srb->flag & AUTO_REQSENSE)) {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++) {
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
			ptr++;
		}
	} else {
		/* hand-built REQUEST SENSE CDB */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		dcb = acb->active_dcb;
		/* target id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	}
	srb->state |= SRB_COMMAND;
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
  1562. /*
  1563. * Verify that the remaining space in the hw sg lists is the same as
  1564. * the count of remaining bytes in srb->total_xfer_length
  1565. */
  1566. static void sg_verify_length(struct ScsiReqBlk *srb)
  1567. {
  1568. if (debug_enabled(DBG_SG)) {
  1569. unsigned len = 0;
  1570. unsigned idx = srb->sg_index;
  1571. struct SGentry *psge = srb->segment_x + idx;
  1572. for (; idx < srb->sg_count; psge++, idx++)
  1573. len += psge->length;
  1574. if (len != srb->total_xfer_length)
  1575. dprintkdbg(DBG_SG,
  1576. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1577. srb->total_xfer_length, len);
  1578. }
  1579. }
  1580. /*
  1581. * Compute the next Scatter Gather list index and adjust its length
  1582. * and address if necessary
  1583. */
/*
 * sg_update_list - account for transferred data in the hw S/G list
 * @srb:  request whose S/G list is updated
 * @left: bytes still remaining to transfer
 *
 * Advances sg_index past fully-transferred entries; a partially
 * transferred entry has its address/length adjusted in place, with
 * dma_sync_* bracketing so the device sees the CPU's update of the
 * mapped S/G table.
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
	struct SGentry *psge = srb->segment_x + srb->sg_index;

	dprintkdbg(DBG_0,
		"sg_update_list: Transferred %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done */
			dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
					srb->sg_bus_addr, SEGMENTX_LEN,
					DMA_TO_DEVICE);
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
					srb->sg_bus_addr, SEGMENTX_LEN,
					DMA_TO_DEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);
}
  1619. /*
  1620. * We have transferred a single byte (PIO mode?) and need to update
  1621. * the count of bytes remaining (total_xfer_length) and update the sg
  1622. * entry to either point to next byte in the current sg entry, or of
  1623. * already at the end to point to the start of the next sg entry
  1624. */
  1625. static void sg_subtract_one(struct ScsiReqBlk *srb)
  1626. {
  1627. sg_update_list(srb, srb->total_xfer_length - 1);
  1628. }
  1629. /*
  1630. * cleanup_after_transfer
  1631. *
  1632. * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
  1633. * KG: Currently called from StatusPhase1 ()
  1634. * Should probably also be called from other places
  1635. * Best might be to call it in DataXXPhase0, if new phase will differ
  1636. */
/*
 * Drain any residue from the SCSI and DMA FIFOs once a data transfer has
 * ended, clearing in the direction-appropriate order, then latch the data
 * bus. The direction is taken from bit 0 of the last DMA command
 * (presumably 1 == read — TODO confirm against the TRM-S1040 datasheet;
 * likewise the 0x40/0x80 "empty" flag bits are assumptions from usage).
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		/* clear SCSI FIFO first, then the DMA FIFO */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {		/* write */
		/* mirror order for writes: DMA FIFO first, then SCSI FIFO */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
  1654. /*
  1655. * Those no of bytes will be transferred w/ PIO through the SCSI FIFO
  1656. * Seems to be needed for unknown reasons; could be a hardware bug :-(
  1657. */
  1658. #define DC395x_LASTPIO 4
/*
 * Handle the end of a DATA OUT phase: stop the DMA engine, work out how
 * many bytes never made it to the target (SCSI transfer counter plus
 * whatever is still sitting in the SCSI FIFO), and rewind the SG list by
 * that residue so the transfer can resume correctly. Finally, if the bus
 * has moved on to another phase, drain the FIFOs.
 *
 * @acb:          adapter state
 * @srb:          active request
 * @pscsi_status: SCSI status/phase word captured by the interrupt handler
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);

	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */
	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifocnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			/* low 5 bits = FIFO fill level; doubled for wide (word) mode */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet tranfered
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && scsi_bufflen(srb->cmd) % 2) {
			/* odd-length wide transfer: the single leftover is pad */
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && scsi_sg_count(srb->cmd))
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*	srb->sg_index++; */
			}
		}
	}
	/* phase changed away from DATA OUT: flush engines before moving on */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * Enter a DATA OUT phase: clear the SCSI FIFO and program the chip for
 * an outbound transfer via data_io_transfer().
 */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
/*
 * Handle the end of a DATA IN phase: compute the residue from the SCSI
 * transfer counter and FIFO fill level, drain the last few bytes by PIO
 * when the remainder is at most DC395x_LASTPIO, and rewind the SG list if
 * the target disconnected mid-transfer. Drains FIFOs if the phase changed.
 *
 * @acb:          adapter state
 * @srb:          active request
 * @pscsi_status: SCSI status/phase word captured by the interrupt handler
 */
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 scsi_status = *pscsi_status;

	dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);

	/*
	 * KG: DataIn is much more tricky than DataOut. When the device is finished
	 * and switches to another phase, the SCSI engine should be finished too.
	 * But: There might still be bytes left in its FIFO to be fetched by the DMA
	 * engine and transferred to memory.
	 * We should wait for the FIFOs to be emptied by that (is there any way to
	 * enforce this?) and then stop the DMA engine, because it might think, that
	 * there are more bytes to follow. Yes, the device might disconnect prior to
	 * having all bytes transferred!
	 * Also we should make sure that all data from the DMA engine buffer's really
	 * made its way to the system memory! Some documentation on this would not
	 * seem to be a bad idea, actually.
	 */
	if (!(srb->state & SRB_XFERPAD)) {
		u32 d_left_counter;
		unsigned int sc, fc;

		if (scsi_status & PARITYERROR) {
			dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
				"Parity Error\n", srb->cmd);
			srb->status |= PARITY_ERROR;
		}
		/*
		 * KG: We should wait for the DMA FIFO to be empty ...
		 * but: it would be better to wait first for the SCSI FIFO and then the
		 * the DMA FIFO to become empty? How do we know, that the device not already
		 * sent data to the FIFO in a MsgIn phase, eg.?
		 */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
			/*
			 * NOTE(review): dead code; the second dprintkl below
			 * is missing a comma after KERN_DEBUG and would not
			 * compile if this block were ever re-enabled.
			 */
			int ctr = 6000000;
			dprintkl(KERN_DEBUG,
				"DIP0: Wait for DMA FIFO to flush ...\n");
			/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
			/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
			while (!
			       (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
				0x80) && --ctr);
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG
					"DIP0: Had to wait for DMA ...\n");
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
			/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
			dprintkdbg(DBG_KG, "data_in_phase0: "
				"DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
		}
		/* Now: Check remainig data: The SCSI counters should tell us ... */
		sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
		fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
		/* FIFO counts words in wide mode, hence the shift by one */
		d_left_counter = sc + ((fc & 0x1f)
		       << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
			   0));
		dprintkdbg(DBG_KG, "data_in_phase0: "
			"SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
			"DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
			"Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
			fc,
			(srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
			sc,
			fc,
			DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
			DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
			srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
		/* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */
		if (d_left_counter
		    && srb->total_xfer_length <= DC395x_LASTPIO) {
			size_t left_io = srb->total_xfer_length;
			/*u32 addr = (srb->segment_x[srb->sg_index].address); */
			/*sg_update_list (srb, d_left_counter); */
			dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
				"for remaining %i bytes:",
				fc & 0x1f,
				(srb->dcb->sync_period & WIDE_SYNC) ?
				    "words" : "bytes",
				srb->total_xfer_length);
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					CFG2_WIDEFIFO);
			/*
			 * Pull the remaining bytes out of the SCSI FIFO one
			 * at a time, mapping each SG fragment with IRQs off.
			 */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;

				local_irq_save(flags);
				/* Assumption: it's inside one page as it's at most 4 bytes and
				   I just assume it's on a 4-byte boundary */
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
							   srb->sg_count, &offset, &len);
				virt = base + offset;
				left_io -= len;

				while (len) {
					u8 byte;
					byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
					*virt++ = byte;

					if (debug_enabled(DBG_PIO))
						printk(" %02x", byte);

					d_left_counter--;
					sg_subtract_one(srb);

					len--;

					/* 0x40 presumably flags "FIFO empty" — TODO confirm */
					fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
					if (fc == 0x40) {
						left_io = 0;
						break;
					}
				}

				/* FIFO empty must coincide with counter exhausted */
				WARN_ON((fc != 0x40) == !d_left_counter);

				if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
					/* Read the last byte ... */
					if (srb->total_xfer_length > 0) {
						u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);

						*virt++ = byte;
						srb->total_xfer_length--;
						if (debug_enabled(DBG_PIO))
							printk(" %02x", byte);
					}

					DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
				}

				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}
			/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
			/*srb->total_xfer_length = 0; */
			if (debug_enabled(DBG_PIO))
				printk("\n");
		}
#endif				/* DC395x_LASTPIO */

#if 0
		/*
		 * KG: This was in DATAOUT. Does it also belong here?
		 * Nobody seems to know what counter and fifo_cnt count exactly ...
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (srb->dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
			/*
			 * if WIDE scsi SCSI FIFOCNT unit is word !!!
			 * so need to *= 2
			 * KG: Seems to be correct ...
			 */
		}
#endif
		/* KG: This should not be needed any more! */
		if (d_left_counter == 0
		    || (scsi_status & SCSIXFERCNT_2_ZERO)) {
#if 0
			int ctr = 6000000;
			u8 TempDMAstatus;
			do {
				TempDMAstatus =
				    DC395x_read8(acb, TRM_S1040_DMA_STATUS);
			} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DataInPhase0 waiting for DMA!!\n");
			srb->total_xfer_length = 0;
#endif
			srb->total_xfer_length = d_left_counter;
		} else {	/* phase changed */
			/*
			 * parsing the case:
			 * when a transfer not yet complete
			 * but be disconnected by target
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			sg_update_list(srb, d_left_counter);
		}
	}
	/* KG: The target may decide to disconnect: Empty FIFO before! */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * Enter a DATA IN phase: program the chip for an inbound transfer via
 * data_io_transfer(). (Unlike data_out_phase1, the FIFO is not cleared
 * here.)
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * Program the chip for a data transfer in the direction given by io_dir
 * (XFERDATAIN/XFERDATAOUT). Three cases:
 *  - more than DC395x_LASTPIO bytes left: set up the DMA engine (with or
 *    without an SG table) and kick a SCSI DMA command;
 *  - 1..DC395x_LASTPIO bytes left: transfer by PIO through the SCSI FIFO
 *    (writes are pushed into the FIFO here; reads are drained later in
 *    data_in_phase0);
 *  - nothing left but the target still wants data: transfer a pad byte
 *    (or word in wide mode) and flag an over/under-run.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;

	dprintkdbg(DBG_0,
		"data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}

	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (scsi_sg_count(srb->cmd)) {	/* with S/G */
			io_dir |= DMACMD_SG;
			/* point the chip at the remaining SG entries */
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->sg_bus_addr +
				       sizeof(struct SGentry) *
				       srb->sg_index);
			/* load how many bytes in the sg list table */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       ((u32)(srb->sg_count -
					      srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			/* note: write order is reversed vs. the read case */
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			size_t left_io = srb->total_xfer_length;

			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				     CFG2_WIDEFIFO);

			/* copy the remaining bytes into the SCSI FIFO by PIO */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;

				local_irq_save(flags);
				/* Again, max 4 bytes */
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
							   srb->sg_count, &offset, &len);
				virt = base + offset;
				left_io -= len;

				while (len--) {
					if (debug_enabled(DBG_PIO))
						printk(" %02x", *virt);

					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);

					sg_subtract_one(srb);
				}

				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* wide bus needs an even byte count: pad with 0 */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
					  SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		u8 data = 0, data2 = 0;

		if (srb->sg_count) {
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				      CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * End of STATUS phase: read the target status byte and the following
 * message byte from the FIFO, mark the SRB completed, and accept the
 * message. *pscsi_status is forced to PH_BUS_FREE for the phase engine.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Enter STATUS phase: mark the SRB as awaiting status and issue the
 * "complete sequence" command so the chip fetches status + message.
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2179. /* Check if the message is complete */
  2180. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2181. {
  2182. if (*msgbuf == EXTENDED_MESSAGE) {
  2183. if (len < 2)
  2184. return 0;
  2185. if (len < msgbuf[1] + 2)
  2186. return 0;
  2187. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2188. if (len < 2)
  2189. return 0;
  2190. return 1;
  2191. }
  2192. /* reject_msg */
/*
 * Reject the message just received: queue a MESSAGE REJECT for the next
 * message-out phase and flip the SRB from message-in to message-out state.
 */
static inline void msgin_reject(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = MESSAGE_REJECT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;
	dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
		srb->msgin_buf[0],
		srb->dcb->target_id, srb->dcb->target_lun);
}
  2205. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2206. struct DeviceCtlBlk *dcb, u8 tag)
  2207. {
  2208. struct ScsiReqBlk *srb = NULL;
  2209. struct ScsiReqBlk *i;
  2210. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
  2211. srb->cmd, tag, srb);
  2212. if (!(dcb->tag_mask & (1 << tag)))
  2213. dprintkl(KERN_DEBUG,
  2214. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2215. dcb->tag_mask, tag);
  2216. if (list_empty(&dcb->srb_going_list))
  2217. goto mingx0;
  2218. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2219. if (i->tag_number == tag) {
  2220. srb = i;
  2221. break;
  2222. }
  2223. }
  2224. if (!srb)
  2225. goto mingx0;
  2226. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
  2227. srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
  2228. if (dcb->flag & ABORT_DEV_) {
  2229. /*srb->state = SRB_ABORT_SENT; */
  2230. enable_msgout_abort(acb, srb);
  2231. }
  2232. if (!(srb->state & SRB_DISCONNECT))
  2233. goto mingx0;
  2234. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2235. srb->state |= dcb->active_srb->state;
  2236. srb->state |= SRB_DATA_XFER;
  2237. dcb->active_srb = srb;
  2238. /* How can we make the DORS happy? */
  2239. return srb;
  2240. mingx0:
  2241. srb = acb->tmp_srb;
  2242. srb->state = SRB_UNEXPECT_RESEL;
  2243. dcb->active_srb = srb;
  2244. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2245. srb->msg_count = 1;
  2246. DC395x_ENABLE_MSGOUT;
  2247. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2248. return srb;
  2249. }
/*
 * Push the device's negotiated parameters (target id, sync period and
 * offset) into the chip registers and update the reported transfer rate.
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
  2258. /* set async transfer mode */
/* set async transfer mode */
/*
 * Fall back to asynchronous transfers after a rejected/failed SDTR:
 * disable sync negotiation, zero the offset, reprogram the chip, and —
 * if wide negotiation is still pending — try WDTR next.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
  2278. /* set sync transfer mode */
/* set sync transfer mode */
/*
 * Process an incoming SDTR message (msgin_buf[3] = period in 4ns units,
 * msgin_buf[4] = offset): clamp the offset, round the period up to the
 * nearest supported clock divisor, program the chip, and either answer
 * with a corrected SDTR (target-initiated nego) or optionally follow up
 * with WDTR (we initiated).
 *
 * NOTE(review): the DBG_1 trace divides by srb->msgin_buf[3]; a period
 * byte of 0 from the target would divide by zero when that debug level
 * is enabled — confirm whether this is reachable.
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);

	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	/* never offer a larger offset than we previously agreed on */
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* pick the smallest clock divisor that satisfies both sides */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];

	/* fact 500 = wide (2 bytes/transfer), 250 = narrow, for MB/s math */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;

	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		dcb->min_nego_period / 2) / dcb->min_nego_period);

	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);

		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;

	reprogram_regs(acb, dcb);
}
/*
 * Fall back to narrow transfers after a rejected WDTR: clear the wide
 * bit, mark wide negotiation done, reprogram the chip, and — if sync
 * negotiation is still pending — try SDTR next.
 */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);

	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/*
 * Process an incoming WDTR message (msgin_buf[3] = requested width
 * exponent: 0 = 8 bit, 1 = 16 bit). Clamp the width to what host and
 * device both support, answer the target if it initiated the
 * negotiation, program the result, and optionally follow up with SDTR.
 */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	/* 1 only when both the device config and the card allow wide */
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);

	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* target-initiated WDTR: echo back the (clamped) width */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}

	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
  2397. /*
  2398. * extended message codes:
  2399. *
  2400. * code description
  2401. *
  2402. * 02h Reserved
  2403. * 00h MODIFY DATA POINTER
  2404. * 01h SYNCHRONOUS DATA TRANSFER REQUEST
  2405. * 03h WIDE DATA TRANSFER REQUEST
  2406. * 04h - 7Fh Reserved
  2407. * 80h - FFh Vendor specific
  2408. */
/*
 * Data part of the MESSAGE IN phase: one message byte has been latched
 * by the chip.  Bytes are accumulated in srb->msgin_buf until
 * msgin_completed() reports a whole message, which is then acted upon.
 * Finally the chip is told to ACK the byte (SCMD_MSGACCEPT).
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);

	/* Fetch the next message byte from the SCSI FIFO */
	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			/* Target will disconnect; expect reselection later */
			srb->state = SRB_DISCONNECT;
			break;

		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* Reselection: look up the SRB belonging to this tag */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;

		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			/* Reject of anything else: abort the command */
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;

		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;

		case MSG_IGNOREWIDE:
			/* Discard wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;

		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;

		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd, srb->total_xfer_length);
			break;

		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;

		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;

		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				/* Answer an IDENTIFY with our own identify msg */
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}

		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Setup part of the MESSAGE IN phase: prepare the chip to receive one
 * message byte into the FIFO.  Entering MSGIN also means the pending
 * disconnect (if any) did not happen.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "msgin_phase1");
	/* Expect exactly one byte */
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* Intentionally empty phase handler (no action for this phase/part). */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* Intentionally empty phase handler (no action for this phase/part). */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
  2529. static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
  2530. {
  2531. struct DeviceCtlBlk *i;
  2532. /* set all lun device's period, offset */
  2533. if (dcb->identify_msg & 0x07)
  2534. return;
  2535. if (acb->scan_devices) {
  2536. current_sync_offset = dcb->sync_offset;
  2537. return;
  2538. }
  2539. list_for_each_entry(i, &acb->dcb_list, list)
  2540. if (i->target_id == dcb->target_id) {
  2541. i->sync_period = dcb->sync_period;
  2542. i->sync_offset = dcb->sync_offset;
  2543. i->sync_mode = dcb->sync_mode;
  2544. i->min_nego_period = dcb->min_nego_period;
  2545. }
  2546. }
/*
 * Handle the SCSI bus going free.  Depending on the active SRB's state
 * this is a normal disconnect, a completed command, a selection
 * timeout (retry or fail), or an unexpected bus free.
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;

	if (!dcb) {
		/* Bus free without an active device - should not happen */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);

	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	/* Re-arm the chip for hardware reselection by the target */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		/* Our ABORT message went out: fail the device's commands */
		dcb->flag &= ~ABORT_DEV_;
		acb->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (0x%p) Unexpected\n",
					srb->cmd);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (0x%p) "
					"<%02i-%i> SelTO\n", srb->cmd,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* Requeue and retry after a short delay */
				free_tag(dcb, srb);
				list_move(&srb->list, &dcb->srb_waiting_list);
				dprintkdbg(DBG_KG,
					"disconnect: (0x%p) Retry\n",
					srb->cmd);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/*
			 * SRB_COMPLETED: hand the finished request back
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * A target has reselected the adapter.  Identify the reselecting
 * target/LUN, find (or fabricate, via tmp_srb) the SRB for the
 * reconnecting command, and reprogram the chip with that device's
 * transfer parameters.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;	/* diagnostic only; not read afterwards */
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);

	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */

			/* Requeue the interrupted command for a retry */
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			list_move(&srb->list, &dcb->srb_waiting_list);
			waiting_set_timer(acb, HZ / 20);

			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;

	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);

	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* Tagged: the real SRB is identified later by the tag message */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host   ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);	/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);	/* offset    */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);	/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * tagq_blacklist - return nonzero when the device identified by its
 * 28-byte INQUIRY vendor/product string must not use tagged queueing.
 * The lookup table is currently compiled out (#if 0), so no device is
 * blacklisted; when built with DC395x_NO_TAGQ every device is.
 */
static inline u8 tagq_blacklist(char *name)
{
#ifndef DC395x_NO_TAGQ
#if 0
	u8 i;
	for (i = 0; i < BADDEVCNT; i++)
		if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
			return 1;
#endif
	return 0;
#else
	return 1;
#endif
}
  2743. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2744. {
  2745. /* Check for SCSI format (ANSI and Response data format) */
  2746. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2747. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2748. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2749. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2750. /* ((dcb->dev_type == TYPE_DISK)
  2751. || (dcb->dev_type == TYPE_MOD)) && */
  2752. !tagq_blacklist(((char *)ptr) + 8)) {
  2753. if (dcb->max_command == 1)
  2754. dcb->max_command =
  2755. dcb->acb->tag_max_num;
  2756. dcb->sync_mode |= EN_TAG_QUEUEING;
  2757. /*dcb->tag_mask = 0; */
  2758. } else
  2759. dcb->max_command = 1;
  2760. }
  2761. }
  2762. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2763. struct ScsiInqData *ptr)
  2764. {
  2765. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2766. dcb->dev_type = bval1;
  2767. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2768. disc_tagq_set(dcb, ptr);
  2769. }
  2770. /* unmap mapped pci regions from SRB */
  2771. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  2772. {
  2773. struct scsi_cmnd *cmd = srb->cmd;
  2774. enum dma_data_direction dir = cmd->sc_data_direction;
  2775. if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
  2776. /* unmap DC395x SG list */
  2777. dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
  2778. srb->sg_bus_addr, SEGMENTX_LEN);
  2779. dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
  2780. DMA_TO_DEVICE);
  2781. dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
  2782. scsi_sg_count(cmd), scsi_bufflen(cmd));
  2783. /* unmap the sg segments */
  2784. scsi_dma_unmap(cmd);
  2785. }
  2786. }
  2787. /* unmap mapped pci sense buffer from SRB */
  2788. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  2789. struct ScsiReqBlk *srb)
  2790. {
  2791. if (!(srb->flag & AUTO_REQSENSE))
  2792. return;
  2793. /* Unmap sense buffer */
  2794. dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
  2795. srb->segment_x[0].address);
  2796. dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
  2797. srb->segment_x[0].length, DMA_FROM_DEVICE);
  2798. /* Restore SG stuff */
  2799. srb->total_xfer_length = srb->xferred;
  2800. srb->segment_x[0].address =
  2801. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
  2802. srb->segment_x[0].length =
  2803. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
  2804. }
  2805. /*
  2806. * Complete execution of a SCSI command
  2807. * Signal completion to the generic SCSI driver
  2808. */
  2809. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2810. struct ScsiReqBlk *srb)
  2811. {
  2812. u8 tempcnt, status;
  2813. struct scsi_cmnd *cmd = srb->cmd;
  2814. enum dma_data_direction dir = cmd->sc_data_direction;
  2815. int ckc_only = 1;
  2816. dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
  2817. srb->cmd->device->id, (u8)srb->cmd->device->lun);
  2818. dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
  2819. srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
  2820. scsi_sgtalbe(cmd));
  2821. status = srb->target_status;
  2822. if (srb->flag & AUTO_REQSENSE) {
  2823. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
  2824. pci_unmap_srb_sense(acb, srb);
  2825. /*
  2826. ** target status..........................
  2827. */
  2828. srb->flag &= ~AUTO_REQSENSE;
  2829. srb->adapter_status = 0;
  2830. srb->target_status = CHECK_CONDITION << 1;
  2831. if (debug_enabled(DBG_1)) {
  2832. switch (cmd->sense_buffer[2] & 0x0f) {
  2833. case NOT_READY:
  2834. dprintkl(KERN_DEBUG,
  2835. "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2836. cmd->cmnd[0], dcb->target_id,
  2837. dcb->target_lun, status, acb->scan_devices);
  2838. break;
  2839. case UNIT_ATTENTION:
  2840. dprintkl(KERN_DEBUG,
  2841. "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2842. cmd->cmnd[0], dcb->target_id,
  2843. dcb->target_lun, status, acb->scan_devices);
  2844. break;
  2845. case ILLEGAL_REQUEST:
  2846. dprintkl(KERN_DEBUG,
  2847. "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2848. cmd->cmnd[0], dcb->target_id,
  2849. dcb->target_lun, status, acb->scan_devices);
  2850. break;
  2851. case MEDIUM_ERROR:
  2852. dprintkl(KERN_DEBUG,
  2853. "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2854. cmd->cmnd[0], dcb->target_id,
  2855. dcb->target_lun, status, acb->scan_devices);
  2856. break;
  2857. case HARDWARE_ERROR:
  2858. dprintkl(KERN_DEBUG,
  2859. "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2860. cmd->cmnd[0], dcb->target_id,
  2861. dcb->target_lun, status, acb->scan_devices);
  2862. break;
  2863. }
  2864. if (cmd->sense_buffer[7] >= 6)
  2865. printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
  2866. "(0x%08x 0x%08x)\n",
  2867. cmd->sense_buffer[2], cmd->sense_buffer[12],
  2868. cmd->sense_buffer[13],
  2869. *((unsigned int *)(cmd->sense_buffer + 3)),
  2870. *((unsigned int *)(cmd->sense_buffer + 8)));
  2871. else
  2872. printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
  2873. cmd->sense_buffer[2],
  2874. *((unsigned int *)(cmd->sense_buffer + 3)));
  2875. }
  2876. if (status == (CHECK_CONDITION << 1)) {
  2877. cmd->result = DID_BAD_TARGET << 16;
  2878. goto ckc_e;
  2879. }
  2880. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
  2881. if (srb->total_xfer_length
  2882. && srb->total_xfer_length >= cmd->underflow)
  2883. cmd->result =
  2884. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  2885. srb->end_message, CHECK_CONDITION);
  2886. /*SET_RES_DID(cmd->result,DID_OK) */
  2887. else
  2888. cmd->result =
  2889. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  2890. srb->end_message, CHECK_CONDITION);
  2891. goto ckc_e;
  2892. }
  2893. /*************************************************************/
  2894. if (status) {
  2895. /*
  2896. * target status..........................
  2897. */
  2898. if (status_byte(status) == CHECK_CONDITION) {
  2899. request_sense(acb, dcb, srb);
  2900. return;
  2901. } else if (status_byte(status) == QUEUE_FULL) {
  2902. tempcnt = (u8)list_size(&dcb->srb_going_list);
  2903. dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
  2904. dcb->target_id, dcb->target_lun, tempcnt);
  2905. if (tempcnt > 1)
  2906. tempcnt--;
  2907. dcb->max_command = tempcnt;
  2908. free_tag(dcb, srb);
  2909. list_move(&srb->list, &dcb->srb_waiting_list);
  2910. waiting_set_timer(acb, HZ / 20);
  2911. srb->adapter_status = 0;
  2912. srb->target_status = 0;
  2913. return;
  2914. } else if (status == SCSI_STAT_SEL_TIMEOUT) {
  2915. srb->adapter_status = H_SEL_TIMEOUT;
  2916. srb->target_status = 0;
  2917. cmd->result = DID_NO_CONNECT << 16;
  2918. } else {
  2919. srb->adapter_status = 0;
  2920. SET_RES_DID(cmd->result, DID_ERROR);
  2921. SET_RES_MSG(cmd->result, srb->end_message);
  2922. SET_RES_TARGET(cmd->result, status);
  2923. }
  2924. } else {
  2925. /*
  2926. ** process initiator status..........................
  2927. */
  2928. status = srb->adapter_status;
  2929. if (status & H_OVER_UNDER_RUN) {
  2930. srb->target_status = 0;
  2931. SET_RES_DID(cmd->result, DID_OK);
  2932. SET_RES_MSG(cmd->result, srb->end_message);
  2933. } else if (srb->status & PARITY_ERROR) {
  2934. SET_RES_DID(cmd->result, DID_PARITY);
  2935. SET_RES_MSG(cmd->result, srb->end_message);
  2936. } else { /* No error */
  2937. srb->adapter_status = 0;
  2938. srb->target_status = 0;
  2939. SET_RES_DID(cmd->result, DID_OK);
  2940. }
  2941. }
  2942. ckc_only = 0;
  2943. /* Check Error Conditions */
  2944. ckc_e:
  2945. pci_unmap_srb(acb, srb);
  2946. if (cmd->cmnd[0] == INQUIRY) {
  2947. unsigned char *base = NULL;
  2948. struct ScsiInqData *ptr;
  2949. unsigned long flags = 0;
  2950. struct scatterlist* sg = scsi_sglist(cmd);
  2951. size_t offset = 0, len = sizeof(struct ScsiInqData);
  2952. local_irq_save(flags);
  2953. base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
  2954. ptr = (struct ScsiInqData *)(base + offset);
  2955. if (!ckc_only && (cmd->result & RES_DID) == 0
  2956. && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
  2957. && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
  2958. dcb->inquiry7 = ptr->Flags;
  2959. /*if( srb->cmd->cmnd[0] == INQUIRY && */
  2960. /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
  2961. if ((cmd->result == (DID_OK << 16) ||
  2962. status_byte(cmd->result) == CHECK_CONDITION)) {
  2963. if (!dcb->init_tcq_flag) {
  2964. add_dev(acb, dcb, ptr);
  2965. dcb->init_tcq_flag = 1;
  2966. }
  2967. }
  2968. scsi_kunmap_atomic_sg(base);
  2969. local_irq_restore(flags);
  2970. }
  2971. /* Here is the info for Doug Gilbert's sg3 ... */
  2972. scsi_set_resid(cmd, srb->total_xfer_length);
  2973. /* This may be interpreted by sb. or not ... */
  2974. cmd->SCp.this_residual = srb->total_xfer_length;
  2975. cmd->SCp.buffers_residual = 0;
  2976. if (debug_enabled(DBG_KG)) {
  2977. if (srb->total_xfer_length)
  2978. dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
  2979. "cmnd=0x%02x Missed %i bytes\n",
  2980. cmd, cmd->device->id, (u8)cmd->device->lun,
  2981. cmd->cmnd[0], srb->total_xfer_length);
  2982. }
  2983. if (srb != acb->tmp_srb) {
  2984. /* Add to free list */
  2985. dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
  2986. cmd, cmd->result);
  2987. list_move_tail(&srb->list, &acb->srb_free_list);
  2988. } else {
  2989. dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
  2990. }
  2991. cmd->scsi_done(cmd);
  2992. waiting_process_next(acb);
  2993. }
  2994. /* abort all cmds in our queues */
  2995. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  2996. struct scsi_cmnd *cmd, u8 force)
  2997. {
  2998. struct DeviceCtlBlk *dcb;
  2999. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3000. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3001. struct ScsiReqBlk *srb;
  3002. struct ScsiReqBlk *tmp;
  3003. struct scsi_cmnd *p;
  3004. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3005. enum dma_data_direction dir;
  3006. int result;
  3007. p = srb->cmd;
  3008. dir = p->sc_data_direction;
  3009. result = MK_RES(0, did_flag, 0, 0);
  3010. printk("G:%p(%02i-%i) ", p,
  3011. p->device->id, (u8)p->device->lun);
  3012. list_del(&srb->list);
  3013. free_tag(dcb, srb);
  3014. list_add_tail(&srb->list, &acb->srb_free_list);
  3015. p->result = result;
  3016. pci_unmap_srb_sense(acb, srb);
  3017. pci_unmap_srb(acb, srb);
  3018. if (force) {
  3019. /* For new EH, we normally don't need to give commands back,
  3020. * as they all complete or all time out */
  3021. p->scsi_done(p);
  3022. }
  3023. }
  3024. if (!list_empty(&dcb->srb_going_list))
  3025. dprintkl(KERN_DEBUG,
  3026. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3027. dcb->target_id, dcb->target_lun);
  3028. if (dcb->tag_mask)
  3029. dprintkl(KERN_DEBUG,
  3030. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3031. dcb->target_id, dcb->target_lun,
  3032. dcb->tag_mask);
  3033. /* Waiting queue */
  3034. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3035. int result;
  3036. p = srb->cmd;
  3037. result = MK_RES(0, did_flag, 0, 0);
  3038. printk("W:%p<%02i-%i>", p, p->device->id,
  3039. (u8)p->device->lun);
  3040. list_move_tail(&srb->list, &acb->srb_free_list);
  3041. p->result = result;
  3042. pci_unmap_srb_sense(acb, srb);
  3043. pci_unmap_srb(acb, srb);
  3044. if (force) {
  3045. /* For new EH, we normally don't need to give commands back,
  3046. * as they all complete or all time out */
  3047. cmd->scsi_done(cmd);
  3048. }
  3049. }
  3050. if (!list_empty(&dcb->srb_waiting_list))
  3051. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3052. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3053. dcb->target_lun);
  3054. dcb->flag &= ~ABORT_DEV_;
  3055. }
  3056. printk("\n");
  3057. }
/*
 * Assert SCSI bus reset on the chip and busy-wait until the chip
 * reports the reset interrupt.  RESET_DEV is set first so the
 * reset-detect handler knows the reset was self-inflicted.
 */
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
	dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
	acb->acb_flag |= RESET_DEV;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

	/* Spin until the chip signals completion of the bus reset */
	while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
		/* nothing */;
}
/*
 * Program the TRM-S1040 with the adapter's basic operating
 * configuration: selection timeout, parity checking, host ID,
 * asynchronous transfer mode, LED control, DMA FIFO behaviour and
 * interrupt enables.
 */
static void set_basic_config(struct AdapterCtlBlk *acb)
{
	u8 bval;
	u16 wval;
	DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
	if (acb->config & HCC_PARITY)
		bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
	else
		bval = PHASELATCH | INITIATOR | BLOCKRST;

	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);

	/* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);	/* was 0x13: default */
	/* program Host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	/* set asynchronous transfer (sync offset 0) */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
	/* Turn LED control off */
	wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
	DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
	/* DMA config */
	wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
	wval |=
	    DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
	DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
	/* Clear pending interrupt status */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	/* Enable SCSI interrupt */
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
		      /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
	    );
}
/*
 * A SCSI bus reset was detected - either triggered by ourselves via
 * reset_scsi_bus() (RESET_DEV set) or by another initiator/device.
 * Reset the chip modules, suspend command processing for a while and,
 * if the reset came from outside, fail all outstanding commands with
 * DID_RESET and restart processing.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	acb->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */

	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* We caused this reset ourselves */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* External reset: fail everything and restart the queue */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * Issue a REQUEST SENSE for a command that returned CHECK CONDITION.
 * The SRB is temporarily retargeted at cmd->sense_buffer: the first
 * S/G entry and the transfer length are saved (restored later by
 * pci_unmap_srb_sense()) and replaced by a single-entry mapping of
 * the sense buffer.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
		cmd, cmd->device->id, (u8)cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;

	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Save some data: first S/G entry goes to the last table slot */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
	/* Map sense buffer */
	srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
			cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
			DMA_FROM_DEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       SCSI_SENSE_BUFFERSIZE);
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		dprintkl(KERN_DEBUG,
			"request_sense: (0x%p) failed <%02i-%i>\n",
			srb->cmd, dcb->target_id, dcb->target_lun);
		/* Bus busy: requeue and retry shortly */
		list_move(&srb->list, &dcb->srb_waiting_list);
		waiting_set_timer(acb, HZ / 100);
	}
}
  3165. /**
  3166. * device_alloc - Allocate a new device instance. This create the
  3167. * devices instance and sets up all the data items. The adapter
  3168. * instance is required to obtain confiuration information for this
  3169. * device. This does *not* add this device to the adapters device
  3170. * list.
  3171. *
  3172. * @acb: The adapter to obtain configuration information from.
  3173. * @target: The target for the new device.
  3174. * @lun: The lun for the new device.
  3175. *
  3176. * Return the new device if successful or NULL on failure.
  3177. **/
  3178. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3179. u8 target, u8 lun)
  3180. {
  3181. struct NvRamType *eeprom = &acb->eeprom;
  3182. u8 period_index = eeprom->target[target].period & 0x07;
  3183. struct DeviceCtlBlk *dcb;
  3184. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3185. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3186. if (!dcb)
  3187. return NULL;
  3188. dcb->acb = NULL;
  3189. INIT_LIST_HEAD(&dcb->srb_going_list);
  3190. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3191. dcb->active_srb = NULL;
  3192. dcb->tag_mask = 0;
  3193. dcb->max_command = 1;
  3194. dcb->target_id = target;
  3195. dcb->target_lun = lun;
  3196. dcb->dev_mode = eeprom->target[target].cfg0;
  3197. #ifndef DC395x_NO_DISCONNECT
  3198. dcb->identify_msg =
  3199. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3200. #else
  3201. dcb->identify_msg = IDENTIFY(0, lun);
  3202. #endif
  3203. dcb->inquiry7 = 0;
  3204. dcb->sync_mode = 0;
  3205. dcb->min_nego_period = clock_period[period_index];
  3206. dcb->sync_period = 0;
  3207. dcb->sync_offset = 0;
  3208. dcb->flag = 0;
  3209. #ifndef DC395x_NO_WIDE
  3210. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3211. && (acb->config & HCC_WIDE_CARD))
  3212. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3213. #endif
  3214. #ifndef DC395x_NO_SYNC
  3215. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3216. if (!(lun) || current_sync_offset)
  3217. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3218. #endif
  3219. if (dcb->target_lun != 0) {
  3220. /* Copy settings */
  3221. struct DeviceCtlBlk *p;
  3222. list_for_each_entry(p, &acb->dcb_list, list)
  3223. if (p->target_id == dcb->target_id)
  3224. break;
  3225. dprintkdbg(DBG_1,
  3226. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3227. dcb->target_id, dcb->target_lun,
  3228. p->target_id, p->target_lun);
  3229. dcb->sync_mode = p->sync_mode;
  3230. dcb->sync_period = p->sync_period;
  3231. dcb->min_nego_period = p->min_nego_period;
  3232. dcb->sync_offset = p->sync_offset;
  3233. dcb->inquiry7 = p->inquiry7;
  3234. }
  3235. return dcb;
  3236. }
  3237. /**
  3238. * adapter_add_device - Adds the device instance to the adaptor instance.
  3239. *
  3240. * @acb: The adapter device to be updated
  3241. * @dcb: A newly created and initialised device instance to add.
  3242. **/
  3243. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3244. struct DeviceCtlBlk *dcb)
  3245. {
  3246. /* backpointer to adapter */
  3247. dcb->acb = acb;
  3248. /* set run_robin to this device if it is currently empty */
  3249. if (list_empty(&acb->dcb_list))
  3250. acb->dcb_run_robin = dcb;
  3251. /* add device to list */
  3252. list_add_tail(&dcb->list, &acb->dcb_list);
  3253. /* update device maps */
  3254. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3255. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3256. }
  3257. /**
  3258. * adapter_remove_device - Removes the device instance from the adaptor
  3259. * instance. The device instance is not check in any way or freed by this.
  3260. * The caller is expected to take care of that. This will simply remove the
  3261. * device from the adapters data strcutures.
  3262. *
  3263. * @acb: The adapter device to be updated
  3264. * @dcb: A device that has previously been added to the adapter.
  3265. **/
static void adapter_remove_device(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	struct DeviceCtlBlk *i;
	struct DeviceCtlBlk *tmp;

	dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);

	/* fix up any pointers to this device that we have in the adapter */
	if (acb->active_dcb == dcb)
		acb->active_dcb = NULL;
	/* advance the round-robin cursor past the device being removed */
	if (acb->dcb_run_robin == dcb)
		acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);

	/*
	 * unlink from list - the search (rather than a plain list_del)
	 * guards against a dcb that is not actually on the list
	 */
	list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
		if (dcb == i) {
			list_del(&i->list);
			break;
		}

	/* clear map and children; the dcb itself is freed by the caller */
	acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
	acb->children[dcb->target_id][dcb->target_lun] = NULL;
	dcb->acb = NULL;
}
  3289. /**
  3290. * adapter_remove_and_free_device - Removes a single device from the adapter
  3291. * and then frees the device information.
  3292. *
  3293. * @acb: The adapter device to be updated
  3294. * @dcb: A device that has previously been added to the adapter.
  3295. */
static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	/*
	 * NOTE(review): the guard is "> 1", so a device with exactly one
	 * request still on its going list is removed and freed anyway.
	 * Presumably that single request is the command which triggered
	 * the teardown - confirm before tightening this to "> 0".
	 */
	if (list_size(&dcb->srb_going_list) > 1) {
		dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
		           "Won't remove because of %i active requests.\n",
			   dcb->target_id, dcb->target_lun,
			   list_size(&dcb->srb_going_list));
		return;
	}
	/* unhook from the adapter's bookkeeping, then release the memory */
	adapter_remove_device(acb, dcb);
	kfree(dcb);
}
  3309. /**
  3310. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3311. * devices associated with the specified adapter.
  3312. *
  3313. * @acb: The adapter from which all devices should be removed.
  3314. **/
  3315. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3316. {
  3317. struct DeviceCtlBlk *dcb;
  3318. struct DeviceCtlBlk *tmp;
  3319. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3320. list_size(&acb->dcb_list));
  3321. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3322. adapter_remove_and_free_device(acb, dcb);
  3323. }
  3324. /**
  3325. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3326. * scsi device that we need to deal with. We allocate a new device and then
  3327. * insert that device into the adapters device list.
  3328. *
  3329. * @scsi_device: The new scsi device that we need to handle.
  3330. **/
  3331. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3332. {
  3333. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3334. struct DeviceCtlBlk *dcb;
  3335. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3336. if (!dcb)
  3337. return -ENOMEM;
  3338. adapter_add_device(acb, dcb);
  3339. return 0;
  3340. }
  3341. /**
  3342. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3343. * device that is going away.
  3344. *
  3345. * @scsi_device: The new scsi device that we need to handle.
  3346. **/
  3347. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3348. {
  3349. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3350. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3351. if (dcb)
  3352. adapter_remove_and_free_device(acb, dcb);
  3353. }
  3354. /**
  3355. * trms1040_wait_30us: wait for 30 us
  3356. *
  3357. * Waits for 30us (using the chip by the looks of it..)
  3358. *
  3359. * @io_port: base I/O address
  3360. **/
static void trms1040_wait_30us(unsigned long io_port)
{
	/* ScsiPortStallExecution(30); wait 30 us */
	/*
	 * Arm the chip's general-purpose timer (count of 5; presumably
	 * ~6us per tick for a 30us total - confirm against the TRM-S1040
	 * datasheet) and busy-wait until it reports expiry.  No timeout:
	 * a dead chip would spin here forever.
	 */
	outb(5, io_port + TRM_S1040_GEN_TIMER);
	while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
		/* nothing */ ;
}
  3368. /**
  3369. * trms1040_write_cmd - write the secified command and address to
  3370. * chip
  3371. *
  3372. * @io_port: base I/O address
  3373. * @cmd: SB + op code (command) to send
  3374. * @addr: address to send
  3375. **/
static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
{
	int i;
	u8 send_data;

	/* program SB + OP code */
	/* Shift out the 3 command bits, MSB first (bit 2 downwards). */
	for (i = 0; i < 3; i++, cmd <<= 1) {
		send_data = NVR_SELECT;
		if (cmd & 0x04)	/* Start from bit 2 */
			send_data |= NVR_BITOUT;
		/* present the data bit, then raise the clock to latch it */
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK),
		     io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}

	/* send address */
	/* Shift out the 7 address bits, MSB first (bit 6 downwards). */
	for (i = 0; i < 7; i++, addr <<= 1) {
		send_data = NVR_SELECT;
		if (addr & 0x40)	/* Start from bit 6 */
			send_data |= NVR_BITOUT;
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK),
		     io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}

	/* leave chip select asserted, clock and data low */
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);
}
  3405. /**
  3406. * trms1040_set_data - store a single byte in the eeprom
  3407. *
  3408. * Called from write all to write a single byte into the SSEEPROM
  3409. * Which is done one bit at a time.
  3410. *
  3411. * @io_port: base I/O address
  3412. * @addr: offset into EEPROM
  3413. * @byte: bytes to write
  3414. **/
static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
	int i;
	u8 send_data;

	/* Send write command & address (0x05 is the write opcode used
	 * throughout this driver) */
	trms1040_write_cmd(io_port, 0x05, addr);

	/* Write data */
	/* Shift out the 8 data bits, MSB first. */
	for (i = 0; i < 8; i++, byte <<= 1) {
		send_data = NVR_SELECT;
		if (byte & 0x80)	/* Start from bit 7 */
			send_data |= NVR_BITOUT;
		/* present the bit, then clock it into the EEPROM */
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* re-select the chip so its ready/busy status can be polled */
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Wait for write ready */
	/*
	 * Busy-wait until the EEPROM raises its data-out line to signal
	 * that the internal write cycle has completed.  No timeout: a
	 * hung chip would spin here forever.
	 */
	while (1) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
			break;
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
}
  3450. /**
  3451. * trms1040_write_all - write 128 bytes to the eeprom
  3452. *
  3453. * Write the supplied 128 bytes to the chips SEEPROM
  3454. *
  3455. * @eeprom: the data to write
  3456. * @io_port: the base io port
  3457. **/
static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;
	u8 addr;

	/* Enable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* write enable */
	/* opcode 0x04 / addr 0xFF is presumably the serial EEPROM's
	 * EWEN (erase/write enable) command - confirm against the
	 * 93C46 / TRM-S1040 datasheet */
	trms1040_write_cmd(io_port, 0x04, 0xFF);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* write all 128 bytes of the image, one byte at a time */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		trms1040_set_data(io_port, addr, *b_eeprom);

	/* write disable (0x04 / 0x00: presumably EWDS) */
	trms1040_write_cmd(io_port, 0x04, 0x00);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable SEEPROM */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
  3480. /**
  3481. * trms1040_get_data - get a single byte from the eeprom
  3482. *
  3483. * Called from read all to read a single byte into the SSEEPROM
  3484. * Which is done one bit at a time.
  3485. *
  3486. * @io_port: base I/O address
  3487. * @addr: offset into SEEPROM
  3488. *
  3489. * Returns the byte read.
  3490. **/
static u8 trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* Send read command & address (0x06 is the read opcode used
	 * throughout this driver) */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* read data */
	/* Clock in 8 bits, MSB first, sampling after the falling edge. */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* Get data bit while falling edge */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}
  3514. /**
  3515. * trms1040_read_all - read all bytes from the eeprom
  3516. *
  3517. * Read the 128 bytes from the SEEPROM.
  3518. *
  3519. * @eeprom: where to store the data
  3520. * @io_port: the base io port
  3521. **/
  3522. static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
  3523. {
  3524. u8 *b_eeprom = (u8 *)eeprom;
  3525. u8 addr;
  3526. /* Enable SEEPROM */
  3527. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3528. io_port + TRM_S1040_GEN_CONTROL);
  3529. /* read details */
  3530. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3531. *b_eeprom = trms1040_get_data(io_port, addr);
  3532. /* Disable SEEPROM */
  3533. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3534. io_port + TRM_S1040_GEN_CONTROL);
  3535. }
  3536. /**
  3537. * check_eeprom - get and check contents of the eeprom
  3538. *
  3539. * Read seeprom 128 bytes into the memory provider in eeprom.
  3540. * Checks the checksum and if it's not correct it uses a set of default
  3541. * values.
  3542. *
  3543. * @eeprom: caller allocated strcuture to read the eeprom data into
  3544. * @io_port: io port to read from
  3545. **/
static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* A valid image's 64 16-bit words must sum to exactly 0x1234. */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/* Per-target defaults: 16 u32 words laid over eeprom->target. */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;	/* cfg3,cfg2,period,cfg0 */

		/* Adapter-level defaults follow directly after the targets. */
		*d_eeprom++ = 0x04000F07;	/* max_tag,delay_time,channel_cfg,scsi_id */
		*d_eeprom++ = 0x00000015;	/* reserved1,boot_lun,boot_target,reserved0 */
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* Now load defaults (maybe set by boot/module params) */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/*
		 * Recompute the checksum word (the last u16 of the image)
		 * so the repaired image verifies on the next boot, then
		 * persist it back to the SEEPROM.
		 */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;

		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);
		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
  3601. /**
  3602. * print_eeprom_settings - output the eeprom settings
  3603. * to the kernel log so people can see what they were.
  3604. *
  3605. * @eeprom: The eeprom data strucutre to show details for.
  3606. **/
  3607. static void print_eeprom_settings(struct NvRamType *eeprom)
  3608. {
  3609. dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
  3610. eeprom->scsi_id,
  3611. eeprom->target[0].period,
  3612. clock_speed[eeprom->target[0].period] / 10,
  3613. clock_speed[eeprom->target[0].period] % 10,
  3614. eeprom->target[0].cfg0);
  3615. dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
  3616. eeprom->channel_cfg, eeprom->max_tag,
  3617. 1 << eeprom->max_tag, eeprom->delay_time);
  3618. }
  3619. /* Free SG tables */
  3620. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3621. {
  3622. int i;
  3623. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3624. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3625. kfree(acb->srb_array[i].segment_x);
  3626. }
  3627. /*
  3628. * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
  3629. * should never cross a page boundary */
  3630. static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
  3631. {
  3632. const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
  3633. *SEGMENTX_LEN;
  3634. int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
  3635. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3636. int srb_idx = 0;
  3637. unsigned i = 0;
  3638. struct SGentry *uninitialized_var(ptr);
  3639. for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
  3640. acb->srb_array[i].segment_x = NULL;
  3641. dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
  3642. while (pages--) {
  3643. ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
  3644. if (!ptr) {
  3645. adapter_sg_tables_free(acb);
  3646. return 1;
  3647. }
  3648. dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
  3649. PAGE_SIZE, ptr, srb_idx);
  3650. i = 0;
  3651. while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
  3652. acb->srb_array[srb_idx++].segment_x =
  3653. ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
  3654. }
  3655. if (i < srbs_per_page)
  3656. acb->srb.segment_x =
  3657. ptr + (i * DC395x_MAX_SG_LISTENTRY);
  3658. else
  3659. dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
  3660. return 0;
  3661. }
  3662. /**
  3663. * adapter_print_config - print adapter connection and termination
  3664. * config
  3665. *
  3666. * The io port in the adapter needs to have been set before calling
  3667. * this function.
  3668. *
  3669. * @acb: The adapter to print the information for.
  3670. **/
static void adapter_print_config(struct AdapterCtlBlk *acb)
{
	u8 bval;

	/* GEN_STATUS reports bus width and connector population. */
	bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
	dprintkl(KERN_INFO, "%sConnectors: ",
		((bval & WIDESCSI) ? "(Wide) " : ""));
	/*
	 * Connector bits appear to be active-low (clear = connected);
	 * the all-clear case below is flagged as implausible.  Output
	 * uses bare printk continuations (pre-KERN_CONT style).
	 */
	if (!(bval & CON5068))
		printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
	if (!(bval & CON68))
		printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
	if (!(bval & CON50))
		printk("int50 ");
	if ((bval & (CON5068 | CON50 | CON68)) ==
	    0 /*(CON5068 | CON50 | CON68) */ )
		printk(" Oops! (All 3?) ");

	/* GEN_CONTROL holds the termination configuration. */
	bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
	printk(" Termination: ");
	if (bval & DIS_TERM)
		printk("Disabled\n");
	else {
		if (bval & AUTOTERM)
			printk("Auto ");
		if (bval & LOW8TERM)
			printk("Low ");
		if (bval & UP8TERM)
			printk("High ");
		printk("\n");
	}
}
  3700. /**
  3701. * adapter_init_params - Initialize the various parameters in the
  3702. * adapter structure. Note that the pointer to the scsi_host is set
  3703. * early (when this instance is created) and the io_port and irq
  3704. * values are set later after they have been reserved. This just gets
  3705. * everything set to a good starting position.
  3706. *
  3707. * The eeprom structure in the adapter needs to have been set before
  3708. * calling this function.
  3709. *
  3710. * @acb: The adapter to initialize.
  3711. **/
static void adapter_init_params(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;
	int i;

	/* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
	/* NOTE: acb->io_port_base is set at port registration time */
	/* NOTE: acb->io_port_len is set at port registration time */

	INIT_LIST_HEAD(&acb->dcb_list);
	acb->dcb_run_robin = NULL;
	acb->active_dcb = NULL;

	INIT_LIST_HEAD(&acb->srb_free_list);
	/* temp SRB for Q tag used or abort command used */
	acb->tmp_srb = &acb->srb;
	timer_setup(&acb->waiting_timer, waiting_timeout, 0);
	/* selto_timer's callback is installed later, when it is armed */
	timer_setup(&acb->selto_timer, NULL, 0);

	acb->srb_count = DC395x_MAX_SRB_CNT;

	acb->sel_timeout = DC395x_SEL_TIMEOUT;	/* timeout=250ms */
	/* NOTE: acb->irq_level is set at IRQ registration time */

	/* Tagged-queue depth: 2^max_tag from the eeprom, capped at 30. */
	acb->tag_max_num = 1 << eeprom->max_tag;
	if (acb->tag_max_num > 30)
		acb->tag_max_num = 30;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	acb->gmode2 = eeprom->channel_cfg;
	acb->config = 0;	/* NOTE: actually set in adapter_init_chip */

	if (eeprom->channel_cfg & NAC_SCANLUN)
		acb->lun_chk = 1;
	acb->scan_devices = 1;

	acb->scsi_host->this_id = eeprom->scsi_id;
	acb->hostid_bit = (1 << acb->scsi_host->this_id);

	for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
		acb->dcb_map[i] = 0;

	acb->msg_len = 0;

	/* link static array of srbs into the srb free list */
	/*
	 * NOTE(review): only srb_count - 1 SRBs are linked, so the last
	 * element of srb_array never reaches the free list - presumably
	 * deliberate, but worth confirming against the SRB accounting.
	 */
	for (i = 0; i < acb->srb_count - 1; i++)
		list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
}
  3748. /**
  3749. * adapter_init_host - Initialize the scsi host instance based on
  3750. * values that we have already stored in the adapter instance. There's
  3751. * some mention that a lot of these are deprecated, so we won't use
  3752. * them (we'll use the ones in the adapter instance) but we'll fill
  3753. * them in in case something else needs them.
  3754. *
  3755. * The eeprom structure, irq and io ports in the adapter need to have
  3756. * been set before calling this function.
  3757. *
  3758. * @host: The scsi host instance to fill in the values for.
  3759. **/
  3760. static void adapter_init_scsi_host(struct Scsi_Host *host)
  3761. {
  3762. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
  3763. struct NvRamType *eeprom = &acb->eeprom;
  3764. host->max_cmd_len = 24;
  3765. host->can_queue = DC395x_MAX_CMD_QUEUE;
  3766. host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
  3767. host->this_id = (int)eeprom->scsi_id;
  3768. host->io_port = acb->io_port_base;
  3769. host->n_io_port = acb->io_port_len;
  3770. host->dma_channel = -1;
  3771. host->unique_id = acb->io_port_base;
  3772. host->irq = acb->irq_level;
  3773. acb->last_reset = jiffies;
  3774. host->max_id = 16;
  3775. if (host->max_id - 1 == eeprom->scsi_id)
  3776. host->max_id--;
  3777. if (eeprom->channel_cfg & NAC_SCANLUN)
  3778. host->max_lun = 8;
  3779. else
  3780. host->max_lun = 1;
  3781. }
  3782. /**
  3783. * adapter_init_chip - Get the chip into a know state and figure out
  3784. * some of the settings that apply to this adapter.
  3785. *
  3786. * The io port in the adapter needs to have been set before calling
  3787. * this function. The config will be configured correctly on return.
  3788. *
  3789. * @acb: The adapter which we are to init.
  3790. **/
static void adapter_init_chip(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;

	/* Mask all the interrupt */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* Reset SCSI module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* Reset PCI/DMA module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);

	/* program configuration 0 */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	/* the WIDESCSI status bit tells us whether this is a wide card */
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;

	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
		/*spin_unlock_irq (&io_request_lock); */
		udelay(500);

		/*
		 * Push last_reset into the future so command processing
		 * is held off while the bus settles after the reset.
		 */
		acb->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

		/*spin_lock_irq (&io_request_lock); */
	}
}
  3820. /**
  3821. * init_adapter - Grab the resource for the card, setup the adapter
  3822. * information, set the card into a known state, create the various
  3823. * tables etc etc. This basically gets all adapter information all up
  3824. * to date, initialised and gets the chip in sync with it.
  3825. *
  3826. * @host: This hosts adapter structure
  3827. * @io_port: The base I/O port
  3828. * @irq: IRQ
  3829. *
  3830. * Returns 0 if the initialization succeeds, any other value on
  3831. * failure.
  3832. **/
static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
	u32 io_port_len, unsigned int irq)
{
	if (!request_region(io_port, io_port_len, DC395X_NAME)) {
		dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
		goto failed;
	}
	/* store port base to indicate we have registered it */
	acb->io_port_base = io_port;
	acb->io_port_len = io_port_len;

	if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
		/* release the region we just claimed */
		dprintkl(KERN_INFO, "Failed to register IRQ\n");
		goto failed;
	}
	/* store irq to indicate we have registered it */
	acb->irq_level = irq;

	/* get eeprom configuration information and command line settings etc */
	check_eeprom(&acb->eeprom, io_port);
	print_eeprom_settings(&acb->eeprom);

	/* setup adapter control block */
	adapter_init_params(acb);

	/* display card connectors/termination settings */
	adapter_print_config(acb);

	if (adapter_sg_tables_alloc(acb)) {
		dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
		goto failed;
	}
	adapter_init_scsi_host(acb->scsi_host);
	adapter_init_chip(acb);
	set_basic_config(acb);

	dprintkdbg(DBG_0,
		"adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
		"size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
		acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
		sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
	return 0;

failed:
	/*
	 * io_port_base and irq_level double as "resource acquired"
	 * flags (relies on the hostdata being zero-initialised), so
	 * only what was actually registered above is released here.
	 */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);
	adapter_sg_tables_free(acb);

	return 1;
}
  3878. /**
  3879. * adapter_uninit_chip - cleanly shut down the scsi controller chip,
  3880. * stopping all operations and disabling interrupt generation on the
  3881. * card.
  3882. *
  3883. * @acb: The adapter which we are to shutdown.
  3884. **/
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus (only if we performed the power-on reset) */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state */
	/* value is discarded; the read itself presumably acks latched
	 * interrupt bits - confirm against the TRM-S1040 datasheet */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
  3896. /**
  3897. * adapter_uninit - Shut down the chip and release any resources that
  3898. * we had allocated. Once this returns the adapter should not be used
  3899. * anymore.
  3900. *
  3901. * @acb: The adapter which we are to un-initialize.
  3902. **/
static void adapter_uninit(struct AdapterCtlBlk *acb)
{
	unsigned long flags;
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* remove timers */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* quiesce the chip and tear down device bookkeeping under the lock */
	adapter_uninit_chip(acb);
	adapter_remove_and_free_all_devices(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	/* irq/io may be unset if adapter_init() failed part-way through */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
}
/* Print " Yes "/" No " into the seq_file for a boolean flag. */
#undef YESNO
#define YESNO(YN) \
 if (YN) seq_printf(m, " Yes ");\
 else seq_printf(m, " No ")

static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
		" Driver Version " DC395X_VERSION "\n");

	/* hold the IO lock so the dcb/srb lists cannot change under us */
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* adapter-level summary */
	seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
	seq_printf(m, "DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
	seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
	seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	seq_printf(m, "AdapterID %i\n", host->this_id);

	seq_printf(m, "tag_max_num %i", acb->tag_max_num);
	/*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	seq_printf(m, ", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
	/*seq_printf(m, "\n"); */

	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
		acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
		acb->dcb_map[6], acb->dcb_map[7]);
	seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
		acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
		acb->dcb_map[14], acb->dcb_map[15]);

	/* one line per known device */
	seq_puts(m,
		"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			seq_printf(m, " %03i ns ", nego_period);
		else
			seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* derive MHz (integer + one decimal) from the period */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			seq_puts(m, " ");

		/* Add more info ... */
		seq_printf(m, " %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		seq_puts(m, "Waiting queue timer running\n");
	else
		seq_putc(m, '\n');

	/* dump queued and in-flight commands per device */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			seq_putc(m, '\n');
	}

	if (debug_enabled(DBG_1)) {
		seq_printf(m, "DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			seq_printf(m, "%p -> ", dcb);
		}
		seq_puts(m, "END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}
/*
 * SCSI midlayer host template.  Command submission and error handling
 * entry points (queuecommand, eh_abort, eh_bus_reset) are defined
 * elsewhere in this file; per-device setup/teardown goes through
 * slave_alloc/slave_destroy above.
 */
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.show_info = dc395x_show_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	/* SG entries must not cross a page (see adapter_sg_tables_alloc) */
	.dma_boundary = PAGE_SIZE - 1,
};
  4035. /**
  4036. * banner_display - Display banner on first instance of driver
  4037. * initialized.
  4038. **/
  4039. static void banner_display(void)
  4040. {
  4041. static int banner_done = 0;
  4042. if (!banner_done)
  4043. {
  4044. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4045. banner_done = 1;
  4046. }
  4047. }
/**
 * dc395x_init_one - Initialise a single instance of the adapter.
 *
 * The PCI layer will call this once for each instance of the adapter
 * that it finds in the system. The pci_dev structure indicates which
 * instance we are being called from.
 *
 * @dev: The PCI device to initialize.
 * @id: Looks like a pointer to the entry in our pci device table
 * that was actually matched by the PCI subsystem.
 *
 * Returns 0 on success, or an error code (-ve) on failure.
 **/
static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	/* Wake the device up; everything after this point must undo it
	 * on failure (see the fail: label below). */
	if (pci_enable_device(dev))
	{
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}

	/* BAR 0 is the chip's I/O port window; mask off the low flag
	 * bits of the resource address. */
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate scsi host information (includes out adapter) */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	/* hostdata is the per-host private area sized above */
	acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* initialise the adapter and everything we need */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	/* Enable bus mastering only once the chip is initialised */
	pci_set_master(dev);

	/* get the scsi mid level to scan for new devices on the bus */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	/* Teardown order matters: release adapter resources before the
	 * host reference, then undo pci_enable_device(). */
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
  4111. /**
  4112. * dc395x_remove_one - Called to remove a single instance of the
  4113. * adapter.
  4114. *
  4115. * @dev: The PCI device to initialize.
  4116. **/
  4117. static void dc395x_remove_one(struct pci_dev *dev)
  4118. {
  4119. struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
  4120. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
  4121. dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
  4122. scsi_remove_host(scsi_host);
  4123. adapter_uninit(acb);
  4124. pci_disable_device(dev);
  4125. scsi_host_put(scsi_host);
  4126. }
/*
 * PCI IDs this driver binds to: any board built around the Tekram
 * TRM-S1040 ASIC, regardless of subsystem vendor/device.
 */
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
/* PCI driver glue: binds the ID table to the probe/remove callbacks. */
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = dc395x_remove_one,
};
/**
 * dc395x_module_init - Module initialization function
 *
 * Used by both module and built-in driver to initialise this driver.
 *
 * Returns 0 on success, or the negative error code returned by
 * pci_register_driver() on failure.
 **/
static int __init dc395x_module_init(void)
{
	return pci_register_driver(&dc395x_driver);
}
/**
 * dc395x_module_exit - Module cleanup function.
 *
 * Unregisters the PCI driver; the PCI core then invokes
 * dc395x_remove_one() for each bound device.
 **/
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}
/* Module entry/exit hooks and metadata. */
module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");