
/drivers/scsi/sym53c8xx_2/sym_glue.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
  1. /*
  2. * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
  3. * of PCI-SCSI IO processors.
  4. *
  5. * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
  6. * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
  7. *
  8. * This driver is derived from the Linux sym53c8xx driver.
  9. * Copyright (C) 1998-2000 Gerard Roudier
  10. *
  11. * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
  12. * a port of the FreeBSD ncr driver to Linux-1.2.13.
  13. *
  14. * The original ncr driver has been written for 386bsd and FreeBSD by
  15. * Wolfgang Stanglmeier <wolf@cologne.de>
  16. * Stefan Esser <se@mi.Uni-Koeln.de>
  17. * Copyright (C) 1994 Wolfgang Stanglmeier
  18. *
  19. * Other major contributions:
  20. *
  21. * NVRAM detection and reading.
  22. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  23. *
  24. *-----------------------------------------------------------------------------
  25. *
  26. * This program is free software; you can redistribute it and/or modify
  27. * it under the terms of the GNU General Public License as published by
  28. * the Free Software Foundation; either version 2 of the License, or
  29. * (at your option) any later version.
  30. *
  31. * This program is distributed in the hope that it will be useful,
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  34. * GNU General Public License for more details.
  35. *
  36. * You should have received a copy of the GNU General Public License
  37. * along with this program; if not, write to the Free Software
  38. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  39. */
  40. #include <linux/ctype.h>
  41. #include <linux/init.h>
  42. #include <linux/module.h>
  43. #include <linux/moduleparam.h>
  44. #include <linux/spinlock.h>
  45. #include <scsi/scsi.h>
  46. #include <scsi/scsi_tcq.h>
  47. #include <scsi/scsi_device.h>
  48. #include <scsi/scsi_transport.h>
  49. #include "sym_glue.h"
  50. #include "sym_nvram.h"
  51. #define NAME53C "sym53c"
  52. #define NAME53C8XX "sym53c8xx"
  53. struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
  54. unsigned int sym_debug_flags = 0;
  55. static char *excl_string;
  56. static char *safe_string;
  57. module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
  58. module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
  59. module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
  60. module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
  61. module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
  62. module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
  63. module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
  64. module_param_named(verb, sym_driver_setup.verbose, byte, 0);
  65. module_param_named(debug, sym_debug_flags, uint, 0);
  66. module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
  67. module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
  68. module_param_named(excl, excl_string, charp, 0);
  69. module_param_named(safe, safe_string, charp, 0);
  70. MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
  71. MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers");
  72. MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
  73. MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
  74. MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
  75. MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
  76. MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
  77. MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
  78. MODULE_PARM_DESC(debug, "Set bits to enable debugging");
  79. MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3");
  80. MODULE_PARM_DESC(nvram, "Option currently not used");
  81. MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
  82. MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
  83. MODULE_LICENSE("GPL");
  84. MODULE_VERSION(SYM_VERSION);
  85. MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
  86. MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
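/*
 * Illustrative usage of the options declared above (the values are
 * examples, not recommendations): they can be given at module load time,
 * or on the kernel command line with a "sym53c8xx." prefix when the
 * driver is built in, e.g.
 *
 *   modprobe sym53c8xx cmd_per_lun=16 settle=2 safe=n
 */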
  87. static void sym2_setup_params(void)
  88. {
  89. char *p = excl_string;
  90. int xi = 0;
  91. while (p && (xi < 8)) {
  92. char *next_p;
  93. int val = (int) simple_strtoul(p, &next_p, 0);
  94. sym_driver_setup.excludes[xi++] = val;
  95. p = next_p;
  96. }
  97. if (safe_string) {
  98. if (*safe_string == 'y') {
  99. sym_driver_setup.max_tag = 0;
  100. sym_driver_setup.burst_order = 0;
  101. sym_driver_setup.scsi_led = 0;
  102. sym_driver_setup.scsi_diff = 1;
  103. sym_driver_setup.irq_mode = 0;
  104. sym_driver_setup.scsi_bus_check = 2;
  105. sym_driver_setup.host_id = 7;
  106. sym_driver_setup.verbose = 2;
  107. sym_driver_setup.settle_delay = 10;
  108. sym_driver_setup.use_nvram = 1;
  109. } else if (*safe_string != 'n') {
110. printk(KERN_WARNING NAME53C8XX ": Ignoring parameter %s"
111. " passed to safe option\n", safe_string);
  112. }
  113. }
  114. }
  115. static struct scsi_transport_template *sym2_transport_template = NULL;
  116. /*
  117. * Driver private area in the SCSI command structure.
  118. */
  119. struct sym_ucmd { /* Override the SCSI pointer structure */
  120. struct completion *eh_done; /* SCSI error handling */
  121. };
  122. #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
  123. #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
  124. /*
  125. * Complete a pending CAM CCB.
  126. */
  127. void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
  128. {
  129. struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
  130. BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
  131. if (ucmd->eh_done)
  132. complete(ucmd->eh_done);
  133. scsi_dma_unmap(cmd);
  134. cmd->scsi_done(cmd);
  135. }
  136. /*
  137. * Tell the SCSI layer about a BUS RESET.
  138. */
  139. void sym_xpt_async_bus_reset(struct sym_hcb *np)
  140. {
  141. printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
  142. np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
  143. np->s.settle_time_valid = 1;
  144. if (sym_verbose >= 2)
  145. printf_info("%s: command processing suspended for %d seconds\n",
  146. sym_name(np), sym_driver_setup.settle_delay);
  147. }
  148. /*
  149. * Choose the more appropriate CAM status if
  150. * the IO encountered an extended error.
  151. */
  152. static int sym_xerr_cam_status(int cam_status, int x_status)
  153. {
  154. if (x_status) {
  155. if (x_status & XE_PARITY_ERR)
  156. cam_status = DID_PARITY;
  157. else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
  158. cam_status = DID_ERROR;
  159. else if (x_status & XE_BAD_PHASE)
  160. cam_status = DID_ERROR;
  161. else
  162. cam_status = DID_ERROR;
  163. }
  164. return cam_status;
  165. }
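/*
 * Note that, apart from XE_PARITY_ERR (mapped to DID_PARITY), every
 * extended error condition handled above currently resolves to DID_ERROR.
 */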
  166. /*
  167. * Build CAM result for a failed or auto-sensed IO.
  168. */
  169. void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
  170. {
  171. struct scsi_cmnd *cmd = cp->cmd;
  172. u_int cam_status, scsi_status, drv_status;
  173. drv_status = 0;
  174. cam_status = DID_OK;
  175. scsi_status = cp->ssss_status;
  176. if (cp->host_flags & HF_SENSE) {
  177. scsi_status = cp->sv_scsi_status;
  178. resid = cp->sv_resid;
  179. if (sym_verbose && cp->sv_xerr_status)
  180. sym_print_xerr(cmd, cp->sv_xerr_status);
  181. if (cp->host_status == HS_COMPLETE &&
  182. cp->ssss_status == S_GOOD &&
  183. cp->xerr_status == 0) {
  184. cam_status = sym_xerr_cam_status(DID_OK,
  185. cp->sv_xerr_status);
  186. drv_status = DRIVER_SENSE;
  187. /*
  188. * Bounce back the sense data to user.
  189. */
  190. memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  191. memcpy(cmd->sense_buffer, cp->sns_bbuf,
  192. min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
  193. #if 0
  194. /*
  195. * If the device reports a UNIT ATTENTION condition
  196. * due to a RESET condition, we should consider all
  197. * disconnect CCBs for this unit as aborted.
  198. */
  199. if (1) {
  200. u_char *p;
  201. p = (u_char *) cmd->sense_data;
  202. if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
  203. sym_clear_tasks(np, DID_ABORT,
  204. cp->target,cp->lun, -1);
  205. }
  206. #endif
  207. } else {
  208. /*
  209. * Error return from our internal request sense. This
  210. * is bad: we must clear the contingent allegiance
  211. * condition otherwise the device will always return
  212. * BUSY. Use a big stick.
  213. */
  214. sym_reset_scsi_target(np, cmd->device->id);
  215. cam_status = DID_ERROR;
  216. }
  217. } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */
  218. cam_status = DID_OK;
  219. else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */
  220. cam_status = DID_NO_CONNECT;
  221. else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/
  222. cam_status = DID_ERROR;
  223. else { /* Extended error */
  224. if (sym_verbose) {
  225. sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
  226. cp->host_status, cp->ssss_status,
  227. cp->xerr_status);
  228. }
  229. /*
  230. * Set the most appropriate value for CAM status.
  231. */
  232. cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
  233. }
  234. scsi_set_resid(cmd, resid);
  235. cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
  236. }
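/*
 * The result word assembled above uses the classic Linux SCSI encoding:
 * driver byte in bits 31-24, host (CAM) status in bits 23-16 and the
 * SCSI status byte in bits 7-0; the message byte (bits 15-8) is left
 * zero here. This matches what the midlayer's driver_byte()/host_byte()/
 * status_byte() accessors expect.
 */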
  237. static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
  238. {
  239. int segment;
  240. int use_sg;
  241. cp->data_len = 0;
  242. use_sg = scsi_dma_map(cmd);
  243. if (use_sg > 0) {
  244. struct scatterlist *sg;
  245. struct sym_tcb *tp = &np->target[cp->target];
  246. struct sym_tblmove *data;
  247. if (use_sg > SYM_CONF_MAX_SG) {
  248. scsi_dma_unmap(cmd);
  249. return -1;
  250. }
  251. data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
  252. scsi_for_each_sg(cmd, sg, use_sg, segment) {
  253. dma_addr_t baddr = sg_dma_address(sg);
  254. unsigned int len = sg_dma_len(sg);
  255. if ((len & 1) && (tp->head.wval & EWS)) {
  256. len++;
  257. cp->odd_byte_adjustment++;
  258. }
  259. sym_build_sge(np, &data[segment], baddr, len);
  260. cp->data_len += len;
  261. }
  262. } else {
  263. segment = -2;
  264. }
  265. return segment;
  266. }
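/*
 * sym_scatter() returns the number of scatter/gather segments mapped for
 * the command, -1 if the request would need more than SYM_CONF_MAX_SG
 * segments, or -2 if scsi_dma_map() mapped nothing (no data or a mapping
 * failure). Callers treat any negative value as an error.
 */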
  267. /*
  268. * Queue a SCSI command.
  269. */
  270. static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
  271. {
  272. struct scsi_device *sdev = cmd->device;
  273. struct sym_tcb *tp;
  274. struct sym_lcb *lp;
  275. struct sym_ccb *cp;
  276. int order;
  277. /*
  278. * Retrieve the target descriptor.
  279. */
  280. tp = &np->target[sdev->id];
  281. /*
  282. * Select tagged/untagged.
  283. */
  284. lp = sym_lp(tp, sdev->lun);
  285. order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
  286. /*
  287. * Queue the SCSI IO.
  288. */
  289. cp = sym_get_ccb(np, cmd, order);
  290. if (!cp)
  291. return 1; /* Means resource shortage */
  292. sym_queue_scsiio(np, cmd, cp);
  293. return 0;
  294. }
  295. /*
  296. * Setup buffers and pointers that address the CDB.
  297. */
  298. static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
  299. {
  300. memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
  301. cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
  302. cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
  303. return 0;
  304. }
  305. /*
  306. * Setup pointers that address the data and start the I/O.
  307. */
  308. int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
  309. {
  310. u32 lastp, goalp;
  311. int dir;
  312. /*
  313. * Build the CDB.
  314. */
  315. if (sym_setup_cdb(np, cmd, cp))
  316. goto out_abort;
  317. /*
  318. * No direction means no data.
  319. */
  320. dir = cmd->sc_data_direction;
  321. if (dir != DMA_NONE) {
  322. cp->segments = sym_scatter(np, cp, cmd);
  323. if (cp->segments < 0) {
  324. sym_set_cam_status(cmd, DID_ERROR);
  325. goto out_abort;
  326. }
  327. /*
  328. * No segments means no data.
  329. */
  330. if (!cp->segments)
  331. dir = DMA_NONE;
  332. } else {
  333. cp->data_len = 0;
  334. cp->segments = 0;
  335. }
  336. /*
  337. * Set the data pointer.
  338. */
  339. switch (dir) {
  340. case DMA_BIDIRECTIONAL:
  341. scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command");
  342. sym_set_cam_status(cmd, DID_ERROR);
  343. goto out_abort;
  344. case DMA_TO_DEVICE:
  345. goalp = SCRIPTA_BA(np, data_out2) + 8;
  346. lastp = goalp - 8 - (cp->segments * (2*4));
  347. break;
  348. case DMA_FROM_DEVICE:
  349. cp->host_flags |= HF_DATA_IN;
  350. goalp = SCRIPTA_BA(np, data_in2) + 8;
  351. lastp = goalp - 8 - (cp->segments * (2*4));
  352. break;
  353. case DMA_NONE:
  354. default:
  355. lastp = goalp = SCRIPTB_BA(np, no_data);
  356. break;
  357. }
  358. /*
  359. * Set all pointers values needed by SCRIPTS.
  360. */
  361. cp->phys.head.lastp = cpu_to_scr(lastp);
  362. cp->phys.head.savep = cpu_to_scr(lastp);
  363. cp->startp = cp->phys.head.savep;
  364. cp->goalp = cpu_to_scr(goalp);
  365. /*
  366. * When `#ifed 1', the code below makes the driver
  367. * panic on the first attempt to write to a SCSI device.
  368. * It is the first test we want to do after a driver
  369. * change that does not seem obviously safe. :)
  370. */
  371. #if 0
  372. switch (cp->cdb_buf[0]) {
  373. case 0x0A: case 0x2A: case 0xAA:
  374. panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
  375. break;
  376. default:
  377. break;
  378. }
  379. #endif
  380. /*
  381. * activate this job.
  382. */
  383. sym_put_start_queue(np, cp);
  384. return 0;
  385. out_abort:
  386. sym_free_ccb(np, cp);
  387. sym_xpt_done(np, cmd);
  388. return 0;
  389. }
  390. /*
  391. * timer daemon.
  392. *
  393. * Misused to keep the driver running when
  394. * interrupts are not configured correctly.
  395. */
  396. static void sym_timer(struct sym_hcb *np)
  397. {
  398. unsigned long thistime = jiffies;
  399. /*
  400. * Restart the timer.
  401. */
  402. np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
  403. add_timer(&np->s.timer);
  404. /*
  405. * If we are resetting the ncr, wait for settle_time before
  406. * clearing it. Then command processing will be resumed.
  407. */
  408. if (np->s.settle_time_valid) {
  409. if (time_before_eq(np->s.settle_time, thistime)) {
  410. if (sym_verbose >= 2 )
  411. printk("%s: command processing resumed\n",
  412. sym_name(np));
  413. np->s.settle_time_valid = 0;
  414. }
  415. return;
  416. }
  417. /*
  418. * Nothing to do for now, but that may come.
  419. */
  420. if (np->s.lasttime + 4*HZ < thistime) {
  421. np->s.lasttime = thistime;
  422. }
  423. #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
  424. /*
  425. * Some way-broken PCI bridges may lead to
  426. * completions being lost when the clearing
  427. * of the INTFLY flag by the CPU occurs
  428. * concurrently with the chip raising this flag.
429. * If this ever happens, lost completions will
  430. * be reaped here.
  431. */
  432. sym_wakeup_done(np);
  433. #endif
  434. }
  435. /*
  436. * PCI BUS error handler.
  437. */
  438. void sym_log_bus_error(struct Scsi_Host *shost)
  439. {
  440. struct sym_data *sym_data = shost_priv(shost);
  441. struct pci_dev *pdev = sym_data->pdev;
  442. unsigned short pci_sts;
  443. pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
  444. if (pci_sts & 0xf900) {
  445. pci_write_config_word(pdev, PCI_STATUS, pci_sts);
  446. shost_printk(KERN_WARNING, shost,
  447. "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900);
  448. }
  449. }
  450. /*
  451. * queuecommand method. Entered with the host adapter lock held and
  452. * interrupts disabled.
  453. */
  454. static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
  455. void (*done)(struct scsi_cmnd *))
  456. {
  457. struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
  458. struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
  459. int sts = 0;
  460. cmd->scsi_done = done;
  461. memset(ucp, 0, sizeof(*ucp));
  462. /*
  463. * Shorten our settle_time if needed for
  464. * this command not to time out.
  465. */
  466. if (np->s.settle_time_valid && cmd->request->timeout) {
  467. unsigned long tlimit = jiffies + cmd->request->timeout;
  468. tlimit -= SYM_CONF_TIMER_INTERVAL*2;
  469. if (time_after(np->s.settle_time, tlimit)) {
  470. np->s.settle_time = tlimit;
  471. }
  472. }
  473. if (np->s.settle_time_valid)
  474. return SCSI_MLQUEUE_HOST_BUSY;
  475. sts = sym_queue_command(np, cmd);
  476. if (sts)
  477. return SCSI_MLQUEUE_HOST_BUSY;
  478. return 0;
  479. }
  480. static DEF_SCSI_QCMD(sym53c8xx_queue_command)
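/*
 * DEF_SCSI_QCMD() generates the sym53c8xx_queue_command() wrapper used in
 * the host template below; it acquires the host lock with interrupts
 * disabled and then calls sym53c8xx_queue_command_lck(), which matches
 * the locking assumption documented above.
 */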
  481. /*
  482. * Linux entry point of the interrupt handler.
  483. */
  484. static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
  485. {
  486. struct Scsi_Host *shost = dev_id;
  487. struct sym_data *sym_data = shost_priv(shost);
  488. irqreturn_t result;
  489. /* Avoid spinloop trying to handle interrupts on frozen device */
  490. if (pci_channel_offline(sym_data->pdev))
  491. return IRQ_NONE;
  492. if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
  493. spin_lock(shost->host_lock);
  494. result = sym_interrupt(shost);
  495. spin_unlock(shost->host_lock);
  496. if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
  497. return result;
  498. }
  499. /*
  500. * Linux entry point of the timer handler
  501. */
  502. static void sym53c8xx_timer(unsigned long npref)
  503. {
  504. struct sym_hcb *np = (struct sym_hcb *)npref;
  505. unsigned long flags;
  506. spin_lock_irqsave(np->s.host->host_lock, flags);
  507. sym_timer(np);
  508. spin_unlock_irqrestore(np->s.host->host_lock, flags);
  509. }
  510. /*
  511. * What the eh thread wants us to perform.
  512. */
  513. #define SYM_EH_ABORT 0
  514. #define SYM_EH_DEVICE_RESET 1
  515. #define SYM_EH_BUS_RESET 2
  516. #define SYM_EH_HOST_RESET 3
  517. /*
  518. * Generic method for our eh processing.
  519. * The 'op' argument tells what we have to do.
  520. */
  521. static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
  522. {
  523. struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
  524. struct Scsi_Host *shost = cmd->device->host;
  525. struct sym_data *sym_data = shost_priv(shost);
  526. struct pci_dev *pdev = sym_data->pdev;
  527. struct sym_hcb *np = sym_data->ncb;
  528. SYM_QUEHEAD *qp;
  529. int cmd_queued = 0;
  530. int sts = -1;
  531. struct completion eh_done;
  532. scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);
  533. /* We may be in an error condition because the PCI bus
  534. * went down. In this case, we need to wait until the
  535. * PCI bus is reset, the card is reset, and only then
  536. * proceed with the scsi error recovery. There's no
  537. * point in hurrying; take a leisurely wait.
  538. */
  539. #define WAIT_FOR_PCI_RECOVERY 35
  540. if (pci_channel_offline(pdev)) {
  541. int finished_reset = 0;
  542. init_completion(&eh_done);
  543. spin_lock_irq(shost->host_lock);
  544. /* Make sure we didn't race */
  545. if (pci_channel_offline(pdev)) {
  546. BUG_ON(sym_data->io_reset);
  547. sym_data->io_reset = &eh_done;
  548. } else {
  549. finished_reset = 1;
  550. }
  551. spin_unlock_irq(shost->host_lock);
  552. if (!finished_reset)
  553. finished_reset = wait_for_completion_timeout
  554. (sym_data->io_reset,
  555. WAIT_FOR_PCI_RECOVERY*HZ);
  556. spin_lock_irq(shost->host_lock);
  557. sym_data->io_reset = NULL;
  558. spin_unlock_irq(shost->host_lock);
  559. if (!finished_reset)
  560. return SCSI_FAILED;
  561. }
  562. spin_lock_irq(shost->host_lock);
563. /* If this command is still queued somewhere, remember it so we can wait for its completion */
  564. FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
  565. struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  566. if (cp->cmd == cmd) {
  567. cmd_queued = 1;
  568. break;
  569. }
  570. }
571. /* Try to perform the operation we have been asked for */
  572. sts = -1;
  573. switch(op) {
  574. case SYM_EH_ABORT:
  575. sts = sym_abort_scsiio(np, cmd, 1);
  576. break;
  577. case SYM_EH_DEVICE_RESET:
  578. sts = sym_reset_scsi_target(np, cmd->device->id);
  579. break;
  580. case SYM_EH_BUS_RESET:
  581. sym_reset_scsi_bus(np, 1);
  582. sts = 0;
  583. break;
  584. case SYM_EH_HOST_RESET:
  585. sym_reset_scsi_bus(np, 0);
  586. sym_start_up(shost, 1);
  587. sts = 0;
  588. break;
  589. default:
  590. break;
  591. }
  592. /* On error, restore everything and cross fingers :) */
  593. if (sts)
  594. cmd_queued = 0;
  595. if (cmd_queued) {
  596. init_completion(&eh_done);
  597. ucmd->eh_done = &eh_done;
  598. spin_unlock_irq(shost->host_lock);
  599. if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
  600. ucmd->eh_done = NULL;
  601. sts = -2;
  602. }
  603. } else {
  604. spin_unlock_irq(shost->host_lock);
  605. }
  606. dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
  607. sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
  608. return sts ? SCSI_FAILED : SCSI_SUCCESS;
  609. }
  610. /*
  611. * Error handlers called from the eh thread (one thread per HBA).
  612. */
  613. static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
  614. {
  615. return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
  616. }
  617. static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
  618. {
  619. return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
  620. }
  621. static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
  622. {
  623. return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
  624. }
  625. static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
  626. {
  627. return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
  628. }
  629. /*
  630. * Tune device queuing depth, according to various limits.
  631. */
  632. static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
  633. {
  634. struct sym_lcb *lp = sym_lp(tp, lun);
  635. u_short oldtags;
  636. if (!lp)
  637. return;
  638. oldtags = lp->s.reqtags;
  639. if (reqtags > lp->s.scdev_depth)
  640. reqtags = lp->s.scdev_depth;
  641. lp->s.reqtags = reqtags;
  642. if (reqtags != oldtags) {
  643. dev_info(&tp->starget->dev,
  644. "tagged command queuing %s, command queue depth %d.\n",
  645. lp->s.reqtags ? "enabled" : "disabled", reqtags);
  646. }
  647. }
  648. static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
  649. {
  650. struct sym_hcb *np = sym_get_hcb(sdev->host);
  651. struct sym_tcb *tp = &np->target[sdev->id];
  652. struct sym_lcb *lp;
  653. unsigned long flags;
  654. int error;
  655. if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
  656. return -ENXIO;
  657. spin_lock_irqsave(np->s.host->host_lock, flags);
  658. /*
  659. * Fail the device init if the device is flagged NOSCAN at BOOT in
  660. * the NVRAM. This may speed up boot and maintain coherency with
  661. * BIOS device numbering. Clearing the flag allows the user to
  662. * rescan skipped devices later. We also return an error for
  663. * devices not flagged for SCAN LUNS in the NVRAM since some single
  664. * lun devices behave badly when asked for a non zero LUN.
  665. */
  666. if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
  667. tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
  668. starget_printk(KERN_INFO, sdev->sdev_target,
  669. "Scan at boot disabled in NVRAM\n");
  670. error = -ENXIO;
  671. goto out;
  672. }
  673. if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
  674. if (sdev->lun != 0) {
  675. error = -ENXIO;
  676. goto out;
  677. }
  678. starget_printk(KERN_INFO, sdev->sdev_target,
  679. "Multiple LUNs disabled in NVRAM\n");
  680. }
  681. lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
  682. if (!lp) {
  683. error = -ENOMEM;
  684. goto out;
  685. }
  686. if (tp->nlcb == 1)
  687. tp->starget = sdev->sdev_target;
  688. spi_min_period(tp->starget) = tp->usr_period;
  689. spi_max_width(tp->starget) = tp->usr_width;
  690. error = 0;
  691. out:
  692. spin_unlock_irqrestore(np->s.host->host_lock, flags);
  693. return error;
  694. }
  695. /*
  696. * Linux entry point for device queue sizing.
  697. */
  698. static int sym53c8xx_slave_configure(struct scsi_device *sdev)
  699. {
  700. struct sym_hcb *np = sym_get_hcb(sdev->host);
  701. struct sym_tcb *tp = &np->target[sdev->id];
  702. struct sym_lcb *lp = sym_lp(tp, sdev->lun);
  703. int reqtags, depth_to_use;
  704. /*
  705. * Get user flags.
  706. */
  707. lp->curr_flags = lp->user_flags;
  708. /*
  709. * Select queue depth from driver setup.
  710. * Do not use more than configured by user.
  711. * Use at least 1.
  712. * Do not use more than our maximum.
  713. */
  714. reqtags = sym_driver_setup.max_tag;
  715. if (reqtags > tp->usrtags)
  716. reqtags = tp->usrtags;
  717. if (!sdev->tagged_supported)
  718. reqtags = 0;
  719. if (reqtags > SYM_CONF_MAX_TAG)
  720. reqtags = SYM_CONF_MAX_TAG;
  721. depth_to_use = reqtags ? reqtags : 1;
  722. scsi_adjust_queue_depth(sdev,
  723. sdev->tagged_supported ? MSG_SIMPLE_TAG : 0,
  724. depth_to_use);
  725. lp->s.scdev_depth = depth_to_use;
  726. sym_tune_dev_queuing(tp, sdev->lun, reqtags);
  727. if (!spi_initial_dv(sdev->sdev_target))
  728. spi_dv_device(sdev);
  729. return 0;
  730. }
  731. static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
  732. {
  733. struct sym_hcb *np = sym_get_hcb(sdev->host);
  734. struct sym_tcb *tp = &np->target[sdev->id];
  735. struct sym_lcb *lp = sym_lp(tp, sdev->lun);
  736. unsigned long flags;
  737. /* if slave_alloc returned before allocating a sym_lcb, return */
  738. if (!lp)
  739. return;
  740. spin_lock_irqsave(np->s.host->host_lock, flags);
  741. if (lp->busy_itlq || lp->busy_itl) {
  742. /*
  743. * This really shouldn't happen, but we can't return an error
  744. * so let's try to stop all on-going I/O.
  745. */
  746. starget_printk(KERN_WARNING, tp->starget,
  747. "Removing busy LCB (%d)\n", sdev->lun);
  748. sym_reset_scsi_bus(np, 1);
  749. }
  750. if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
  751. /*
  752. * It was the last unit for this target.
  753. */
  754. tp->head.sval = 0;
  755. tp->head.wval = np->rv_scntl3;
  756. tp->head.uval = 0;
  757. tp->tgoal.check_nego = 1;
  758. tp->starget = NULL;
  759. }
  760. spin_unlock_irqrestore(np->s.host->host_lock, flags);
  761. }
  762. /*
  763. * Linux entry point for info() function
  764. */
  765. static const char *sym53c8xx_info (struct Scsi_Host *host)
  766. {
  767. return SYM_DRIVER_NAME;
  768. }
  769. #ifdef SYM_LINUX_PROC_INFO_SUPPORT
  770. /*
  771. * Proc file system stuff
  772. *
  773. * A read operation returns adapter information.
  774. * A write operation is a control command.
  775. * The string is parsed in the driver code and the command is passed
  776. * to the sym_usercmd() function.
  777. */
  778. #ifdef SYM_LINUX_USER_COMMAND_SUPPORT
  779. struct sym_usrcmd {
  780. u_long target;
  781. u_long lun;
  782. u_long data;
  783. u_long cmd;
  784. };
  785. #define UC_SETSYNC 10
  786. #define UC_SETTAGS 11
  787. #define UC_SETDEBUG 12
  788. #define UC_SETWIDE 14
  789. #define UC_SETFLAG 15
  790. #define UC_SETVERBOSE 17
  791. #define UC_RESETDEV 18
  792. #define UC_CLEARDEV 19
  793. static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
  794. {
  795. struct sym_tcb *tp;
  796. int t, l;
  797. switch (uc->cmd) {
  798. case 0: return;
  799. #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
  800. case UC_SETDEBUG:
  801. sym_debug_flags = uc->data;
  802. break;
  803. #endif
  804. case UC_SETVERBOSE:
  805. np->verbose = uc->data;
  806. break;
  807. default:
  808. /*
  809. * We assume that other commands apply to targets.
  810. * This should always be the case and avoid the below
  811. * 4 lines to be repeated 6 times.
  812. */
  813. for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
  814. if (!((uc->target >> t) & 1))
  815. continue;
  816. tp = &np->target[t];
  817. if (!tp->nlcb)
  818. continue;
  819. switch (uc->cmd) {
  820. case UC_SETSYNC:
  821. if (!uc->data || uc->data >= 255) {
  822. tp->tgoal.iu = tp->tgoal.dt =
  823. tp->tgoal.qas = 0;
  824. tp->tgoal.offset = 0;
  825. } else if (uc->data <= 9 && np->minsync_dt) {
  826. if (uc->data < np->minsync_dt)
  827. uc->data = np->minsync_dt;
  828. tp->tgoal.iu = tp->tgoal.dt =
  829. tp->tgoal.qas = 1;
  830. tp->tgoal.width = 1;
  831. tp->tgoal.period = uc->data;
  832. tp->tgoal.offset = np->maxoffs_dt;
  833. } else {
  834. if (uc->data < np->minsync)
  835. uc->data = np->minsync;
  836. tp->tgoal.iu = tp->tgoal.dt =
  837. tp->tgoal.qas = 0;
  838. tp->tgoal.period = uc->data;
  839. tp->tgoal.offset = np->maxoffs;
  840. }
  841. tp->tgoal.check_nego = 1;
  842. break;
  843. case UC_SETWIDE:
  844. tp->tgoal.width = uc->data ? 1 : 0;
  845. tp->tgoal.check_nego = 1;
  846. break;
  847. case UC_SETTAGS:
  848. for (l = 0; l < SYM_CONF_MAX_LUN; l++)
  849. sym_tune_dev_queuing(tp, l, uc->data);
  850. break;
  851. case UC_RESETDEV:
  852. tp->to_reset = 1;
  853. np->istat_sem = SEM;
  854. OUTB(np, nc_istat, SIGP|SEM);
  855. break;
  856. case UC_CLEARDEV:
  857. for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
  858. struct sym_lcb *lp = sym_lp(tp, l);
  859. if (lp) lp->to_clear = 1;
  860. }
  861. np->istat_sem = SEM;
  862. OUTB(np, nc_istat, SIGP|SEM);
  863. break;
  864. case UC_SETFLAG:
  865. tp->usrflags = uc->data;
  866. break;
  867. }
  868. }
  869. break;
  870. }
  871. }
  872. static int sym_skip_spaces(char *ptr, int len)
  873. {
  874. int cnt, c;
  875. for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);
  876. return (len - cnt);
  877. }
  878. static int get_int_arg(char *ptr, int len, u_long *pv)
  879. {
  880. char *end;
  881. *pv = simple_strtoul(ptr, &end, 10);
  882. return (end - ptr);
  883. }
  884. static int is_keyword(char *ptr, int len, char *verb)
  885. {
  886. int verb_len = strlen(verb);
  887. if (len >= verb_len && !memcmp(verb, ptr, verb_len))
  888. return verb_len;
  889. else
  890. return 0;
  891. }
  892. #define SKIP_SPACES(ptr, len) \
  893. if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
  894. return -EINVAL; \
  895. ptr += arg_len; len -= arg_len;
  896. #define GET_INT_ARG(ptr, len, v) \
  897. if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
  898. return -EINVAL; \
  899. ptr += arg_len; len -= arg_len;
  900. /*
  901. * Parse a control command
  902. */
  903. static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length)
  904. {
  905. struct sym_hcb *np = sym_get_hcb(shost);
  906. char *ptr = buffer;
  907. int len = length;
  908. struct sym_usrcmd cmd, *uc = &cmd;
  909. int arg_len;
  910. u_long target;
  911. memset(uc, 0, sizeof(*uc));
  912. if (len > 0 && ptr[len-1] == '\n')
  913. --len;
  914. if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
  915. uc->cmd = UC_SETSYNC;
  916. else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
  917. uc->cmd = UC_SETTAGS;
  918. else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
  919. uc->cmd = UC_SETVERBOSE;
  920. else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
  921. uc->cmd = UC_SETWIDE;
  922. #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
  923. else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
  924. uc->cmd = UC_SETDEBUG;
  925. #endif
  926. else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
  927. uc->cmd = UC_SETFLAG;
  928. else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
  929. uc->cmd = UC_RESETDEV;
  930. else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
  931. uc->cmd = UC_CLEARDEV;
  932. else
  933. arg_len = 0;
  934. #ifdef DEBUG_PROC_INFO
  935. printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
  936. #endif
  937. if (!arg_len)
  938. return -EINVAL;
  939. ptr += arg_len; len -= arg_len;
  940. switch(uc->cmd) {
  941. case UC_SETSYNC:
  942. case UC_SETTAGS:
  943. case UC_SETWIDE:
  944. case UC_SETFLAG:
  945. case UC_RESETDEV:
  946. case UC_CLEARDEV:
  947. SKIP_SPACES(ptr, len);
  948. if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
  949. ptr += arg_len; len -= arg_len;
  950. uc->target = ~0;
  951. } else {
  952. GET_INT_ARG(ptr, len, target);
  953. uc->target = (1<<target);
  954. #ifdef DEBUG_PROC_INFO
  955. printk("sym_user_command: target=%ld\n", target);
  956. #endif
  957. }
  958. break;
  959. }
  960. switch(uc->cmd) {
  961. case UC_SETVERBOSE:
  962. case UC_SETSYNC:
  963. case UC_SETTAGS:
  964. case UC_SETWIDE:
  965. SKIP_SPACES(ptr, len);
  966. GET_INT_ARG(ptr, len, uc->data);
  967. #ifdef DEBUG_PROC_INFO
  968. printk("sym_user_command: data=%ld\n", uc->data);
  969. #endif
  970. break;
  971. #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
  972. case UC_SETDEBUG:
  973. while (len > 0) {
  974. SKIP_SPACES(ptr, len);
  975. if ((arg_len = is_keyword(ptr, len, "alloc")))
  976. uc->data |= DEBUG_ALLOC;
  977. else if ((arg_len = is_keyword(ptr, len, "phase")))
  978. uc->data |= DEBUG_PHASE;
  979. else if ((arg_len = is_keyword(ptr, len, "queue")))
  980. uc->data |= DEBUG_QUEUE;
  981. else if ((arg_len = is_keyword(ptr, len, "result")))
  982. uc->data |= DEBUG_RESULT;
  983. else if ((arg_len = is_keyword(ptr, len, "scatter")))
  984. uc->data |= DEBUG_SCATTER;
  985. else if ((arg_len = is_keyword(ptr, len, "script")))
  986. uc->data |= DEBUG_SCRIPT;
  987. else if ((arg_len = is_keyword(ptr, len, "tiny")))
  988. uc->data |= DEBUG_TINY;
  989. else if ((arg_len = is_keyword(ptr, len, "timing")))
  990. uc->data |= DEBUG_TIMING;
  991. else if ((arg_len = is_keyword(ptr, len, "nego")))
  992. uc->data |= DEBUG_NEGO;
  993. else if ((arg_len = is_keyword(ptr, len, "tags")))
  994. uc->data |= DEBUG_TAGS;
  995. else if ((arg_len = is_keyword(ptr, len, "pointer")))
  996. uc->data |= DEBUG_POINTER;
  997. else
  998. return -EINVAL;
  999. ptr += arg_len; len -= arg_len;
  1000. }
  1001. #ifdef DEBUG_PROC_INFO
  1002. printk("sym_user_command: data=%ld\n", uc->data);
  1003. #endif
  1004. break;
  1005. #endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
  1006. case UC_SETFLAG:
  1007. while (len > 0) {
  1008. SKIP_SPACES(ptr, len);
  1009. if ((arg_len = is_keyword(ptr, len, "no_disc")))
  1010. uc->data &= ~SYM_DISC_ENABLED;
  1011. else
  1012. return -EINVAL;
  1013. ptr += arg_len; len -= arg_len;
  1014. }
  1015. break;
  1016. default:
  1017. break;
  1018. }
  1019. if (len)
  1020. return -EINVAL;
  1021. else {
  1022. unsigned long flags;
  1023. spin_lock_irqsave(shost->host_lock, flags);
  1024. sym_exec_user_command(np, uc);
  1025. spin_unlock_irqrestore(shost->host_lock, flags);
  1026. }
  1027. return length;
  1028. }
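/*
 * Example control commands accepted by the parser above, written through
 * the driver's proc entry (the host number 0 below is only an example):
 *
 *   echo "settags 2 16"  > /proc/scsi/sym53c8xx/0   (16 tags on target 2)
 *   echo "setwide all 1" > /proc/scsi/sym53c8xx/0   (wide on all targets)
 *   echo "resetdev 3"    > /proc/scsi/sym53c8xx/0   (reset target 3)
 */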
  1029. #endif /* SYM_LINUX_USER_COMMAND_SUPPORT */
  1030. #ifdef SYM_LINUX_USER_INFO_SUPPORT
  1031. /*
  1032. * Informations through the proc file system.
  1033. */
  1034. struct info_str {
  1035. char *buffer;
  1036. int length;
  1037. int offset;
  1038. int pos;
  1039. };
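/*
 * info_str describes a window into the caller's buffer for the proc read
 * below: 'offset' and 'length' select the slice of formatted output that
 * is wanted, while 'pos' tracks how much text has been generated so far,
 * so copy_mem_info() only copies bytes that fall inside that window.
 */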
  1040. static void copy_mem_info(struct info_str *info, char *data, int len)
  1041. {
  1042. if (info->pos + len > info->length)
  1043. len = info->length - info->pos;
  1044. if (info->pos + len < info->offset) {
  1045. info->pos += len;
  1046. return;
  1047. }
  1048. if (info->pos < info->offset) {
  1049. data += (info->offset - info->pos);
  1050. len -= (info->offset - info->pos);
  1051. }
  1052. if (len > 0) {
  1053. memcpy(info->buffer + info->pos, data, len);
  1054. info->pos += len;
  1055. }
  1056. }
  1057. static int copy_info(struct info_str *info, char *fmt, ...)
  1058. {
  1059. va_list args;
  1060. char buf[81];
  1061. int len;
  1062. va_start(args, fmt);
  1063. len = vsprintf(buf, fmt, args);
  1064. va_end(args);
  1065. copy_mem_info(info, buf, len);
  1066. return len;
  1067. }
  1068. /*
  1069. * Copy formatted information into the input buffer.
  1070. */
  1071. static int sym_host_info(struct Scsi_Host *shost, char *ptr, off_t offset, int len)
  1072. {
  1073. struct sym_data *sym_data = shost_priv(shost);
  1074. struct pci_dev *pdev = sym_data->pdev;
  1075. struct sym_hcb *np = sym_data->ncb;
  1076. struct info_str info;
  1077. info.buffer = ptr;
  1078. info.length = len;
  1079. info.offset = offset;
  1080. info.pos = 0;
  1081. copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, "
  1082. "revision id 0x%x\n", np->s.chip_name,
  1083. pdev->device, pdev->revision);
  1084. copy_info(&info, "At PCI address %s, IRQ %u\n",
  1085. pci_name(pdev), pdev->irq);
  1086. copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n",
  1087. (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
  1088. np->maxwide ? "Wide" : "Narrow",
  1089. np->minsync_dt ? ", DT capable" : "");
  1090. copy_info(&info, "Max. started commands %d, "
  1091. "max. commands per LUN %d\n",
  1092. SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
  1093. return info.pos > info.offset? info.pos - info.offset : 0;
  1094. }
  1095. #endif /* SYM_LINUX_USER_INFO_SUPPORT */
  1096. /*
  1097. * Entry point of the scsi proc fs of the driver.
1098. * - func = 0 means read (returns adapter info)
1099. * - func = 1 means write (not yet merged from sym53c8xx)
  1100. */
  1101. static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer,
  1102. char **start, off_t offset, int length, int func)
  1103. {
  1104. int retv;
  1105. if (func) {
  1106. #ifdef SYM_LINUX_USER_COMMAND_SUPPORT
  1107. retv = sym_user_command(shost, buffer, length);
  1108. #else
  1109. retv = -EINVAL;
  1110. #endif
  1111. } else {
  1112. if (start)
  1113. *start = buffer;
  1114. #ifdef SYM_LINUX_USER_INFO_SUPPORT
  1115. retv = sym_host_info(shost, buffer, offset, length);
  1116. #else
  1117. retv = -EINVAL;
  1118. #endif
  1119. }
  1120. return retv;
  1121. }
  1122. #endif /* SYM_LINUX_PROC_INFO_SUPPORT */
  1123. /*
  1124. * Free resources claimed by sym_iomap_device(). Note that
  1125. * sym_free_resources() should be used instead of this function after calling
  1126. * sym_attach().
  1127. */
  1128. static void __devinit
  1129. sym_iounmap_device(struct sym_device *device)
  1130. {
  1131. if (device->s.ioaddr)
  1132. pci_iounmap(device->pdev, device->s.ioaddr);
  1133. if (device->s.ramaddr)
  1134. pci_iounmap(device->pdev, device->s.ramaddr);
  1135. }
  1136. /*
  1137. * Free controller resources.
  1138. */
  1139. static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
  1140. int do_free_irq)
  1141. {
  1142. /*
  1143. * Free O/S specific resources.
  1144. */
  1145. if (do_free_irq)
  1146. free_irq(pdev->irq, np->s.host);
  1147. if (np->s.ioaddr)
  1148. pci_iounmap(pdev, np->s.ioaddr);
  1149. if (np->s.ramaddr)
  1150. pci_iounmap(pdev, np->s.ramaddr);
  1151. /*
  1152. * Free O/S independent resources.
  1153. */
  1154. sym_hcb_free(np);
  1155. sym_mfree_dma(np, sizeof(*np), "HCB");
  1156. }
  1157. /*
  1158. * Host attach and initialisations.
  1159. *
  1160. * Allocate host data and ncb structure.
  1161. * Remap MMIO region.
  1162. * Do chip initialization.
  1163. * If all is OK, install interrupt handling and
  1164. * start the timer daemon.
  1165. */
  1166. static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
  1167. int unit, struct sym_device *dev)
  1168. {
  1169. struct sym_data *sym_data;
  1170. struct sym_hcb *np = NULL;
  1171. struct Scsi_Host *shost = NULL;
  1172. struct pci_dev *pdev = dev->pdev;
  1173. unsigned long flags;
  1174. struct sym_fw *fw;
  1175. int do_free_irq = 0;
  1176. printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
  1177. unit, dev->chip.name, pdev->revision, pci_name(pdev),
  1178. pdev->irq);
  1179. /*
  1180. * Get the firmware for this chip.
  1181. */
  1182. fw = sym_find_firmware(&dev->chip);
  1183. if (!fw)
  1184. goto attach_failed;
  1185. shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
  1186. if (!shost)
  1187. goto attach_failed;
  1188. sym_data = shost_priv(shost);
  1189. /*
  1190. * Allocate immediately the host control block,
  1191. * since we are only expecting to succeed. :)
  1192. * We keep track in the HCB of all the resources that
  1193. * are to be released on error.
  1194. */
  1195. np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
  1196. if (!np)
  1197. goto attach_failed;
  1198. np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
  1199. sym_data->ncb = np;
  1200. sym_data->pdev = pdev;
  1201. np->s.host = shost;
  1202. pci_set_drvdata(pdev, shost);
  1203. /*
1204. * Copy some useful info to the HCB.
  1205. */
  1206. np->hcb_ba = vtobus(np);
  1207. np->verbose = sym_driver_setup.verbose;
  1208. np->s.unit = unit;
  1209. np->features = dev->chip.features;
  1210. np->clock_divn = dev->chip.nr_divisor;
  1211. np->maxoffs = dev->chip.offset_max;
  1212. np->maxburst = dev->chip.burst_max;
  1213. np->myaddr = dev->host_id;
  1214. np->mmio_ba = (u32)dev->mmio_base;
  1215. np->ram_ba = (u32)dev->ram_base;
  1216. np->s.ioaddr = dev->s.ioaddr;
  1217. np->s.ramaddr = dev->s.ramaddr;
  1218. /*
  1219. * Edit its name.
  1220. */
  1221. strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
  1222. sprintf(np->s.inst_name, "sym%d", np->s.unit);
  1223. if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
  1224. !pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
  1225. set_dac(np);
  1226. } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
  1227. printf_warning("%s: No suitable DMA available\n", sym_name(np));
  1228. goto attach_failed;
  1229. }
  1230. if (sym_hcb_attach(shost, fw, dev->nvram))
  1231. goto attach_failed;
  1232. /*
  1233. * Install the interrupt handler.
1234. * If we synchronize the C code with SCRIPTS on interrupt,
  1235. * we do not want to share the INTR line at all.
  1236. */
  1237. if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX,
  1238. shost)) {
  1239. printf_err("%s: request irq %u failure\n",
  1240. sym_name(np), pdev->irq);
  1241. goto attach_failed;
  1242. }
  1243. do_free_irq = 1;
  1244. /*
  1245. * After SCSI devices have been opened, we cannot
  1246. * reset the bus safely, so we do it here.
  1247. */
  1248. spin_lock_irqsave(shost->host_lock, flags);
  1249. if (sym_reset_scsi_bus(np, 0))
  1250. goto reset_failed;
  1251. /*
  1252. * Start the SCRIPTS.
  1253. */
  1254. sym_start_up(shost, 1);
  1255. /*
  1256. * Start the timer daemon
  1257. */
  1258. init_timer(&np->s.timer);
  1259. np->s.timer.data = (unsigned long) np;
  1260. np->s.timer.function = sym53c8xx_timer;
  1261. np->s.lasttime=0;
  1262. sym_timer (np);
  1263. /*
  1264. * Fill Linux host instance structure
  1265. * and return success.
  1266. */
  1267. shost->max_channel = 0;
  1268. shost->this_id = np->myaddr;
  1269. shost->max_id = np->maxwide ? 16 : 8;
  1270. shost->max_lun = SYM_CONF_MAX_LUN;
  1271. shost->unique_id = pci_resource_start(pdev, 0);
  1272. shost->cmd_per_lun = SYM_CONF_MAX_TAG;
  1273. shost->can_queue = (SYM_CONF_MAX_START-2);
  1274. shost->sg_tablesize = SYM_CONF_MAX_SG;
  1275. shost->max_cmd_len = 16;
  1276. BUG_ON(sym2_transport_template == NULL);
  1277. shost->transportt = sym2_transport_template;
  1278. /* 53c896 rev 1 errata: DMA may not cross 16MB boundary */
  1279. if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2)
  1280. shost->dma_boundary = 0xFFFFFF;
  1281. spin_unlock_irqrestore(shost->host_lock, flags);
  1282. return shost;
  1283. reset_failed:
  1284. printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
  1285. "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
  1286. spin_unlock_irqrestore(shost->host_lock, flags);
  1287. attach_failed:
  1288. printf_info("sym%d: giving up ...\n", unit);
  1289. if (np)
  1290. sym_free_resources(np, pdev, do_free_irq);
  1291. else
  1292. sym_iounmap_device(dev);
  1293. if (shost)
  1294. scsi_host_put(shost);
  1295. return NULL;
  1296. }
  1297. /*
  1298. * Detect and try to read SYMBIOS and TEKRAM NVRAM.
  1299. */
  1300. #if SYM_CONF_NVRAM_SUPPORT
  1301. static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
  1302. {
  1303. devp->nvram = nvp;
  1304. nvp->type = 0;
  1305. sym_read_nvram(devp, nvp);
  1306. }
  1307. #else
  1308. static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
  1309. {
  1310. }
  1311. #endif /* SYM_CONF_NVRAM_SUPPORT */
  1312. static int __devinit sym_check_supported(struct sym_device *device)
  1313. {
  1314. struct sym_chip *chip;
  1315. struct pci_dev *pdev = device->pdev;
  1316. unsigned long io_port = pci_resource_start(pdev, 0);
  1317. int i;
  1318. /*
  1319. * If user excluded this chip, do not initialize it.
  1320. * I hate this code so much. Must kill it.
  1321. */
  1322. if (io_port) {
  1323. for (i = 0 ; i < 8 ; i++) {
  1324. if (sym_driver_setup.excludes[i] == io_port)
  1325. return -ENODEV;
  1326. }
  1327. }
  1328. /*
  1329. * Check if the chip is supported. Then copy the chip description
  1330. * to our device structure so we can make it match the actual device
  1331. * and options.
  1332. */
  1333. chip = sym_lookup_chip_table(pdev->device, pdev->revision);
  1334. if (!chip) {
  1335. dev_info(&pdev->dev, "device not supported\n");
  1336. return -ENODEV;
  1337. }
  1338. memcpy(&device->chip, chip, sizeof(device->chip));
  1339. return 0;
  1340. }
  1341. /*
  1342. * Ignore Symbios chips controlled by various RAID controllers.
  1343. * These controllers set value 0x52414944 at RAM end - 16.
  1344. */
  1345. static int __devinit sym_check_raid(struct sym_device *device)
  1346. {
  1347. unsigned int ram_size, ram_val;
  1348. if (!device->s.ramaddr)
  1349. return 0;
  1350. if (device->chip.features & FE_RAM8K)
  1351. ram_size = 8192;
  1352. else
  1353. ram_size = 4096;
  1354. ram_val = readl(device->s.ramaddr + ram_size - 16);
  1355. if (ram_val != 0x52414944)
  1356. return 0;
  1357. dev_info(&device->pdev->dev,
  1358. "not initializing, driven by RAID controller.\n");
  1359. return -ENODEV;
  1360. }
  1361. static int __devinit sym_set_workarounds(struct sym_device *device)
  1362. {
  1363. struct sym_chip *chip = &device->chip;
  1364. struct pci_dev *pdev = device->pdev;
  1365. u_short status_reg;
  1366. /*
  1367. * (ITEM 12 of a DEL about the 896 I haven't yet).
  1368. * We must ensure the chip will use WRITE AND INVALIDATE.
  1369. * The revision number limit is for now arbitrary.
  1370. */
  1371. if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) {
  1372. chip->features |= (FE_WRIE | FE_CLSE);
  1373. }
  1374. /* If the chip can do Memory Write Invalidate, enable it */
  1375. if (chip->features & FE_WRIE) {
  1376. if (pci_set_mwi(pdev))
  1377. return -ENODEV;
  1378. }
  1379. /*
1380. * Work around an errant bit in the 895A: the 66MHz-capable
1381. * bit is set erroneously. Clear this bit.
  1382. * (Item 1 DEL 533)
  1383. *
  1384. * Make sure Config space and Features agree.
  1385. *
  1386. * Recall: writes are not normal to status register -
  1387. * write a 1 to clear and a 0 to leave unchanged.
  1388. * Can only reset bits.
  1389. */
  1390. pci_read_config_word(pdev, PCI_STATUS, &status_reg);
  1391. if (chip->features & FE_66MHZ) {
  1392. if (!(status_reg & PCI_STATUS_66MHZ))
  1393. chip->features &= ~FE_66MHZ;
  1394. } else {
  1395. if (status_reg & PCI_STATUS_66MHZ) {
  1396. status_reg = PCI_STATUS_66MHZ;
  1397. pci_write_config_word(pdev, PCI_STATUS, status_reg);
  1398. pci_read_config_word(pdev, PCI_STATUS, &status_reg);
  1399. }
  1400. }
  1401. return 0;
  1402. }
  1403. /*
  1404. * Map HBA registers and on-chip SRAM (if present).
  1405. */
  1406. static int __devinit
  1407. sym_iomap_device(struct sym_device *device)
  1408. {
  1409. struct pci_dev *pdev = device->pdev;
  1410. struct pci_bus_region bus_addr;
  1411. int i = 2;
  1412. pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
  1413. device->mmio_base = bus_addr.start;
  1414. if (device->chip.features & FE_RAM) {
  1415. /*
  1416. * If the BAR is 64-bit, resource 2 will be occupied by the
  1417. * upper 32 bits
  1418. */
  1419. if (!pdev->resource[i].flags)
  1420. i++;
  1421. pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
  1422. device->ram_base = bus_addr.start;
  1423. }
  1424. #ifdef CONFIG_SCSI_SYM53C8XX_MMIO
  1425. if (device->mmio_base)
  1426. device->s.ioaddr = pci_iomap(pdev, 1,
  1427. pci_resource_len(pdev, 1));
  1428. #endif
  1429. if (!device->s.ioaddr)
  1430. device->s.ioaddr = pci_iomap(pdev, 0,
  1431. pci_resource_len(pdev, 0));
  1432. if (!device->s.ioaddr) {
  1433. dev_err(&pdev->dev, "could not map registers; giving up.\n");
  1434. return -EIO;
  1435. }
  1436. if (device->ram_base) {
  1437. device->s.ramaddr = pci_iomap(pdev, i,
  1438. pci_resource_len(pdev, i));
  1439. if (!device->s.ramaddr) {
  1440. dev_warn(&pdev->dev,
  1441. "could not map SRAM; continuing anyway.\n");
  1442. device->ram_base = 0;
  1443. }
  1444. }
  1445. return 0;
  1446. }
  1447. /*
  1448. * The NCR PQS and PDS cards are constructed as a DEC bridge
  1449. * behind which sits a proprietary NCR memory controller and
  1450. * either four or two 53c875s as separate devices. We can tell
  1451. * if an 875 is part of a PQS/PDS or not since if it is, it will
  1452. * be on the same bus as the memory controller. In its usual
  1453. * mode of operation, the 875s are slaved to the memory
  1454. * controller for all transfers. To operate with the Linux
  1455. * driver, the memory controller is disabled and the 875s
  1456. * freed to function independently. The only wrinkle is that
  1457. * the preset SCSI ID (which may be zero) must be read in from
  1458. * a special configuration space register of the 875.
  1459. */
  1460. static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
  1461. {
  1462. int slot;
  1463. u8 tmp;
  1464. for (slot = 0; slot < 256; slot++) {
  1465. struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
  1466. if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
  1467. pci_dev_put(memc);
  1468. continue;
  1469. }
  1470. /* bit 1: allow individual 875 configuration */
  1471. pci_read_config_byte(memc, 0x44, &tmp);
  1472. if ((tmp & 0x2) == 0) {
  1473. tmp |= 0x2;
  1474. pci_write_config_byte(memc, 0x44, tmp);
  1475. }
  1476. /* bit 2: drive individual 875 interrupts to the bus */
  1477. pci_read_config_byte(memc, 0x45, &tmp);
  1478. if ((tmp & 0x4) == 0) {
  1479. tmp |= 0x4;
  1480. pci_write_config_byte(memc, 0x45, tmp);
  1481. }
  1482. pci_dev_put(memc);
  1483. break;
  1484. }
  1485. pci_read_config_byte(pdev, 0x84, &tmp);
  1486. sym_dev->host_id = tmp;
  1487. }
  1488. /*
  1489. * Called before unloading the module.
  1490. * Detach the host.
  1491. * We have to free resources and halt the NCR chip.
  1492. */
  1493. static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
  1494. {
  1495. struct sym_hcb *np = sym_get_hcb(shost);
  1496. printk("%s: detaching ...\n", sym_name(np));
  1497. del_timer_sync(&np->s.timer);
  1498. /*
  1499. * Reset NCR chip.
  1500. * We should use sym_soft_reset(), but we don't want to do
  1501. * so, since we may not be safe if interrupts occur.
  1502. */
  1503. printk("%s: resetting chip\n", sym_name(np));
  1504. OUTB(np, nc_istat, SRST);
  1505. INB(np, nc_mbox1);
  1506. udelay(10);
  1507. OUTB(np, nc_istat, 0);
  1508. sym_free_resources(np, pdev, 1);
  1509. scsi_host_put(shost);
  1510. return 1;
  1511. }
  1512. /*
  1513. * Driver host template.
  1514. */
  1515. static struct scsi_host_template sym2_template = {
  1516. .module = THIS_MODULE,
  1517. .name = "sym53c8xx",
  1518. .info = sym53c8xx_info,
  1519. .queuecommand = sym53c8xx_queue_command,
  1520. .slave_alloc = sym53c8xx_slave_alloc,
  1521. .slave_configure = sym53c8xx_slave_configure,
  1522. .slave_destroy = sym53c8xx_slave_destroy,
  1523. .eh_abort_handler = sym53c8xx_eh_abort_handler,
  1524. .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
  1525. .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
  1526. .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
  1527. .this_id = 7,
  1528. .use_clustering = ENABLE_CLUSTERING,
  1529. .max_sectors = 0xFFFF,
  1530. #ifdef SYM_LINUX_PROC_INFO_SUPPORT
  1531. .proc_info = sym53c8xx_proc_info,
  1532. .proc_name = NAME53C8XX,
  1533. #endif
  1534. };
  1535. static int attach_count;
  1536. static int __devinit sym2_probe(struct pci_dev *pdev,
  1537. const struct pci_device_id *ent)
  1538. {
  1539. struct sym_device sym_dev;
  1540. struct sym_nvram nvram;
  1541. struct Scsi_Host *shost;
  1542. int do_iounmap = 0;
  1543. int do_disable_device = 1;
  1544. memset(&sym_dev, 0, sizeof(sym_dev));
  1545. memset(&nvram, 0, sizeof(nvram));
  1546. sym_dev.pdev = pdev;
  1547. sym_dev.host_id = SYM_SETUP_HOST_ID;
  1548. if (pci_enable_device(pdev))
  1549. goto leave;
  1550. pci_set_master(pdev);
  1551. if (pci_request_regions(pdev, NAME53C8XX))
  1552. goto disable;
  1553. if (sym_check_supported(&sym_dev))
  1554. goto free;
  1555. if (sym_iomap_device(&sym_dev))
  1556. goto free;
  1557. do_iounmap = 1;
  1558. if (sym_check_raid(&sym_dev)) {
  1559. do_disable_device = 0; /* Don't disable the device */
  1560. goto free;
  1561. }
  1562. if (sym_set_workarounds(&sym_dev))
  1563. goto free;
  1564. sym_config_pqs(pdev, &sym_dev);
  1565. sym_get_nvram(&sym_dev, &nvram);
  1566. do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
  1567. shost = sym_attach(&sym2_template, attach_count, &sym_dev);
  1568. if (!shost)
  1569. goto free;
  1570. if (scsi_add_host(shost, &pdev->dev))
  1571. goto detach;
  1572. scsi_scan_host(shost);
  1573. attach_count++;
  1574. return 0;
  1575. detach:
  1576. sym_detach(pci_get_drvdata(pdev), pdev);
  1577. free:
  1578. if (do_iounmap)
  1579. sym_iounmap_device(&sym_dev);
  1580. pci_release_regions(pdev);
  1581. disable:
  1582. if (do_disable_device)
  1583. pci_disable_device(pdev);
  1584. leave:
  1585. return -ENODEV;
  1586. }
  1587. static void sym2_remove(struct pci_dev *pdev)
  1588. {
  1589. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1590. scsi_remove_host(shost);
  1591. sym_detach(shost, pdev);
  1592. pci_release_regions(pdev);
  1593. pci_disable_device(pdev);
  1594. attach_count--;
  1595. }
  1596. /**
  1597. * sym2_io_error_detected() - called when PCI error is detected
  1598. * @pdev: pointer to PCI device
  1599. * @state: current state of the PCI slot
  1600. */
  1601. static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
  1602. enum pci_channel_state state)
  1603. {
  1604. /* If slot is permanently frozen, turn everything off */
  1605. if (state == pci_channel_io_perm_failure) {
  1606. sym2_remove(pdev);
  1607. return PCI_ERS_RESULT_DISCONNECT;
  1608. }
  1609. disable_irq(pdev->irq);
  1610. pci_disable_device(pdev);
  1611. /* Request that MMIO be enabled, so register dump can be taken. */
  1612. return PCI_ERS_RESULT_CAN_RECOVER;
  1613. }
  1614. /**
  1615. * sym2_io_slot_dump - Enable MMIO and dump debug registers
  1616. * @pdev: pointer to PCI device
  1617. */
  1618. static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
  1619. {
  1620. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1621. sym_dump_registers(shost);
  1622. /* Request a slot reset. */
  1623. return PCI_ERS_RESULT_NEED_RESET;
  1624. }

/**
 * sym2_reset_workarounds - hardware-specific work-arounds
 * @pdev: pointer to PCI device
 *
 * This routine is similar to sym_set_workarounds(), except that, at this
 * point, we already know that the device was successfully initialized at
 * least once before, so most of the steps taken there are not needed here.
 */
static void sym2_reset_workarounds(struct pci_dev *pdev)
{
	u_short status_reg;
	struct sym_chip *chip;

	chip = sym_lookup_chip_table(pdev->device, pdev->revision);

	/* Work around an errant bit in the 895A, in a fashion
	 * similar to what is done in sym_set_workarounds().
	 */
	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) {
		status_reg = PCI_STATUS_66MHZ;
		pci_write_config_word(pdev, PCI_STATUS, status_reg);
		pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	}
}

/**
 * sym2_io_slot_reset() - called when the PCI bus has been reset
 * @pdev: pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct sym_hcb *np = sym_get_hcb(shost);

	printk(KERN_INFO "%s: recovering from a PCI slot reset\n",
		sym_name(np));

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: Unable to enable after PCI reset\n",
			sym_name(np));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	enable_irq(pdev->irq);

	/* If the chip can do Memory Write Invalidate, enable it */
	if (np->features & FE_WRIE) {
		if (pci_set_mwi(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform work-arounds, analogous to sym_set_workarounds() */
	sym2_reset_workarounds(pdev);

	/* Perform host reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) == 0) {
		if (sym_reset_scsi_bus(np, 0)) {
			printk(KERN_ERR "%s: Unable to reset scsi host\n",
				sym_name(np));
			return PCI_ERS_RESULT_DISCONNECT;
		}
		sym_start_up(shost, 1);
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * sym2_io_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to resume
 * normal operation.  Use the completion to let halted SCSI ops resume.
 */
static void sym2_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct sym_data *sym_data = shost_priv(shost);

	spin_lock_irq(shost->host_lock);
	if (sym_data->io_reset)
		complete_all(sym_data->io_reset);
	spin_unlock_irq(shost->host_lock);
}
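
/*
 * Report the current bus signalling mode (SE, LVD or HVD) to the SPI
 * transport class.
 */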
static void sym2_get_signalling(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	enum spi_signal_type type;

	switch (np->scsi_mode) {
	case SMODE_SE:
		type = SPI_SIGNAL_SE;
		break;
	case SMODE_LVD:
		type = SPI_SIGNAL_LVD;
		break;
	case SMODE_HVD:
		type = SPI_SIGNAL_HVD;
		break;
	default:
		type = SPI_SIGNAL_UNKNOWN;
		break;
	}
	spi_signalling(shost) = type;
}
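
/*
 * SPI transport class parameter setters.  Each one records the requested
 * value in the target's negotiation goal (tgoal) and sets check_nego so
 * that the new parameters are renegotiated with the target.
 */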
static void sym2_set_offset(struct scsi_target *starget, int offset)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	tp->tgoal.offset = offset;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_period(struct scsi_target *starget, int period)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* DT is required for transfers this fast, but DT negotiation will
	 * also set the width, so check that wide is currently allowed. */
	if (period <= np->minsync && spi_width(starget))
		tp->tgoal.dt = 1;

	tp->tgoal.period = period;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_width(struct scsi_target *starget, int width)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* It is illegal to have DT set on narrow transfers.  If DT is
	 * clear, we must also clear IU and QAS. */
	if (width == 0)
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;

	tp->tgoal.width = width;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_dt(struct scsi_target *starget, int dt)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* We must clear QAS and IU if DT is clear */
	if (dt)
		tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}

#if 0
static void sym2_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (iu)
		tp->tgoal.iu = tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = 0;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (qas)
		tp->tgoal.dt = tp->tgoal.qas = 1;
	else
		tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}
#endif
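
/*
 * SPI transport attributes exported by this driver.  The IU and QAS
 * setters above are currently compiled out, so only offset, period,
 * width, DT and the signalling mode are exposed.
 */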
static struct spi_function_template sym2_transport_functions = {
	.set_offset	= sym2_set_offset,
	.show_offset	= 1,
	.set_period	= sym2_set_period,
	.show_period	= 1,
	.set_width	= sym2_set_width,
	.show_width	= 1,
	.set_dt		= sym2_set_dt,
	.show_dt	= 1,
#if 0
	.set_iu		= sym2_set_iu,
	.show_iu	= 1,
	.set_qas	= sym2_set_qas,
	.show_qas	= 1,
#endif
	.get_signalling	= sym2_get_signalling,
};
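
/*
 * PCI IDs claimed by this driver.  The 53C1510 entries also check the
 * class code, so they match only functions that report the SCSI storage
 * controller class.
 */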
static struct pci_device_id sym2_id_table[] __devinitdata = {
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, sym2_id_table);
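
/* PCI error recovery callbacks, hooked up through sym2_driver below. */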
static struct pci_error_handlers sym2_err_handler = {
	.error_detected	= sym2_io_error_detected,
	.mmio_enabled	= sym2_io_slot_dump,
	.slot_reset	= sym2_io_slot_reset,
	.resume		= sym2_io_resume,
};

static struct pci_driver sym2_driver = {
	.name		= NAME53C8XX,
	.id_table	= sym2_id_table,
	.probe		= sym2_probe,
	.remove		= sym2_remove,
	.err_handler	= &sym2_err_handler,
};
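
/*
 * Module init/exit: register the SPI transport template first, then the
 * PCI driver; if PCI registration fails, release the transport again.
 */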
static int __init sym2_init(void)
{
	int error;

	sym2_setup_params();
	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
	if (!sym2_transport_template)
		return -ENODEV;

	error = pci_register_driver(&sym2_driver);
	if (error)
		spi_release_transport(sym2_transport_template);
	return error;
}

static void __exit sym2_exit(void)
{
	pci_unregister_driver(&sym2_driver);
	spi_release_transport(sym2_transport_template);
}

module_init(sym2_init);
module_exit(sym2_exit);