/drivers/ata/sata_mv.c

Source: https://bitbucket.org/wisechild/galaxy-nexus · C · 4423 lines (listing truncated below)

  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2008-2009: Marvell Corporation, all rights reserved.
  5. * Copyright 2005: EMC Corporation, all rights reserved.
  6. * Copyright 2005 Red Hat, Inc. All rights reserved.
  7. *
  8. * Originally written by Brett Russ.
  9. * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
  10. *
  11. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; version 2 of the License.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  25. *
  26. */
  27. /*
  28. * sata_mv TODO list:
  29. *
  30. * --> Develop a low-power-consumption strategy, and implement it.
  31. *
  32. * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
  33. *
  34. * --> [Experiment, Marvell value added] Is it possible to use target
  35. * mode to cross-connect two Linux boxes with Marvell cards? If so,
  36. * creating LibATA target mode support would be very interesting.
  37. *
  38. * Target mode, for those without docs, is the ability to directly
  39. * connect two SATA ports.
  40. */
  41. /*
  42. * 80x1-B2 errata PCI#11:
  43. *
  44. * Users of the 6041/6081 Rev.B2 chips (current is C0)
  45. * should be careful to insert those cards only onto PCI-X bus #0,
  46. * and only in device slots 0..7, not higher. The chips may not
  47. * work correctly otherwise (note: this is a pretty rare condition).
  48. */
  49. #include <linux/kernel.h>
  50. #include <linux/module.h>
  51. #include <linux/pci.h>
  52. #include <linux/init.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/delay.h>
  55. #include <linux/interrupt.h>
  56. #include <linux/dmapool.h>
  57. #include <linux/dma-mapping.h>
  58. #include <linux/device.h>
  59. #include <linux/clk.h>
  60. #include <linux/platform_device.h>
  61. #include <linux/ata_platform.h>
  62. #include <linux/mbus.h>
  63. #include <linux/bitops.h>
  64. #include <linux/gfp.h>
  65. #include <scsi/scsi_host.h>
  66. #include <scsi/scsi_cmnd.h>
  67. #include <scsi/scsi_device.h>
  68. #include <linux/libata.h>
  69. #define DRV_NAME "sata_mv"
  70. #define DRV_VERSION "1.28"
  71. /*
  72. * module options
  73. */
  74. static int msi;
  75. #ifdef CONFIG_PCI
  76. module_param(msi, int, S_IRUGO);
  77. MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  78. #endif
  79. static int irq_coalescing_io_count;
  80. module_param(irq_coalescing_io_count, int, S_IRUGO);
  81. MODULE_PARM_DESC(irq_coalescing_io_count,
  82. "IRQ coalescing I/O count threshold (0..255)");
  83. static int irq_coalescing_usecs;
  84. module_param(irq_coalescing_usecs, int, S_IRUGO);
  85. MODULE_PARM_DESC(irq_coalescing_usecs,
  86. "IRQ coalescing time threshold in usecs");
  87. enum {
  88. /* BAR's are enumerated in terms of pci_resource_start() terms */
  89. MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
  90. MV_IO_BAR = 2, /* offset 0x18: IO space */
  91. MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
  92. MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
  93. MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
  94. /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
  95. COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
  96. MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
  97. MAX_COAL_IO_COUNT = 255, /* completed I/O count */
  98. MV_PCI_REG_BASE = 0,
  99. /*
  100. * Per-chip ("all ports") interrupt coalescing feature.
  101. * This is only for GEN_II / GEN_IIE hardware.
  102. *
  103. * Coalescing defers the interrupt until either the IO_THRESHOLD
  104. * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
  105. */
  106. COAL_REG_BASE = 0x18000,
  107. IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
  108. ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
  109. IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
  110. IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
  111. /*
  112. * Registers for the (unused here) transaction coalescing feature:
  113. */
  114. TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
  115. TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
  116. SATAHC0_REG_BASE = 0x20000,
  117. FLASH_CTL = 0x1046c,
  118. GPIO_PORT_CTL = 0x104f0,
  119. RESET_CFG = 0x180d8,
  120. MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  121. MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  122. MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
  123. MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
  124. MV_MAX_Q_DEPTH = 32,
  125. MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
  126. /* CRQB needs alignment on a 1KB boundary. Size == 1KB
  127. * CRPB needs alignment on a 256B boundary. Size == 256B
  128. * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
  129. */
  130. MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
  131. MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
  132. MV_MAX_SG_CT = 256,
  133. MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
  134. /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
  135. MV_PORT_HC_SHIFT = 2,
  136. MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
  137. /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
  138. MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
  139. /* Host Flags */
  140. MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
  141. MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
  142. MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
  143. MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
  144. ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
  145. MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
  146. CRQB_FLAG_READ = (1 << 0),
  147. CRQB_TAG_SHIFT = 1,
  148. CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
  149. CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
  150. CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
  151. CRQB_CMD_ADDR_SHIFT = 8,
  152. CRQB_CMD_CS = (0x2 << 11),
  153. CRQB_CMD_LAST = (1 << 15),
  154. CRPB_FLAG_STATUS_SHIFT = 8,
  155. CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
  156. CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
  157. EPRD_FLAG_END_OF_TBL = (1 << 31),
  158. /* PCI interface registers */
  159. MV_PCI_COMMAND = 0xc00,
  160. MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
  161. MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
  162. PCI_MAIN_CMD_STS = 0xd30,
  163. STOP_PCI_MASTER = (1 << 2),
  164. PCI_MASTER_EMPTY = (1 << 3),
  165. GLOB_SFT_RST = (1 << 4),
  166. MV_PCI_MODE = 0xd00,
  167. MV_PCI_MODE_MASK = 0x30,
  168. MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
  169. MV_PCI_DISC_TIMER = 0xd04,
  170. MV_PCI_MSI_TRIGGER = 0xc38,
  171. MV_PCI_SERR_MASK = 0xc28,
  172. MV_PCI_XBAR_TMOUT = 0x1d04,
  173. MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
  174. MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
  175. MV_PCI_ERR_ATTRIBUTE = 0x1d48,
  176. MV_PCI_ERR_COMMAND = 0x1d50,
  177. PCI_IRQ_CAUSE = 0x1d58,
  178. PCI_IRQ_MASK = 0x1d5c,
  179. PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
  180. PCIE_IRQ_CAUSE = 0x1900,
  181. PCIE_IRQ_MASK = 0x1910,
  182. PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
  183. /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
  184. PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
  185. PCI_HC_MAIN_IRQ_MASK = 0x1d64,
  186. SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
  187. SOC_HC_MAIN_IRQ_MASK = 0x20024,
  188. ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
  189. DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
  190. HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
  191. HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
  192. DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
  193. DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
  194. PCI_ERR = (1 << 18),
  195. TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
  196. TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
  197. PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
  198. PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
  199. ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
  200. GPIO_INT = (1 << 22),
  201. SELF_INT = (1 << 23),
  202. TWSI_INT = (1 << 24),
  203. HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
  204. HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
  205. HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
  206. /* SATAHC registers */
  207. HC_CFG = 0x00,
  208. HC_IRQ_CAUSE = 0x14,
  209. DMA_IRQ = (1 << 0), /* shift by port # */
  210. HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
  211. DEV_IRQ = (1 << 8), /* shift by port # */
  212. /*
  213. * Per-HC (Host-Controller) interrupt coalescing feature.
  214. * This is present on all chip generations.
  215. *
  216. * Coalescing defers the interrupt until either the IO_THRESHOLD
  217. * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
  218. */
  219. HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
  220. HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
  221. SOC_LED_CTRL = 0x2c,
  222. SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
  223. SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
  224. /* with dev activity LED */
  225. /* Shadow block registers */
  226. SHD_BLK = 0x100,
  227. SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
  228. /* SATA registers */
  229. SATA_STATUS = 0x300, /* ctrl, err regs follow status */
  230. SATA_ACTIVE = 0x350,
  231. FIS_IRQ_CAUSE = 0x364,
  232. FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
  233. LTMODE = 0x30c, /* requires read-after-write */
  234. LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
  235. PHY_MODE2 = 0x330,
  236. PHY_MODE3 = 0x310,
  237. PHY_MODE4 = 0x314, /* requires read-after-write */
  238. PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
  239. PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
  240. PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
  241. PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
  242. SATA_IFCTL = 0x344,
  243. SATA_TESTCTL = 0x348,
  244. SATA_IFSTAT = 0x34c,
  245. VENDOR_UNIQUE_FIS = 0x35c,
  246. FISCFG = 0x360,
  247. FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
  248. FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
  249. PHY_MODE9_GEN2 = 0x398,
  250. PHY_MODE9_GEN1 = 0x39c,
  251. PHYCFG_OFS = 0x3a0, /* only in 65n devices */
  252. MV5_PHY_MODE = 0x74,
  253. MV5_LTMODE = 0x30,
  254. MV5_PHY_CTL = 0x0C,
  255. SATA_IFCFG = 0x050,
  256. MV_M2_PREAMP_MASK = 0x7e0,
  257. /* Port registers */
  258. EDMA_CFG = 0,
  259. EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
  260. EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
  261. EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
  262. EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
  263. EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
  264. EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
  265. EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
  266. EDMA_ERR_IRQ_CAUSE = 0x8,
  267. EDMA_ERR_IRQ_MASK = 0xc,
  268. EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
  269. EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
  270. EDMA_ERR_DEV = (1 << 2), /* device error */
  271. EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
  272. EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
  273. EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
  274. EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
  275. EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
  276. EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
  277. EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
  278. EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
  279. EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
  280. EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
  281. EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
  282. EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
  283. EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
  284. EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
  285. EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
  286. EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
  287. EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
  288. EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
  289. EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
  290. EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
  291. EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
  292. EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
  293. EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
  294. EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
  295. EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
  296. EDMA_ERR_OVERRUN_5 = (1 << 5),
  297. EDMA_ERR_UNDERRUN_5 = (1 << 6),
  298. EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
  299. EDMA_ERR_LNK_CTRL_RX_1 |
  300. EDMA_ERR_LNK_CTRL_RX_3 |
  301. EDMA_ERR_LNK_CTRL_TX,
  302. EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
  303. EDMA_ERR_PRD_PAR |
  304. EDMA_ERR_DEV_DCON |
  305. EDMA_ERR_DEV_CON |
  306. EDMA_ERR_SERR |
  307. EDMA_ERR_SELF_DIS |
  308. EDMA_ERR_CRQB_PAR |
  309. EDMA_ERR_CRPB_PAR |
  310. EDMA_ERR_INTRL_PAR |
  311. EDMA_ERR_IORDY |
  312. EDMA_ERR_LNK_CTRL_RX_2 |
  313. EDMA_ERR_LNK_DATA_RX |
  314. EDMA_ERR_LNK_DATA_TX |
  315. EDMA_ERR_TRANS_PROTO,
  316. EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
  317. EDMA_ERR_PRD_PAR |
  318. EDMA_ERR_DEV_DCON |
  319. EDMA_ERR_DEV_CON |
  320. EDMA_ERR_OVERRUN_5 |
  321. EDMA_ERR_UNDERRUN_5 |
  322. EDMA_ERR_SELF_DIS_5 |
  323. EDMA_ERR_CRQB_PAR |
  324. EDMA_ERR_CRPB_PAR |
  325. EDMA_ERR_INTRL_PAR |
  326. EDMA_ERR_IORDY,
  327. EDMA_REQ_Q_BASE_HI = 0x10,
  328. EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
  329. EDMA_REQ_Q_OUT_PTR = 0x18,
  330. EDMA_REQ_Q_PTR_SHIFT = 5,
  331. EDMA_RSP_Q_BASE_HI = 0x1c,
  332. EDMA_RSP_Q_IN_PTR = 0x20,
  333. EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
  334. EDMA_RSP_Q_PTR_SHIFT = 3,
  335. EDMA_CMD = 0x28, /* EDMA command register */
  336. EDMA_EN = (1 << 0), /* enable EDMA */
  337. EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
  338. EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
  339. EDMA_STATUS = 0x30, /* EDMA engine status */
  340. EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
  341. EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
  342. EDMA_IORDY_TMOUT = 0x34,
  343. EDMA_ARB_CFG = 0x38,
  344. EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
  345. EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
  346. BMDMA_CMD = 0x224, /* bmdma command register */
  347. BMDMA_STATUS = 0x228, /* bmdma status register */
  348. BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
  349. BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
  350. /* Host private flags (hp_flags) */
  351. MV_HP_FLAG_MSI = (1 << 0),
  352. MV_HP_ERRATA_50XXB0 = (1 << 1),
  353. MV_HP_ERRATA_50XXB2 = (1 << 2),
  354. MV_HP_ERRATA_60X1B2 = (1 << 3),
  355. MV_HP_ERRATA_60X1C0 = (1 << 4),
  356. MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
  357. MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
  358. MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
  359. MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
  360. MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
  361. MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
  362. MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
  363. /* Port private flags (pp_flags) */
  364. MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
  365. MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
  366. MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
  367. MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
  368. MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
  369. };
  370. #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
  371. #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
  372. #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
  373. #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
  374. #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
  375. #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
  376. #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
  377. enum {
  378. /* DMA boundary 0xffff is required by the s/g splitting
  379. * we need on /length/ in mv_fill_sg().
  380. */
  381. MV_DMA_BOUNDARY = 0xffffU,
  382. /* mask of register bits containing lower 32 bits
  383. * of EDMA request queue DMA address
  384. */
  385. EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
  386. /* ditto, for response queue */
  387. EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
  388. };
  389. enum chip_type {
  390. chip_504x,
  391. chip_508x,
  392. chip_5080,
  393. chip_604x,
  394. chip_608x,
  395. chip_6042,
  396. chip_7042,
  397. chip_soc,
  398. };
  399. /* Command ReQuest Block: 32B */
  400. struct mv_crqb {
  401. __le32 sg_addr;
  402. __le32 sg_addr_hi;
  403. __le16 ctrl_flags;
  404. __le16 ata_cmd[11];
  405. };
  406. struct mv_crqb_iie {
  407. __le32 addr;
  408. __le32 addr_hi;
  409. __le32 flags;
  410. __le32 len;
  411. __le32 ata_cmd[4];
  412. };
  413. /* Command ResPonse Block: 8B */
  414. struct mv_crpb {
  415. __le16 id;
  416. __le16 flags;
  417. __le32 tmstmp;
  418. };
  419. /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
  420. struct mv_sg {
  421. __le32 addr;
  422. __le32 flags_size;
  423. __le32 addr_hi;
  424. __le32 reserved;
  425. };
  426. /*
  427. * We keep a local cache of a few frequently accessed port
  428. * registers here, to avoid having to read them (very slow)
  429. * when switching between EDMA and non-EDMA modes.
  430. */
  431. struct mv_cached_regs {
  432. u32 fiscfg;
  433. u32 ltmode;
  434. u32 haltcond;
  435. u32 unknown_rsvd;
  436. };
  437. struct mv_port_priv {
  438. struct mv_crqb *crqb;
  439. dma_addr_t crqb_dma;
  440. struct mv_crpb *crpb;
  441. dma_addr_t crpb_dma;
  442. struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
  443. dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
  444. unsigned int req_idx;
  445. unsigned int resp_idx;
  446. u32 pp_flags;
  447. struct mv_cached_regs cached;
  448. unsigned int delayed_eh_pmp_map;
  449. };
  450. struct mv_port_signal {
  451. u32 amps;
  452. u32 pre;
  453. };
  454. struct mv_host_priv {
  455. u32 hp_flags;
  456. unsigned int board_idx;
  457. u32 main_irq_mask;
  458. struct mv_port_signal signal[8];
  459. const struct mv_hw_ops *ops;
  460. int n_ports;
  461. void __iomem *base;
  462. void __iomem *main_irq_cause_addr;
  463. void __iomem *main_irq_mask_addr;
  464. u32 irq_cause_offset;
  465. u32 irq_mask_offset;
  466. u32 unmask_all_irqs;
  467. #if defined(CONFIG_HAVE_CLK)
  468. struct clk *clk;
  469. #endif
  470. /*
  471. * These consistent DMA memory pools give us guaranteed
  472. * alignment for hardware-accessed data structures,
  473. * and less memory waste in accomplishing the alignment.
  474. */
  475. struct dma_pool *crqb_pool;
  476. struct dma_pool *crpb_pool;
  477. struct dma_pool *sg_tbl_pool;
  478. };
  479. struct mv_hw_ops {
  480. void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
  481. unsigned int port);
  482. void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
  483. void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
  484. void __iomem *mmio);
  485. int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
  486. unsigned int n_hc);
  487. void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
  488. void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
  489. };
  490. static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
  491. static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
  492. static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
  493. static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
  494. static int mv_port_start(struct ata_port *ap);
  495. static void mv_port_stop(struct ata_port *ap);
  496. static int mv_qc_defer(struct ata_queued_cmd *qc);
  497. static void mv_qc_prep(struct ata_queued_cmd *qc);
  498. static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
  499. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
  500. static int mv_hardreset(struct ata_link *link, unsigned int *class,
  501. unsigned long deadline);
  502. static void mv_eh_freeze(struct ata_port *ap);
  503. static void mv_eh_thaw(struct ata_port *ap);
  504. static void mv6_dev_config(struct ata_device *dev);
  505. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  506. unsigned int port);
  507. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  508. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  509. void __iomem *mmio);
  510. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  511. unsigned int n_hc);
  512. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  513. static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
  514. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  515. unsigned int port);
  516. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  517. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  518. void __iomem *mmio);
  519. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  520. unsigned int n_hc);
  521. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  522. static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
  523. void __iomem *mmio);
  524. static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
  525. void __iomem *mmio);
  526. static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
  527. void __iomem *mmio, unsigned int n_hc);
  528. static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
  529. void __iomem *mmio);
  530. static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
  531. static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
  532. void __iomem *mmio, unsigned int port);
  533. static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
  534. static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
  535. unsigned int port_no);
  536. static int mv_stop_edma(struct ata_port *ap);
  537. static int mv_stop_edma_engine(void __iomem *port_mmio);
  538. static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
  539. static void mv_pmp_select(struct ata_port *ap, int pmp);
  540. static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
  541. unsigned long deadline);
  542. static int mv_softreset(struct ata_link *link, unsigned int *class,
  543. unsigned long deadline);
  544. static void mv_pmp_error_handler(struct ata_port *ap);
  545. static void mv_process_crpb_entries(struct ata_port *ap,
  546. struct mv_port_priv *pp);
  547. static void mv_sff_irq_clear(struct ata_port *ap);
  548. static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
  549. static void mv_bmdma_setup(struct ata_queued_cmd *qc);
  550. static void mv_bmdma_start(struct ata_queued_cmd *qc);
  551. static void mv_bmdma_stop(struct ata_queued_cmd *qc);
  552. static u8 mv_bmdma_status(struct ata_port *ap);
  553. static u8 mv_sff_check_status(struct ata_port *ap);
  554. /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  555. * because we have to allow room for worst case splitting of
  556. * PRDs for 64K boundaries in mv_fill_sg().
  557. */
  558. static struct scsi_host_template mv5_sht = {
  559. ATA_BASE_SHT(DRV_NAME),
  560. .sg_tablesize = MV_MAX_SG_CT / 2,
  561. .dma_boundary = MV_DMA_BOUNDARY,
  562. };
  563. static struct scsi_host_template mv6_sht = {
  564. ATA_NCQ_SHT(DRV_NAME),
  565. .can_queue = MV_MAX_Q_DEPTH - 1,
  566. .sg_tablesize = MV_MAX_SG_CT / 2,
  567. .dma_boundary = MV_DMA_BOUNDARY,
  568. };
  569. static struct ata_port_operations mv5_ops = {
  570. .inherits = &ata_sff_port_ops,
  571. .lost_interrupt = ATA_OP_NULL,
  572. .qc_defer = mv_qc_defer,
  573. .qc_prep = mv_qc_prep,
  574. .qc_issue = mv_qc_issue,
  575. .freeze = mv_eh_freeze,
  576. .thaw = mv_eh_thaw,
  577. .hardreset = mv_hardreset,
  578. .scr_read = mv5_scr_read,
  579. .scr_write = mv5_scr_write,
  580. .port_start = mv_port_start,
  581. .port_stop = mv_port_stop,
  582. };
  583. static struct ata_port_operations mv6_ops = {
  584. .inherits = &ata_bmdma_port_ops,
  585. .lost_interrupt = ATA_OP_NULL,
  586. .qc_defer = mv_qc_defer,
  587. .qc_prep = mv_qc_prep,
  588. .qc_issue = mv_qc_issue,
  589. .dev_config = mv6_dev_config,
  590. .freeze = mv_eh_freeze,
  591. .thaw = mv_eh_thaw,
  592. .hardreset = mv_hardreset,
  593. .softreset = mv_softreset,
  594. .pmp_hardreset = mv_pmp_hardreset,
  595. .pmp_softreset = mv_softreset,
  596. .error_handler = mv_pmp_error_handler,
  597. .scr_read = mv_scr_read,
  598. .scr_write = mv_scr_write,
  599. .sff_check_status = mv_sff_check_status,
  600. .sff_irq_clear = mv_sff_irq_clear,
  601. .check_atapi_dma = mv_check_atapi_dma,
  602. .bmdma_setup = mv_bmdma_setup,
  603. .bmdma_start = mv_bmdma_start,
  604. .bmdma_stop = mv_bmdma_stop,
  605. .bmdma_status = mv_bmdma_status,
  606. .port_start = mv_port_start,
  607. .port_stop = mv_port_stop,
  608. };
  609. static struct ata_port_operations mv_iie_ops = {
  610. .inherits = &mv6_ops,
  611. .dev_config = ATA_OP_NULL,
  612. .qc_prep = mv_qc_prep_iie,
  613. };
  614. static const struct ata_port_info mv_port_info[] = {
  615. { /* chip_504x */
  616. .flags = MV_GEN_I_FLAGS,
  617. .pio_mask = ATA_PIO4,
  618. .udma_mask = ATA_UDMA6,
  619. .port_ops = &mv5_ops,
  620. },
  621. { /* chip_508x */
  622. .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
  623. .pio_mask = ATA_PIO4,
  624. .udma_mask = ATA_UDMA6,
  625. .port_ops = &mv5_ops,
  626. },
  627. { /* chip_5080 */
  628. .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
  629. .pio_mask = ATA_PIO4,
  630. .udma_mask = ATA_UDMA6,
  631. .port_ops = &mv5_ops,
  632. },
  633. { /* chip_604x */
  634. .flags = MV_GEN_II_FLAGS,
  635. .pio_mask = ATA_PIO4,
  636. .udma_mask = ATA_UDMA6,
  637. .port_ops = &mv6_ops,
  638. },
  639. { /* chip_608x */
  640. .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
  641. .pio_mask = ATA_PIO4,
  642. .udma_mask = ATA_UDMA6,
  643. .port_ops = &mv6_ops,
  644. },
  645. { /* chip_6042 */
  646. .flags = MV_GEN_IIE_FLAGS,
  647. .pio_mask = ATA_PIO4,
  648. .udma_mask = ATA_UDMA6,
  649. .port_ops = &mv_iie_ops,
  650. },
  651. { /* chip_7042 */
  652. .flags = MV_GEN_IIE_FLAGS,
  653. .pio_mask = ATA_PIO4,
  654. .udma_mask = ATA_UDMA6,
  655. .port_ops = &mv_iie_ops,
  656. },
  657. { /* chip_soc */
  658. .flags = MV_GEN_IIE_FLAGS,
  659. .pio_mask = ATA_PIO4,
  660. .udma_mask = ATA_UDMA6,
  661. .port_ops = &mv_iie_ops,
  662. },
  663. };
  664. static const struct pci_device_id mv_pci_tbl[] = {
  665. { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
  666. { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
  667. { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
  668. { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
  669. /* RocketRAID 1720/174x have different identifiers */
  670. { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
  671. { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
  672. { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
  673. { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
  674. { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
  675. { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
  676. { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
  677. { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
  678. { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
  679. /* Adaptec 1430SA */
  680. { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
  681. /* Marvell 7042 support */
  682. { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
  683. /* Highpoint RocketRAID PCIe series */
  684. { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
  685. { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
  686. { } /* terminate list */
  687. };
  688. static const struct mv_hw_ops mv5xxx_ops = {
  689. .phy_errata = mv5_phy_errata,
  690. .enable_leds = mv5_enable_leds,
  691. .read_preamp = mv5_read_preamp,
  692. .reset_hc = mv5_reset_hc,
  693. .reset_flash = mv5_reset_flash,
  694. .reset_bus = mv5_reset_bus,
  695. };
  696. static const struct mv_hw_ops mv6xxx_ops = {
  697. .phy_errata = mv6_phy_errata,
  698. .enable_leds = mv6_enable_leds,
  699. .read_preamp = mv6_read_preamp,
  700. .reset_hc = mv6_reset_hc,
  701. .reset_flash = mv6_reset_flash,
  702. .reset_bus = mv_reset_pci_bus,
  703. };
  704. static const struct mv_hw_ops mv_soc_ops = {
  705. .phy_errata = mv6_phy_errata,
  706. .enable_leds = mv_soc_enable_leds,
  707. .read_preamp = mv_soc_read_preamp,
  708. .reset_hc = mv_soc_reset_hc,
  709. .reset_flash = mv_soc_reset_flash,
  710. .reset_bus = mv_soc_reset_bus,
  711. };
  712. static const struct mv_hw_ops mv_soc_65n_ops = {
  713. .phy_errata = mv_soc_65n_phy_errata,
  714. .enable_leds = mv_soc_enable_leds,
  715. .reset_hc = mv_soc_reset_hc,
  716. .reset_flash = mv_soc_reset_flash,
  717. .reset_bus = mv_soc_reset_bus,
  718. };
  719. /*
  720. * Functions
  721. */
  722. static inline void writelfl(unsigned long data, void __iomem *addr)
  723. {
  724. writel(data, addr);
  725. (void) readl(addr); /* flush to avoid PCI posted write */
  726. }
  727. static inline unsigned int mv_hc_from_port(unsigned int port)
  728. {
  729. return port >> MV_PORT_HC_SHIFT;
  730. }
  731. static inline unsigned int mv_hardport_from_port(unsigned int port)
  732. {
  733. return port & MV_PORT_MASK;
  734. }
  735. /*
  736. * Consolidate some rather tricky bit shift calculations.
  737. * This is hot-path stuff, so not a function.
  738. * Simple code, with two return values, so macro rather than inline.
  739. *
  740. * port is the sole input, in range 0..7.
  741. * shift is one output, for use with main_irq_cause / main_irq_mask registers.
  742. * hardport is the other output, in range 0..3.
  743. *
  744. * Note that port and hardport may be the same variable in some cases.
  745. */
  746. #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
  747. { \
  748. shift = mv_hc_from_port(port) * HC_SHIFT; \
  749. hardport = mv_hardport_from_port(port); \
  750. shift += hardport * 2; \
  751. }
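/*
 * Worked example (derived from the definitions above): for port == 5,
 * mv_hc_from_port(5) == 1, so shift starts at 1 * HC_SHIFT == 9;
 * hardport == (5 & MV_PORT_MASK) == 1, so shift becomes 9 + 2 * 1 == 11.
 * Port 5's ERR_IRQ and DONE_IRQ bits thus sit at bits 11 and 12 of the
 * main_irq_cause/main_irq_mask registers (cf. DONE_IRQ_4_7 above).
 */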
  752. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  753. {
  754. return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  755. }
  756. static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
  757. unsigned int port)
  758. {
  759. return mv_hc_base(base, mv_hc_from_port(port));
  760. }
  761. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  762. {
  763. return mv_hc_base_from_port(base, port) +
  764. MV_SATAHC_ARBTR_REG_SZ +
  765. (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
  766. }
  767. static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
  768. {
  769. void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
  770. unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
  771. return hc_mmio + ofs;
  772. }
  773. static inline void __iomem *mv_host_base(struct ata_host *host)
  774. {
  775. struct mv_host_priv *hpriv = host->private_data;
  776. return hpriv->base;
  777. }
  778. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  779. {
  780. return mv_port_base(mv_host_base(ap->host), ap->port_no);
  781. }
  782. static inline int mv_get_hc_count(unsigned long port_flags)
  783. {
  784. return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  785. }
  786. /**
  787. * mv_save_cached_regs - (re-)initialize cached port registers
  788. * @ap: the port whose registers we are caching
  789. *
  790. * Initialize the local cache of port registers,
  791. * so that reading them over and over again can
  792. * be avoided on the hotter paths of this driver.
  793. * This saves a few microseconds each time we switch
  794. * to/from EDMA mode to perform (e.g.) a drive cache flush.
  795. */
  796. static void mv_save_cached_regs(struct ata_port *ap)
  797. {
  798. void __iomem *port_mmio = mv_ap_base(ap);
  799. struct mv_port_priv *pp = ap->private_data;
  800. pp->cached.fiscfg = readl(port_mmio + FISCFG);
  801. pp->cached.ltmode = readl(port_mmio + LTMODE);
  802. pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
  803. pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
  804. }
  805. /**
  806. * mv_write_cached_reg - write to a cached port register
  807. * @addr: hardware address of the register
  808. * @old: pointer to cached value of the register
  809. * @new: new value for the register
  810. *
  811. * Write a new value to a cached register,
  812. * but only if the value is different from before.
  813. */
  814. static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
  815. {
  816. if (new != *old) {
  817. unsigned long laddr;
  818. *old = new;
  819. /*
  820. * Workaround for 88SX60x1-B2 FEr SATA#13:
  821. * Read-after-write is needed to prevent generating 64-bit
  822. * write cycles on the PCI bus for SATA interface registers
  823. * at offsets ending in 0x4 or 0xc.
  824. *
  825. * Looks like a lot of fuss, but it avoids an unnecessary
  826. * +1 usec read-after-write delay for unaffected registers.
  827. */
  828. laddr = (long)addr & 0xffff;
  829. if (laddr >= 0x300 && laddr <= 0x33c) {
  830. laddr &= 0x000f;
  831. if (laddr == 0x4 || laddr == 0xc) {
  832. writelfl(new, addr); /* read after write */
  833. return;
  834. }
  835. }
  836. writel(new, addr); /* unaffected by the errata */
  837. }
  838. }
  839. static void mv_set_edma_ptrs(void __iomem *port_mmio,
  840. struct mv_host_priv *hpriv,
  841. struct mv_port_priv *pp)
  842. {
  843. u32 index;
  844. /*
  845. * initialize request queue
  846. */
  847. pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
  848. index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
  849. WARN_ON(pp->crqb_dma & 0x3ff);
  850. writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
  851. writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
  852. port_mmio + EDMA_REQ_Q_IN_PTR);
  853. writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
  854. /*
  855. * initialize response queue
  856. */
  857. pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
  858. index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
  859. WARN_ON(pp->crpb_dma & 0xff);
  860. writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
  861. writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
  862. writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
  863. port_mmio + EDMA_RSP_Q_OUT_PTR);
  864. }
  865. static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
  866. {
  867. /*
  868. * When writing to the main_irq_mask in hardware,
  869. * we must ensure exclusivity between the interrupt coalescing bits
  870. * and the corresponding individual port DONE_IRQ bits.
  871. *
  872. * Note that this register is really an "IRQ enable" register,
  873. * not an "IRQ mask" register as Marvell's naming might suggest.
  874. */
  875. if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
  876. mask &= ~DONE_IRQ_0_3;
  877. if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
  878. mask &= ~DONE_IRQ_4_7;
  879. writelfl(mask, hpriv->main_irq_mask_addr);
  880. }
  881. static void mv_set_main_irq_mask(struct ata_host *host,
  882. u32 disable_bits, u32 enable_bits)
  883. {
  884. struct mv_host_priv *hpriv = host->private_data;
  885. u32 old_mask, new_mask;
  886. old_mask = hpriv->main_irq_mask;
  887. new_mask = (old_mask & ~disable_bits) | enable_bits;
  888. if (new_mask != old_mask) {
  889. hpriv->main_irq_mask = new_mask;
  890. mv_write_main_irq_mask(new_mask, hpriv);
  891. }
  892. }
  893. static void mv_enable_port_irqs(struct ata_port *ap,
  894. unsigned int port_bits)
  895. {
  896. unsigned int shift, hardport, port = ap->port_no;
  897. u32 disable_bits, enable_bits;
  898. MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
  899. disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
  900. enable_bits = port_bits << shift;
  901. mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
  902. }
  903. static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
  904. void __iomem *port_mmio,
  905. unsigned int port_irqs)
  906. {
  907. struct mv_host_priv *hpriv = ap->host->private_data;
  908. int hardport = mv_hardport_from_port(ap->port_no);
  909. void __iomem *hc_mmio = mv_hc_base_from_port(
  910. mv_host_base(ap->host), ap->port_no);
  911. u32 hc_irq_cause;
  912. /* clear EDMA event indicators, if any */
  913. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
  914. /* clear pending irq events */
  915. hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
  916. writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
  917. /* clear FIS IRQ Cause */
  918. if (IS_GEN_IIE(hpriv))
  919. writelfl(0, port_mmio + FIS_IRQ_CAUSE);
  920. mv_enable_port_irqs(ap, port_irqs);
  921. }
  922. static void mv_set_irq_coalescing(struct ata_host *host,
  923. unsigned int count, unsigned int usecs)
  924. {
  925. struct mv_host_priv *hpriv = host->private_data;
  926. void __iomem *mmio = hpriv->base, *hc_mmio;
  927. u32 coal_enable = 0;
  928. unsigned long flags;
  929. unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
  930. const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  931. ALL_PORTS_COAL_DONE;
  932. /* Disable IRQ coalescing if either threshold is zero */
  933. if (!usecs || !count) {
  934. clks = count = 0;
  935. } else {
  936. /* Respect maximum limits of the hardware */
  937. clks = usecs * COAL_CLOCKS_PER_USEC;
  938. if (clks > MAX_COAL_TIME_THRESHOLD)
  939. clks = MAX_COAL_TIME_THRESHOLD;
  940. if (count > MAX_COAL_IO_COUNT)
  941. count = MAX_COAL_IO_COUNT;
  942. }
  943. spin_lock_irqsave(&host->lock, flags);
  944. mv_set_main_irq_mask(host, coal_disable, 0);
  945. if (is_dual_hc && !IS_GEN_I(hpriv)) {
  946. /*
  947. * GEN_II/GEN_IIE with dual host controllers:
  948. * one set of global thresholds for the entire chip.
  949. */
  950. writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
  951. writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
  952. /* clear leftover coal IRQ bit */
  953. writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
  954. if (count)
  955. coal_enable = ALL_PORTS_COAL_DONE;
  956. clks = count = 0; /* force clearing of regular regs below */
  957. }
  958. /*
  959. * All chips: independent thresholds for each HC on the chip.
  960. */
  961. hc_mmio = mv_hc_base_from_port(mmio, 0);
  962. writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
  963. writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
  964. writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
  965. if (count)
  966. coal_enable |= PORTS_0_3_COAL_DONE;
  967. if (is_dual_hc) {
  968. hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
  969. writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
  970. writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
  971. writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
  972. if (count)
  973. coal_enable |= PORTS_4_7_COAL_DONE;
  974. }
  975. mv_set_main_irq_mask(host, 0, coal_enable);
  976. spin_unlock_irqrestore(&host->lock, flags);
  977. }
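/*
 * Numeric example (derived from the constants above): with
 * irq_coalescing_usecs == 100 and irq_coalescing_io_count == 4,
 * clks == 100 * COAL_CLOCKS_PER_USEC == 15000, well under
 * MAX_COAL_TIME_THRESHOLD (16777215), so the hardware is asked to raise a
 * single coalesced "done" IRQ after 4 completed I/Os or ~100 usecs,
 * whichever comes first.
 */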
  978. /**
  979. * mv_start_edma - Enable eDMA engine
  980. * @base: port base address
  981. * @pp: port private data
  982. *
  983. * Verify the local cache of the eDMA state is accurate with a
  984. * WARN_ON.
  985. *
  986. * LOCKING:
  987. * Inherited from caller.
  988. */
  989. static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
  990. struct mv_port_priv *pp, u8 protocol)
  991. {
  992. int want_ncq = (protocol == ATA_PROT_NCQ);
  993. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  994. int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
  995. if (want_ncq != using_ncq)
  996. mv_stop_edma(ap);
  997. }
  998. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
  999. struct mv_host_priv *hpriv = ap->host->private_data;
  1000. mv_edma_cfg(ap, want_ncq, 1);
  1001. mv_set_edma_ptrs(port_mmio, hpriv, pp);
  1002. mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
  1003. writelfl(EDMA_EN, port_mmio + EDMA_CMD);
  1004. pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
  1005. }
  1006. }
  1007. static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
  1008. {
  1009. void __iomem *port_mmio = mv_ap_base(ap);
  1010. const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
  1011. const int per_loop = 5, timeout = (15 * 1000 / per_loop);
  1012. int i;
  1013. /*
  1014. * Wait for the EDMA engine to finish transactions in progress.
  1015. * No idea what a good "timeout" value might be, but measurements
  1016. * indicate that it often requires hundreds of microseconds
  1017. * with two drives in-use. So we use the 15msec value above
  1018. * as a rough guess at what even more drives might require.
  1019. */
  1020. for (i = 0; i < timeout; ++i) {
  1021. u32 edma_stat = readl(port_mmio + EDMA_STATUS);
  1022. if ((edma_stat & empty_idle) == empty_idle)
  1023. break;
  1024. udelay(per_loop);
  1025. }
  1026. /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
  1027. }
  1028. /**
  1029. * mv_stop_edma_engine - Disable eDMA engine
  1030. * @port_mmio: io base address
  1031. *
  1032. * LOCKING:
  1033. * Inherited from caller.
  1034. */
  1035. static int mv_stop_edma_engine(void __iomem *port_mmio)
  1036. {
  1037. int i;
  1038. /* Disable eDMA. The disable bit auto clears. */
  1039. writelfl(EDMA_DS, port_mmio + EDMA_CMD);
  1040. /* Wait for the chip to confirm eDMA is off. */
  1041. for (i = 10000; i > 0; i--) {
  1042. u32 reg = readl(port_mmio + EDMA_CMD);
  1043. if (!(reg & EDMA_EN))
  1044. return 0;
  1045. udelay(10);
  1046. }
  1047. return -EIO;
  1048. }
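/*
 * Note: the polling loop above waits at most 10000 * 10 usecs == 100 ms
 * for EDMA_EN to clear before giving up with -EIO.
 */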
  1049. static int mv_stop_edma(struct ata_port *ap)
  1050. {
  1051. void __iomem *port_mmio = mv_ap_base(ap);
  1052. struct mv_port_priv *pp = ap->private_data;
  1053. int err = 0;
  1054. if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
  1055. return 0;
  1056. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  1057. mv_wait_for_edma_empty_idle(ap);
  1058. if (mv_stop_edma_engine(port_mmio)) {
  1059. ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
  1060. err = -EIO;
  1061. }
  1062. mv_edma_cfg(ap, 0, 0);
  1063. return err;
  1064. }
  1065. #ifdef ATA_DEBUG
  1066. static void mv_dump_mem(void __iomem *start, unsigned bytes)
  1067. {
  1068. int b, w;
  1069. for (b = 0; b < bytes; ) {
  1070. DPRINTK("%p: ", start + b);
  1071. for (w = 0; b < bytes && w < 4; w++) {
  1072. printk("%08x ", readl(start + b));
  1073. b += sizeof(u32);
  1074. }
  1075. printk("\n");
  1076. }
  1077. }
  1078. #endif
  1079. static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
  1080. {
  1081. #ifdef ATA_DEBUG
  1082. int b, w;
  1083. u32 dw;
  1084. for (b = 0; b < bytes; ) {
  1085. DPRINTK("%02x: ", b);
  1086. for (w = 0; b < bytes && w < 4; w++) {
  1087. (void) pci_read_config_dword(pdev, b, &dw);
  1088. printk("%08x ", dw);
  1089. b += sizeof(u32);
  1090. }
  1091. printk("\n");
  1092. }
  1093. #endif
  1094. }
  1095. static void mv_dump_all_regs(void __iomem *mmio_base, int port,
  1096. struct pci_dev *pdev)
  1097. {
  1098. #ifdef ATA_DEBUG
  1099. void __iomem *hc_base = mv_hc_base(mmio_base,
  1100. port >> MV_PORT_HC_SHIFT);
  1101. void __iomem *port_base;
  1102. int start_port, num_ports, p, start_hc, num_hcs, hc;
  1103. if (0 > port) {
  1104. start_hc = start_port = 0;
  1105. num_ports = 8; /* should be benign for 4 port devs */
  1106. num_hcs = 2;
  1107. } else {
  1108. start_hc = port >> MV_PORT_HC_SHIFT;
  1109. start_port = port;
  1110. num_ports = num_hcs = 1;
  1111. }
  1112. DPRINTK("All registers for port(s) %u-%u:\n", start_port,
  1113. num_ports > 1 ? num_ports - 1 : start_port);
  1114. if (NULL != pdev) {
  1115. DPRINTK("PCI config space regs:\n");
  1116. mv_dump_pci_cfg(pdev, 0x68);
  1117. }
  1118. DPRINTK("PCI regs:\n");
  1119. mv_dump_mem(mmio_base+0xc00, 0x3c);
  1120. mv_dump_mem(mmio_base+0xd00, 0x34);
  1121. mv_dump_mem(mmio_base+0xf00, 0x4);
  1122. mv_dump_mem(mmio_base+0x1d00, 0x6c);
  1123. for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
  1124. hc_base = mv_hc_base(mmio_base, hc);
  1125. DPRINTK("HC regs (HC %i):\n", hc);
  1126. mv_dump_mem(hc_base, 0x1c);
  1127. }
  1128. for (p = start_port; p < start_port + num_ports; p++) {
  1129. port_base = mv_port_base(mmio_base, p);
  1130. DPRINTK("EDMA regs (port %i):\n", p);
  1131. mv_dump_mem(port_base, 0x54);
  1132. DPRINTK("SATA regs (port %i):\n", p);
  1133. mv_dump_mem(port_base+0x300, 0x60);
  1134. }
  1135. #endif
  1136. }
  1137. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  1138. {
  1139. unsigned int ofs;
  1140. switch (sc_reg_in) {
  1141. case SCR_STATUS:
  1142. case SCR_CONTROL:
  1143. case SCR_ERROR:
  1144. ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
  1145. break;
  1146. case SCR_ACTIVE:
  1147. ofs = SATA_ACTIVE; /* active is not with the others */
  1148. break;
  1149. default:
  1150. ofs = 0xffffffffU;
  1151. break;
  1152. }
  1153. return ofs;
  1154. }
  1155. static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
  1156. {
  1157. unsigned int ofs = mv_scr_offset(sc_reg_in);
  1158. if (ofs != 0xffffffffU) {
  1159. *val = readl(mv_ap_base(link->ap) + ofs);
  1160. return 0;
  1161. } else
  1162. return -EINVAL;
  1163. }
  1164. static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
  1165. {
  1166. unsigned int ofs = mv_scr_offset(sc_reg_in);
  1167. if (ofs != 0xffffffffU) {
  1168. void __iomem *addr = mv_ap_base(link->ap) + ofs;
  1169. if (sc_reg_in == SCR_CONTROL) {
  1170. /*
  1171. * Workaround for 88SX60x1 FEr SATA#26:
  1172. *
  1173. * COMRESETs have to take care not to accidentally
  1174. * put the drive to sleep when writing SCR_CONTROL.
  1175. * Setting bits 12..15 prevents this problem.
  1176. *
  1177. * So if we see an outbound COMRESET, set those bits.
  1178. * Ditto for the followup write that clears the reset.
  1179. *
  1180. * The proprietary driver does this for
  1181. * all chip versions, and so do we.
  1182. */
  1183. if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
  1184. val |= 0xf000;
  1185. }
  1186. writelfl(val, addr);
  1187. return 0;
  1188. } else
  1189. return -EINVAL;
  1190. }
  1191. static void mv6_dev_config(struct ata_device *adev)
  1192. {
  1193. /*
  1194. * Deal with Gen-II ("mv6") hardware quirks/restrictions:
  1195. *
  1196. * Gen-II does not support NCQ over a port multiplier
  1197. * (no FIS-based switching).
  1198. */
  1199. if (adev->flags & ATA_DFLAG_NCQ) {
  1200. if (sata_pmp_attached(adev->link->ap)) {
  1201. adev->flags &= ~ATA_DFLAG_NCQ;
  1202. ata_dev_printk(adev, KERN_INFO,
  1203. "NCQ disabled for command-based switching\n");
  1204. }
  1205. }
  1206. }
  1207. static int mv_qc_defer(struct ata_queued_cmd *qc)
  1208. {
  1209. struct ata_link *link = qc->dev->link;
  1210. struct ata_port *ap = link->ap;
  1211. struct mv_port_priv *pp = ap->private_data;
  1212. /*
  1213. * Don't allow new commands if we're in a delayed EH state
  1214. * for NCQ and/or FIS-based switching.
  1215. */
  1216. if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
  1217. return ATA_DEFER_PORT;
  1218. /* PIO commands need exclusive link: no other commands [DMA or PIO]
  1219. * can run concurrently.
  1220. * Set excl_link when we want to send a PIO command in DMA mode
  1221. * or a non-NCQ command in NCQ mode.
  1222. * When we receive a command from that link, and there are no
  1223. * outstanding commands, mark a flag to clear excl_link and let
  1224. * the command go through.
  1225. */
  1226. if (unlikely(ap->excl_link)) {
  1227. if (link == ap->excl_link) {
  1228. if (ap->nr_active_links)
  1229. return ATA_DEFER_PORT;
  1230. qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
  1231. return 0;
  1232. } else
  1233. return ATA_DEFER_PORT;
  1234. }
  1235. /*
  1236. * If the port is completely idle, then allow the new qc.
  1237. */
  1238. if (ap->nr_active_links == 0)
  1239. return 0;
  1240. /*
  1241. * The port is operating in host queuing mode (EDMA) with NCQ
  1242. * enabled, allow multiple NCQ commands. EDMA also allows
  1243. * queueing multiple DMA commands but libata core currently
  1244. * doesn't allow it.
  1245. */
  1246. if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
  1247. (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
  1248. if (ata_is_ncq(qc->tf.protocol))
  1249. return 0;
  1250. else {
  1251. ap->excl_link = link;
  1252. return ATA_DEFER_PORT;
  1253. }
  1254. }
  1255. return ATA_DEFER_PORT;
  1256. }
  1257. static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
  1258. {
  1259. struct mv_port_priv *pp = ap->private_data;
  1260. void __iomem *port_mmio;
  1261. u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
  1262. u32 ltmode, *old_ltmode = &pp->cached.ltmode;
  1263. u32 haltcond, *old_haltcond = &pp->cached.haltcond;
  1264. ltmode = *old_ltmode & ~LTMODE_BIT8;
  1265. haltcond = *old_haltcond | EDMA_ERR_DEV;
  1266. if (want_fbs) {
  1267. fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
  1268. ltmode = *old_ltmode | LTMODE_BIT8;
  1269. if (want_ncq)
  1270. haltcond &= ~EDMA_ERR_DEV;
  1271. else
  1272. fiscfg |= FISCFG_WAIT_DEV_ERR;
  1273. } else {
  1274. fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
  1275. }
  1276. port_mmio = mv_ap_base(ap);
  1277. mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
  1278. mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
  1279. mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
  1280. }
  1281. static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
  1282. {
  1283. struct mv_host_priv *hpriv = ap->host->private_data;
  1284. u32 old, new;
  1285. /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
  1286. old = readl(hpriv->base + GPIO_PORT_CTL);
  1287. if (want_ncq)
  1288. new = old | (1 << 22);
  1289. else
  1290. new = old & ~(1 << 22);
  1291. if (new != old)
  1292. writel(new, hpriv->base + GPIO_PORT_CTL);
  1293. }
  1294. /**
  1295. * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
  1296. * @ap: Port being initialized
  1297. *
  1298. * There are two DMA modes on these chips: basic DMA, and EDMA.
  1299. *
  1300. * Bit-0 of the "EDMA RESERVED" register enables/disables use
  1301. * of basic DMA on the GEN_IIE versions of the chips.
  1302. *
  1303. * This bit survives EDMA resets, and must be set for basic DMA
  1304. * to function, and should be cleared when EDMA is active.
  1305. */
  1306. static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
  1307. {
  1308. struct mv_port_priv *pp = ap->private_data;
  1309. u32 new, *old = &pp->cached.unknown_rsvd;
  1310. if (enable_bmdma)
  1311. new = *old | 1;
  1312. else
  1313. new = *old & ~1;
  1314. mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
  1315. }
  1316. /*
  1317. * SOC chips have an issue whereby the HDD LEDs don't always blink
  1318. * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
  1319. * of the SOC takes care of it, generating a steady blink rate when
  1320. * any drive on the chip is active.
  1321. *
  1322. * Unfortunately, the blink mode is a global hardware setting for the SOC,
  1323. * so we must use it whenever at least one port on the SOC has NCQ enabled.
  1324. *
  1325. * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
  1326. * LED operation works then, and provides better (more accurate) feedback.
  1327. *
  1328. * Note that this code assumes that an SOC never has more than one HC onboard.
  1329. */
  1330. static void mv_soc_led_blink_enable(struct ata_port *ap)
  1331. {
  1332. struct ata_host *host = ap->host;
  1333. struct mv_host_priv *hpriv = host->private_data;
  1334. void __iomem *hc_mmio;
  1335. u32 led_ctrl;
  1336. if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
  1337. return;
  1338. hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
  1339. hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
  1340. led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
  1341. writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
  1342. }
  1343. static void mv_soc_led_blink_disable(struct ata_port *ap)
  1344. {
  1345. struct ata_host *host = ap->host;
  1346. struct mv_host_priv *hpriv = host->private_data;
  1347. void __iomem *hc_mmio;
  1348. u32 led_ctrl;
  1349. unsigned int port;
  1350. if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
  1351. return;
  1352. /* disable led-blink only if no ports are using NCQ */
  1353. for (port = 0; port < hpriv->n_ports; port++) {
  1354. struct ata_port *this_ap = host->ports[port];
  1355. struct mv_port_priv *pp = this_ap->private_data;
  1356. if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
  1357. return;
  1358. }
  1359. hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
  1360. hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
  1361. led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
  1362. writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
  1363. }
  1364. static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
  1365. {
  1366. u32 cfg;
  1367. struct mv_port_priv *pp = ap->private_data;
  1368. struct mv_host_priv *hpriv = ap->host->private_data;
  1369. void __iomem *port_mmio = mv_ap_base(ap);
  1370. /* set up non-NCQ EDMA configuration */
  1371. cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
  1372. pp->pp_flags &=
  1373. ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
  1374. if (IS_GEN_I(hpriv))
  1375. cfg |= (1 << 8); /* enab config burst size mask */
  1376. else if (IS_GEN_II(hpriv)) {
  1377. cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
  1378. mv_60x1_errata_sata25(ap, want_ncq);
  1379. } else if (IS_GEN_IIE(hpriv)) {
  1380. int want_fbs = sata_pmp_attached(ap);
  1381. /*
  1382. * Possible future enhancement:
  1383. *
  1384. * The chip can use FBS with non-NCQ, if we allow it,
  1385. * But first we need to have the error handling in place
  1386. * for this mode (datasheet section 7.3.15.4.2.3).
  1387. * So disallow non-NCQ FBS for now.
  1388. */
  1389. want_fbs &= want_ncq;
  1390. mv_config_fbs(ap, want_ncq, want_fbs);
  1391. if (want_fbs) {
  1392. pp->pp_flags |= MV_PP_FLAG_FBS_EN;
  1393. cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
  1394. }
  1395. cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
  1396. if (want_edma) {
  1397. cfg |= (1 << 22); /* enab 4-entry host queue cache */
  1398. if (!IS_SOC(hpriv))
  1399. cfg |= (1 << 18); /* enab early completion */
  1400. }
  1401. if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
  1402. cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
  1403. mv_bmdma_enable_iie(ap, !want_edma);
  1404. if (IS_SOC(hpriv)) {
  1405. if (want_ncq)
  1406. mv_soc_led_blink_enable(ap);
  1407. else
  1408. mv_soc_led_blink_disable(ap);
  1409. }
  1410. }
  1411. if (want_ncq) {
  1412. cfg |= EDMA_CFG_NCQ;
  1413. pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
  1414. }
  1415. writelfl(cfg, port_mmio + EDMA_CFG);
  1416. }
  1417. static void mv_port_free_dma_mem(struct ata_port *ap)
  1418. {
  1419. struct mv_host_priv *hpriv = ap->host->private_data;
  1420. struct mv_port_priv *pp = ap->private_data;
  1421. int tag;
  1422. if (pp->crqb) {
  1423. dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
  1424. pp->crqb = NULL;
  1425. }
  1426. if (pp->crpb) {
  1427. dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
  1428. pp->crpb = NULL;
  1429. }
  1430. /*
  1431. * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
  1432. * For later hardware, we have one unique sg_tbl per NCQ tag.
  1433. */
  1434. for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
  1435. if (pp->sg_tbl[tag]) {
  1436. if (tag == 0 || !IS_GEN_I(hpriv))
  1437. dma_pool_free(hpriv->sg_tbl_pool,
  1438. pp->sg_tbl[tag],
  1439. pp->sg_tbl_dma[tag]);
  1440. pp->sg_tbl[tag] = NULL;
  1441. }
  1442. }
  1443. }
  1444. /**
  1445. * mv_port_start - Port specific init/start routine.
  1446. * @ap: ATA channel to manipulate
  1447. *
  1448. * Allocate and point to DMA memory, init port private memory,
  1449. * zero indices.
  1450. *
  1451. * LOCKING:
  1452. * Inherited from caller.
  1453. */
  1454. static int mv_port_start(struct ata_port *ap)
  1455. {
  1456. struct device *dev = ap->host->dev;
  1457. struct mv_host_priv