
/drivers/ata/sata_mv.c

https://bitbucket.org/wisechild/galaxy-nexus
C | 4423 lines | 2993 code | 573 blank | 857 comment | 390 complexity | MD5: 6277dbf56209256fcc7e8f48a3cf64b2
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated; the full file is available in the repository linked above.

   1/*
   2 * sata_mv.c - Marvell SATA support
   3 *
   4 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
   5 * Copyright 2005: EMC Corporation, all rights reserved.
   6 * Copyright 2005 Red Hat, Inc.  All rights reserved.
   7 *
   8 * Originally written by Brett Russ.
   9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
  10 *
  11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; version 2 of the License.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  25 *
  26 */
  27
  28/*
  29 * sata_mv TODO list:
  30 *
  31 * --> Develop a low-power-consumption strategy, and implement it.
  32 *
  33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
  34 *
  35 * --> [Experiment, Marvell value added] Is it possible to use target
  36 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
  37 *       creating LibATA target mode support would be very interesting.
  38 *
  39 *       Target mode, for those without docs, is the ability to directly
  40 *       connect two SATA ports.
  41 */
  42
  43/*
  44 * 80x1-B2 errata PCI#11:
  45 *
  46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
  47 * should be careful to insert those cards only onto PCI-X bus #0,
  48 * and only in device slots 0..7, not higher.  The chips may not
  49 * work correctly otherwise  (note: this is a pretty rare condition).
  50 */
  51
  52#include <linux/kernel.h>
  53#include <linux/module.h>
  54#include <linux/pci.h>
  55#include <linux/init.h>
  56#include <linux/blkdev.h>
  57#include <linux/delay.h>
  58#include <linux/interrupt.h>
  59#include <linux/dmapool.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/device.h>
  62#include <linux/clk.h>
  63#include <linux/platform_device.h>
  64#include <linux/ata_platform.h>
  65#include <linux/mbus.h>
  66#include <linux/bitops.h>
  67#include <linux/gfp.h>
  68#include <scsi/scsi_host.h>
  69#include <scsi/scsi_cmnd.h>
  70#include <scsi/scsi_device.h>
  71#include <linux/libata.h>
  72
  73#define DRV_NAME	"sata_mv"
  74#define DRV_VERSION	"1.28"
  75
  76/*
  77 * module options
  78 */
  79
  80static int msi;
  81#ifdef CONFIG_PCI
  82module_param(msi, int, S_IRUGO);
  83MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  84#endif
  85
  86static int irq_coalescing_io_count;
  87module_param(irq_coalescing_io_count, int, S_IRUGO);
  88MODULE_PARM_DESC(irq_coalescing_io_count,
  89		 "IRQ coalescing I/O count threshold (0..255)");
  90
  91static int irq_coalescing_usecs;
  92module_param(irq_coalescing_usecs, int, S_IRUGO);
  93MODULE_PARM_DESC(irq_coalescing_usecs,
  94		 "IRQ coalescing time threshold in usecs");
  95
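/*
 * Editorial note (not in the original source): the module parameters above
 * are readable via sysfs (S_IRUGO) but set at load time, and they are the
 * natural inputs for the IRQ coalescing setup further down.  An illustrative
 * invocation, assuming the usual module-parameter syntax:
 *
 *     modprobe sata_mv msi=1 irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * With COAL_CLOCKS_PER_USEC = 150 (defined just below), 100 usecs becomes
 * 100 * 150 = 15000 internal clocks, well under the 2^24 - 1 cap, and the
 * I/O count is capped at MAX_COAL_IO_COUNT (255).  Leaving either value at
 * 0 keeps coalescing disabled.
 */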
  96enum {
  97	/* BARs are enumerated in pci_resource_start() terms */
  98	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
  99	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
 100	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
 101
 102	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
 103	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
 104
 105	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
 106	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
 107	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
 108	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
 109
 110	MV_PCI_REG_BASE		= 0,
 111
 112	/*
 113	 * Per-chip ("all ports") interrupt coalescing feature.
 114	 * This is only for GEN_II / GEN_IIE hardware.
 115	 *
 116	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 117	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 118	 */
 119	COAL_REG_BASE		= 0x18000,
 120	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
 121	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
 122
 123	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
 124	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
 125
 126	/*
 127	 * Registers for the (unused here) transaction coalescing feature:
 128	 */
 129	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
 130	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
 131
 132	SATAHC0_REG_BASE	= 0x20000,
 133	FLASH_CTL		= 0x1046c,
 134	GPIO_PORT_CTL		= 0x104f0,
 135	RESET_CFG		= 0x180d8,
 136
 137	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
 138	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
 139	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 140	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 141
 142	MV_MAX_Q_DEPTH		= 32,
 143	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
 144
 145	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
 146	 * CRPB needs alignment on a 256B boundary. Size == 256B
 147	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 148	 */
 149	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
 150	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
 151	MV_MAX_SG_CT		= 256,
 152	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
 153
 154	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
 155	MV_PORT_HC_SHIFT	= 2,
 156	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
 157	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
 158	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
 159
 160	/* Host Flags */
 161	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 162
 163	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
 164
 165	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
 166
 167	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
 168				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
 169
 170	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
 171
 172	CRQB_FLAG_READ		= (1 << 0),
 173	CRQB_TAG_SHIFT		= 1,
 174	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
 175	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
 176	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 177	CRQB_CMD_ADDR_SHIFT	= 8,
 178	CRQB_CMD_CS		= (0x2 << 11),
 179	CRQB_CMD_LAST		= (1 << 15),
 180
 181	CRPB_FLAG_STATUS_SHIFT	= 8,
 182	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
 183	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
 184
 185	EPRD_FLAG_END_OF_TBL	= (1 << 31),
 186
 187	/* PCI interface registers */
 188
 189	MV_PCI_COMMAND		= 0xc00,
 190	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
 191	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
 192
 193	PCI_MAIN_CMD_STS	= 0xd30,
 194	STOP_PCI_MASTER		= (1 << 2),
 195	PCI_MASTER_EMPTY	= (1 << 3),
 196	GLOB_SFT_RST		= (1 << 4),
 197
 198	MV_PCI_MODE		= 0xd00,
 199	MV_PCI_MODE_MASK	= 0x30,
 200
 201	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
 202	MV_PCI_DISC_TIMER	= 0xd04,
 203	MV_PCI_MSI_TRIGGER	= 0xc38,
 204	MV_PCI_SERR_MASK	= 0xc28,
 205	MV_PCI_XBAR_TMOUT	= 0x1d04,
 206	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
 207	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
 208	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
 209	MV_PCI_ERR_COMMAND	= 0x1d50,
 210
 211	PCI_IRQ_CAUSE		= 0x1d58,
 212	PCI_IRQ_MASK		= 0x1d5c,
 213	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
 214
 215	PCIE_IRQ_CAUSE		= 0x1900,
 216	PCIE_IRQ_MASK		= 0x1910,
 217	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
 218
 219	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
 220	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
 221	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
 222	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
 223	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
 224	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
 225	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
 226	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
 227	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
 228	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
 229	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
 230	PCI_ERR			= (1 << 18),
 231	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
 232	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
 233	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
 234	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
 235	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
 236	GPIO_INT		= (1 << 22),
 237	SELF_INT		= (1 << 23),
 238	TWSI_INT		= (1 << 24),
 239	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 240	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 241	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
 242
 243	/* SATAHC registers */
 244	HC_CFG			= 0x00,
 245
 246	HC_IRQ_CAUSE		= 0x14,
 247	DMA_IRQ			= (1 << 0),	/* shift by port # */
 248	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
 249	DEV_IRQ			= (1 << 8),	/* shift by port # */
 250
 251	/*
 252	 * Per-HC (Host-Controller) interrupt coalescing feature.
 253	 * This is present on all chip generations.
 254	 *
 255	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 256	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 257	 */
 258	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
 259	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
 260
 261	SOC_LED_CTRL		= 0x2c,
 262	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
 263	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
 264						/*  with dev activity LED */
 265
 266	/* Shadow block registers */
 267	SHD_BLK			= 0x100,
 268	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
 269
 270	/* SATA registers */
 271	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
 272	SATA_ACTIVE		= 0x350,
 273	FIS_IRQ_CAUSE		= 0x364,
 274	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
 275
 276	LTMODE			= 0x30c,	/* requires read-after-write */
 277	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
 278
 279	PHY_MODE2		= 0x330,
 280	PHY_MODE3		= 0x310,
 281
 282	PHY_MODE4		= 0x314,	/* requires read-after-write */
 283	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
 284	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
 285	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
 286	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
 287
 288	SATA_IFCTL		= 0x344,
 289	SATA_TESTCTL		= 0x348,
 290	SATA_IFSTAT		= 0x34c,
 291	VENDOR_UNIQUE_FIS	= 0x35c,
 292
 293	FISCFG			= 0x360,
 294	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
 295	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
 296
 297	PHY_MODE9_GEN2		= 0x398,
 298	PHY_MODE9_GEN1		= 0x39c,
 299	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */
 300
 301	MV5_PHY_MODE		= 0x74,
 302	MV5_LTMODE		= 0x30,
 303	MV5_PHY_CTL		= 0x0C,
 304	SATA_IFCFG		= 0x050,
 305
 306	MV_M2_PREAMP_MASK	= 0x7e0,
 307
 308	/* Port registers */
 309	EDMA_CFG		= 0,
 310	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
 311	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
 312	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 313	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 314	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
 315	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
 316	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 317
 318	EDMA_ERR_IRQ_CAUSE	= 0x8,
 319	EDMA_ERR_IRQ_MASK	= 0xc,
 320	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
 321	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
 322	EDMA_ERR_DEV		= (1 << 2),	/* device error */
 323	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
 324	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
 325	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
 326	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
 327	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
 328	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
 329	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
 330	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
 331	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
 332	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
 333	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
 334
 335	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
 336	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
 337	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
 338	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
 339	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
 340
 341	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
 342
 343	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
 344	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
 345	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
 346	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
 347	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
 348	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
 349
 350	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
 351
 352	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
 353	EDMA_ERR_OVERRUN_5	= (1 << 5),
 354	EDMA_ERR_UNDERRUN_5	= (1 << 6),
 355
 356	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
 357				  EDMA_ERR_LNK_CTRL_RX_1 |
 358				  EDMA_ERR_LNK_CTRL_RX_3 |
 359				  EDMA_ERR_LNK_CTRL_TX,
 360
 361	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
 362				  EDMA_ERR_PRD_PAR |
 363				  EDMA_ERR_DEV_DCON |
 364				  EDMA_ERR_DEV_CON |
 365				  EDMA_ERR_SERR |
 366				  EDMA_ERR_SELF_DIS |
 367				  EDMA_ERR_CRQB_PAR |
 368				  EDMA_ERR_CRPB_PAR |
 369				  EDMA_ERR_INTRL_PAR |
 370				  EDMA_ERR_IORDY |
 371				  EDMA_ERR_LNK_CTRL_RX_2 |
 372				  EDMA_ERR_LNK_DATA_RX |
 373				  EDMA_ERR_LNK_DATA_TX |
 374				  EDMA_ERR_TRANS_PROTO,
 375
 376	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
 377				  EDMA_ERR_PRD_PAR |
 378				  EDMA_ERR_DEV_DCON |
 379				  EDMA_ERR_DEV_CON |
 380				  EDMA_ERR_OVERRUN_5 |
 381				  EDMA_ERR_UNDERRUN_5 |
 382				  EDMA_ERR_SELF_DIS_5 |
 383				  EDMA_ERR_CRQB_PAR |
 384				  EDMA_ERR_CRPB_PAR |
 385				  EDMA_ERR_INTRL_PAR |
 386				  EDMA_ERR_IORDY,
 387
 388	EDMA_REQ_Q_BASE_HI	= 0x10,
 389	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
 390
 391	EDMA_REQ_Q_OUT_PTR	= 0x18,
 392	EDMA_REQ_Q_PTR_SHIFT	= 5,
 393
 394	EDMA_RSP_Q_BASE_HI	= 0x1c,
 395	EDMA_RSP_Q_IN_PTR	= 0x20,
 396	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
 397	EDMA_RSP_Q_PTR_SHIFT	= 3,
 398
 399	EDMA_CMD		= 0x28,		/* EDMA command register */
 400	EDMA_EN			= (1 << 0),	/* enable EDMA */
 401	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
 402	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
 403
 404	EDMA_STATUS		= 0x30,		/* EDMA engine status */
 405	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
 406	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
 407
 408	EDMA_IORDY_TMOUT	= 0x34,
 409	EDMA_ARB_CFG		= 0x38,
 410
 411	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
 412	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
 413
 414	BMDMA_CMD		= 0x224,	/* bmdma command register */
 415	BMDMA_STATUS		= 0x228,	/* bmdma status register */
 416	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
 417	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
 418
 419	/* Host private flags (hp_flags) */
 420	MV_HP_FLAG_MSI		= (1 << 0),
 421	MV_HP_ERRATA_50XXB0	= (1 << 1),
 422	MV_HP_ERRATA_50XXB2	= (1 << 2),
 423	MV_HP_ERRATA_60X1B2	= (1 << 3),
 424	MV_HP_ERRATA_60X1C0	= (1 << 4),
 425	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
 426	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
 427	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
 428	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
 429	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
 430	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
 431	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
 432
 433	/* Port private flags (pp_flags) */
 434	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
 435	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
 436	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
 437	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
 438	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
 439};
 440
 441#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 442#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 443#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 444#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
 445#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 446
 447#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
 448#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
 449
 450enum {
 451	/* DMA boundary 0xffff is required by the s/g splitting
 452	 * we need on /length/ in mv_fill_sg().
 453	 */
 454	MV_DMA_BOUNDARY		= 0xffffU,
 455
 456	/* mask of register bits containing lower 32 bits
 457	 * of EDMA request queue DMA address
 458	 */
 459	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
 460
 461	/* ditto, for response queue */
 462	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
 463};
 464
 465enum chip_type {
 466	chip_504x,
 467	chip_508x,
 468	chip_5080,
 469	chip_604x,
 470	chip_608x,
 471	chip_6042,
 472	chip_7042,
 473	chip_soc,
 474};
 475
 476/* Command ReQuest Block: 32B */
 477struct mv_crqb {
 478	__le32			sg_addr;
 479	__le32			sg_addr_hi;
 480	__le16			ctrl_flags;
 481	__le16			ata_cmd[11];
 482};
 483
 484struct mv_crqb_iie {
 485	__le32			addr;
 486	__le32			addr_hi;
 487	__le32			flags;
 488	__le32			len;
 489	__le32			ata_cmd[4];
 490};
 491
 492/* Command ResPonse Block: 8B */
 493struct mv_crpb {
 494	__le16			id;
 495	__le16			flags;
 496	__le32			tmstmp;
 497};
 498
 499/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
 500struct mv_sg {
 501	__le32			addr;
 502	__le32			flags_size;
 503	__le32			addr_hi;
 504	__le32			reserved;
 505};
 506
 507/*
 508 * We keep a local cache of a few frequently accessed port
 509 * registers here, to avoid having to read them (very slow)
 510 * when switching between EDMA and non-EDMA modes.
 511 */
 512struct mv_cached_regs {
 513	u32			fiscfg;
 514	u32			ltmode;
 515	u32			haltcond;
 516	u32			unknown_rsvd;
 517};
 518
 519struct mv_port_priv {
 520	struct mv_crqb		*crqb;
 521	dma_addr_t		crqb_dma;
 522	struct mv_crpb		*crpb;
 523	dma_addr_t		crpb_dma;
 524	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
 525	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
 526
 527	unsigned int		req_idx;
 528	unsigned int		resp_idx;
 529
 530	u32			pp_flags;
 531	struct mv_cached_regs	cached;
 532	unsigned int		delayed_eh_pmp_map;
 533};
 534
 535struct mv_port_signal {
 536	u32			amps;
 537	u32			pre;
 538};
 539
 540struct mv_host_priv {
 541	u32			hp_flags;
 542	unsigned int 		board_idx;
 543	u32			main_irq_mask;
 544	struct mv_port_signal	signal[8];
 545	const struct mv_hw_ops	*ops;
 546	int			n_ports;
 547	void __iomem		*base;
 548	void __iomem		*main_irq_cause_addr;
 549	void __iomem		*main_irq_mask_addr;
 550	u32			irq_cause_offset;
 551	u32			irq_mask_offset;
 552	u32			unmask_all_irqs;
 553
 554#if defined(CONFIG_HAVE_CLK)
 555	struct clk		*clk;
 556#endif
 557	/*
 558	 * These consistent DMA memory pools give us guaranteed
 559	 * alignment for hardware-accessed data structures,
 560	 * and less memory waste in accomplishing the alignment.
 561	 */
 562	struct dma_pool		*crqb_pool;
 563	struct dma_pool		*crpb_pool;
 564	struct dma_pool		*sg_tbl_pool;
 565};
 566
 567struct mv_hw_ops {
 568	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
 569			   unsigned int port);
 570	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
 571	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
 572			   void __iomem *mmio);
 573	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
 574			unsigned int n_hc);
 575	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
 576	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 577};
 578
 579static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 580static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 581static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 582static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 583static int mv_port_start(struct ata_port *ap);
 584static void mv_port_stop(struct ata_port *ap);
 585static int mv_qc_defer(struct ata_queued_cmd *qc);
 586static void mv_qc_prep(struct ata_queued_cmd *qc);
 587static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 588static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 589static int mv_hardreset(struct ata_link *link, unsigned int *class,
 590			unsigned long deadline);
 591static void mv_eh_freeze(struct ata_port *ap);
 592static void mv_eh_thaw(struct ata_port *ap);
 593static void mv6_dev_config(struct ata_device *dev);
 594
 595static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 596			   unsigned int port);
 597static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 598static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 599			   void __iomem *mmio);
 600static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 601			unsigned int n_hc);
 602static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 603static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 604
 605static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 606			   unsigned int port);
 607static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 608static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 609			   void __iomem *mmio);
 610static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 611			unsigned int n_hc);
 612static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 613static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
 614				      void __iomem *mmio);
 615static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
 616				      void __iomem *mmio);
 617static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
 618				  void __iomem *mmio, unsigned int n_hc);
 619static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 620				      void __iomem *mmio);
 621static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 622static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
 623				  void __iomem *mmio, unsigned int port);
 624static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 625static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 626			     unsigned int port_no);
 627static int mv_stop_edma(struct ata_port *ap);
 628static int mv_stop_edma_engine(void __iomem *port_mmio);
 629static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
 630
 631static void mv_pmp_select(struct ata_port *ap, int pmp);
 632static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 633				unsigned long deadline);
 634static int  mv_softreset(struct ata_link *link, unsigned int *class,
 635				unsigned long deadline);
 636static void mv_pmp_error_handler(struct ata_port *ap);
 637static void mv_process_crpb_entries(struct ata_port *ap,
 638					struct mv_port_priv *pp);
 639
 640static void mv_sff_irq_clear(struct ata_port *ap);
 641static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
 642static void mv_bmdma_setup(struct ata_queued_cmd *qc);
 643static void mv_bmdma_start(struct ata_queued_cmd *qc);
 644static void mv_bmdma_stop(struct ata_queued_cmd *qc);
 645static u8   mv_bmdma_status(struct ata_port *ap);
 646static u8 mv_sff_check_status(struct ata_port *ap);
 647
 648/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 649 * because we have to allow room for worst case splitting of
 650 * PRDs for 64K boundaries in mv_fill_sg().
 651 */
 652static struct scsi_host_template mv5_sht = {
 653	ATA_BASE_SHT(DRV_NAME),
 654	.sg_tablesize		= MV_MAX_SG_CT / 2,
 655	.dma_boundary		= MV_DMA_BOUNDARY,
 656};
 657
 658static struct scsi_host_template mv6_sht = {
 659	ATA_NCQ_SHT(DRV_NAME),
 660	.can_queue		= MV_MAX_Q_DEPTH - 1,
 661	.sg_tablesize		= MV_MAX_SG_CT / 2,
 662	.dma_boundary		= MV_DMA_BOUNDARY,
 663};
 664
 665static struct ata_port_operations mv5_ops = {
 666	.inherits		= &ata_sff_port_ops,
 667
 668	.lost_interrupt		= ATA_OP_NULL,
 669
 670	.qc_defer		= mv_qc_defer,
 671	.qc_prep		= mv_qc_prep,
 672	.qc_issue		= mv_qc_issue,
 673
 674	.freeze			= mv_eh_freeze,
 675	.thaw			= mv_eh_thaw,
 676	.hardreset		= mv_hardreset,
 677
 678	.scr_read		= mv5_scr_read,
 679	.scr_write		= mv5_scr_write,
 680
 681	.port_start		= mv_port_start,
 682	.port_stop		= mv_port_stop,
 683};
 684
 685static struct ata_port_operations mv6_ops = {
 686	.inherits		= &ata_bmdma_port_ops,
 687
 688	.lost_interrupt		= ATA_OP_NULL,
 689
 690	.qc_defer		= mv_qc_defer,
 691	.qc_prep		= mv_qc_prep,
 692	.qc_issue		= mv_qc_issue,
 693
 694	.dev_config             = mv6_dev_config,
 695
 696	.freeze			= mv_eh_freeze,
 697	.thaw			= mv_eh_thaw,
 698	.hardreset		= mv_hardreset,
 699	.softreset		= mv_softreset,
 700	.pmp_hardreset		= mv_pmp_hardreset,
 701	.pmp_softreset		= mv_softreset,
 702	.error_handler		= mv_pmp_error_handler,
 703
 704	.scr_read		= mv_scr_read,
 705	.scr_write		= mv_scr_write,
 706
 707	.sff_check_status	= mv_sff_check_status,
 708	.sff_irq_clear		= mv_sff_irq_clear,
 709	.check_atapi_dma	= mv_check_atapi_dma,
 710	.bmdma_setup		= mv_bmdma_setup,
 711	.bmdma_start		= mv_bmdma_start,
 712	.bmdma_stop		= mv_bmdma_stop,
 713	.bmdma_status		= mv_bmdma_status,
 714
 715	.port_start		= mv_port_start,
 716	.port_stop		= mv_port_stop,
 717};
 718
 719static struct ata_port_operations mv_iie_ops = {
 720	.inherits		= &mv6_ops,
 721	.dev_config		= ATA_OP_NULL,
 722	.qc_prep		= mv_qc_prep_iie,
 723};
 724
 725static const struct ata_port_info mv_port_info[] = {
 726	{  /* chip_504x */
 727		.flags		= MV_GEN_I_FLAGS,
 728		.pio_mask	= ATA_PIO4,
 729		.udma_mask	= ATA_UDMA6,
 730		.port_ops	= &mv5_ops,
 731	},
 732	{  /* chip_508x */
 733		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 734		.pio_mask	= ATA_PIO4,
 735		.udma_mask	= ATA_UDMA6,
 736		.port_ops	= &mv5_ops,
 737	},
 738	{  /* chip_5080 */
 739		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 740		.pio_mask	= ATA_PIO4,
 741		.udma_mask	= ATA_UDMA6,
 742		.port_ops	= &mv5_ops,
 743	},
 744	{  /* chip_604x */
 745		.flags		= MV_GEN_II_FLAGS,
 746		.pio_mask	= ATA_PIO4,
 747		.udma_mask	= ATA_UDMA6,
 748		.port_ops	= &mv6_ops,
 749	},
 750	{  /* chip_608x */
 751		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
 752		.pio_mask	= ATA_PIO4,
 753		.udma_mask	= ATA_UDMA6,
 754		.port_ops	= &mv6_ops,
 755	},
 756	{  /* chip_6042 */
 757		.flags		= MV_GEN_IIE_FLAGS,
 758		.pio_mask	= ATA_PIO4,
 759		.udma_mask	= ATA_UDMA6,
 760		.port_ops	= &mv_iie_ops,
 761	},
 762	{  /* chip_7042 */
 763		.flags		= MV_GEN_IIE_FLAGS,
 764		.pio_mask	= ATA_PIO4,
 765		.udma_mask	= ATA_UDMA6,
 766		.port_ops	= &mv_iie_ops,
 767	},
 768	{  /* chip_soc */
 769		.flags		= MV_GEN_IIE_FLAGS,
 770		.pio_mask	= ATA_PIO4,
 771		.udma_mask	= ATA_UDMA6,
 772		.port_ops	= &mv_iie_ops,
 773	},
 774};
 775
 776static const struct pci_device_id mv_pci_tbl[] = {
 777	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
 778	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 779	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 780	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
 781	/* RocketRAID 1720/174x have different identifiers */
 782	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
 783	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
 784	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
 785
 786	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 787	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
 788	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
 789	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
 790	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
 791
 792	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 793
 794	/* Adaptec 1430SA */
 795	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
 796
 797	/* Marvell 7042 support */
 798	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
 799
 800	/* Highpoint RocketRAID PCIe series */
 801	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
 802	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
 803
 804	{ }			/* terminate list */
 805};
 806
 807static const struct mv_hw_ops mv5xxx_ops = {
 808	.phy_errata		= mv5_phy_errata,
 809	.enable_leds		= mv5_enable_leds,
 810	.read_preamp		= mv5_read_preamp,
 811	.reset_hc		= mv5_reset_hc,
 812	.reset_flash		= mv5_reset_flash,
 813	.reset_bus		= mv5_reset_bus,
 814};
 815
 816static const struct mv_hw_ops mv6xxx_ops = {
 817	.phy_errata		= mv6_phy_errata,
 818	.enable_leds		= mv6_enable_leds,
 819	.read_preamp		= mv6_read_preamp,
 820	.reset_hc		= mv6_reset_hc,
 821	.reset_flash		= mv6_reset_flash,
 822	.reset_bus		= mv_reset_pci_bus,
 823};
 824
 825static const struct mv_hw_ops mv_soc_ops = {
 826	.phy_errata		= mv6_phy_errata,
 827	.enable_leds		= mv_soc_enable_leds,
 828	.read_preamp		= mv_soc_read_preamp,
 829	.reset_hc		= mv_soc_reset_hc,
 830	.reset_flash		= mv_soc_reset_flash,
 831	.reset_bus		= mv_soc_reset_bus,
 832};
 833
 834static const struct mv_hw_ops mv_soc_65n_ops = {
 835	.phy_errata		= mv_soc_65n_phy_errata,
 836	.enable_leds		= mv_soc_enable_leds,
 837	.reset_hc		= mv_soc_reset_hc,
 838	.reset_flash		= mv_soc_reset_flash,
 839	.reset_bus		= mv_soc_reset_bus,
 840};
 841
 842/*
 843 * Functions
 844 */
 845
 846static inline void writelfl(unsigned long data, void __iomem *addr)
 847{
 848	writel(data, addr);
 849	(void) readl(addr);	/* flush to avoid PCI posted write */
 850}
 851
 852static inline unsigned int mv_hc_from_port(unsigned int port)
 853{
 854	return port >> MV_PORT_HC_SHIFT;
 855}
 856
 857static inline unsigned int mv_hardport_from_port(unsigned int port)
 858{
 859	return port & MV_PORT_MASK;
 860}
 861
 862/*
 863 * Consolidate some rather tricky bit shift calculations.
 864 * This is hot-path stuff, so not a function.
 865 * Simple code, with two return values, so macro rather than inline.
 866 *
 867 * port is the sole input, in range 0..7.
 868 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 869 * hardport is the other output, in range 0..3.
 870 *
 871 * Note that port and hardport may be the same variable in some cases.
 872 */
 873#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
 874{								\
 875	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
 876	hardport = mv_hardport_from_port(port);			\
 877	shift   += hardport * 2;				\
 878}
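/*
 * Editorial worked example (not in the original source): for port 6 on a
 * dual-HC chip, mv_hc_from_port(6) = 6 >> 2 = 1 and
 * mv_hardport_from_port(6) = 6 & 3 = 2, so the macro produces
 * shift = 1 * HC_SHIFT + 2 * 2 = 9 + 4 = 13 and hardport = 2.
 * ERR_IRQ << 13 and DONE_IRQ << 13 then select bits 13 and 14 of the main
 * cause/mask registers, inside the HC1 bit range (9-17) noted in the enum
 * above.
 */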
 879
 880static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 881{
 882	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 883}
 884
 885static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 886						 unsigned int port)
 887{
 888	return mv_hc_base(base, mv_hc_from_port(port));
 889}
 890
 891static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 892{
 893	return  mv_hc_base_from_port(base, port) +
 894		MV_SATAHC_ARBTR_REG_SZ +
 895		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 896}
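/*
 * Editorial worked example (not in the original source), using the sizes
 * defined in the enum above: for port 5 (hc = 1, hardport = 1),
 * mv_port_base() resolves to
 *
 *     base + SATAHC0_REG_BASE (0x20000)
 *          + 1 * MV_SATAHC_REG_SZ (0x10000)
 *          + MV_SATAHC_ARBTR_REG_SZ (0x2000)
 *          + 1 * MV_PORT_REG_SZ (0x2000)
 *          = base + 0x34000
 *
 * i.e. each port owns an 8KB register window following its host
 * controller's arbiter block.
 */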
 897
 898static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
 899{
 900	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
 901	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
 902
 903	return hc_mmio + ofs;
 904}
 905
 906static inline void __iomem *mv_host_base(struct ata_host *host)
 907{
 908	struct mv_host_priv *hpriv = host->private_data;
 909	return hpriv->base;
 910}
 911
 912static inline void __iomem *mv_ap_base(struct ata_port *ap)
 913{
 914	return mv_port_base(mv_host_base(ap->host), ap->port_no);
 915}
 916
 917static inline int mv_get_hc_count(unsigned long port_flags)
 918{
 919	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 920}
 921
 922/**
 923 *      mv_save_cached_regs - (re-)initialize cached port registers
 924 *      @ap: the port whose registers we are caching
 925 *
 926 *	Initialize the local cache of port registers,
 927 *	so that reading them over and over again can
 928 *	be avoided on the hotter paths of this driver.
 929 *	This saves a few microseconds each time we switch
 930 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 931 */
 932static void mv_save_cached_regs(struct ata_port *ap)
 933{
 934	void __iomem *port_mmio = mv_ap_base(ap);
 935	struct mv_port_priv *pp = ap->private_data;
 936
 937	pp->cached.fiscfg = readl(port_mmio + FISCFG);
 938	pp->cached.ltmode = readl(port_mmio + LTMODE);
 939	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
 940	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 941}
 942
 943/**
 944 *      mv_write_cached_reg - write to a cached port register
 945 *      @addr: hardware address of the register
 946 *      @old: pointer to cached value of the register
 947 *      @new: new value for the register
 948 *
 949 *	Write a new value to a cached register,
 950 *	but only if the value is different from before.
 951 */
 952static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 953{
 954	if (new != *old) {
 955		unsigned long laddr;
 956		*old = new;
 957		/*
 958		 * Workaround for 88SX60x1-B2 FEr SATA#13:
 959		 * Read-after-write is needed to prevent generating 64-bit
 960		 * write cycles on the PCI bus for SATA interface registers
 961		 * at offsets ending in 0x4 or 0xc.
 962		 *
 963		 * Looks like a lot of fuss, but it avoids an unnecessary
 964		 * +1 usec read-after-write delay for unaffected registers.
 965		 */
 966		laddr = (long)addr & 0xffff;
 967		if (laddr >= 0x300 && laddr <= 0x33c) {
 968			laddr &= 0x000f;
 969			if (laddr == 0x4 || laddr == 0xc) {
 970				writelfl(new, addr); /* read after write */
 971				return;
 972			}
 973		}
 974		writel(new, addr); /* unaffected by the errata */
 975	}
 976}
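/*
 * Editorial note (not in the original source): the 0x300..0x33c window
 * checked above corresponds to the SATA interface registers defined
 * earlier, and the two of them flagged "requires read-after-write" in the
 * enum -- LTMODE (0x30c) and PHY_MODE4 (0x314) -- both end in 0xc/0x4,
 * exactly the low-nibble pattern this errata screen looks for before
 * paying the extra ~1 usec read-back.
 */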
 977
 978static void mv_set_edma_ptrs(void __iomem *port_mmio,
 979			     struct mv_host_priv *hpriv,
 980			     struct mv_port_priv *pp)
 981{
 982	u32 index;
 983
 984	/*
 985	 * initialize request queue
 986	 */
 987	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
 988	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 989
 990	WARN_ON(pp->crqb_dma & 0x3ff);
 991	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
 992	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
 993		 port_mmio + EDMA_REQ_Q_IN_PTR);
 994	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
 995
 996	/*
 997	 * initialize response queue
 998	 */
 999	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
1000	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1001
1002	WARN_ON(pp->crpb_dma & 0xff);
1003	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1004	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1005	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1006		 port_mmio + EDMA_RSP_Q_OUT_PTR);
1007}
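/*
 * Editorial worked example (not in the original source): the CRQB queue is
 * 32 entries x 32 bytes = 1KB, so a 1KB-aligned crqb_dma (the WARN_ON
 * above) leaves bits 9:0 of the register free; the current entry index
 * occupies bits 9:5 (EDMA_REQ_Q_PTR_SHIFT = 5).  For req_idx = 5 the index
 * field is 5 << 5 = 0xa0, OR'd with the base's low bits
 * (EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00).  The response queue works the
 * same way with 8-byte entries: 256B alignment, index in bits 7:3.
 */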
1008
1009static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1010{
1011	/*
1012	 * When writing to the main_irq_mask in hardware,
1013	 * we must ensure exclusivity between the interrupt coalescing bits
1014	 * and the corresponding individual port DONE_IRQ bits.
1015	 *
1016	 * Note that this register is really an "IRQ enable" register,
1017	 * not an "IRQ mask" register as Marvell's naming might suggest.
1018	 */
1019	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1020		mask &= ~DONE_IRQ_0_3;
1021	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1022		mask &= ~DONE_IRQ_4_7;
1023	writelfl(mask, hpriv->main_irq_mask_addr);
1024}
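/*
 * Editorial worked example (not in the original source): DONE_IRQ_0_3 is
 * 0x000000aa, i.e. the DONE_IRQ bits (1, 3, 5, 7) for ports 0-3, and
 * DONE_IRQ_4_7 is the same pattern shifted up by HC_SHIFT (bits 10, 12,
 * 14, 16).  So if a caller enables ALL_PORTS_COAL_DONE (bit 21), the
 * function above strips all eight per-port DONE bits and only the
 * coalesced completion interrupt remains; the per-port ERR_IRQ bits are
 * left untouched.
 */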
1025
1026static void mv_set_main_irq_mask(struct ata_host *host,
1027				 u32 disable_bits, u32 enable_bits)
1028{
1029	struct mv_host_priv *hpriv = host->private_data;
1030	u32 old_mask, new_mask;
1031
1032	old_mask = hpriv->main_irq_mask;
1033	new_mask = (old_mask & ~disable_bits) | enable_bits;
1034	if (new_mask != old_mask) {
1035		hpriv->main_irq_mask = new_mask;
1036		mv_write_main_irq_mask(new_mask, hpriv);
1037	}
1038}
1039
1040static void mv_enable_port_irqs(struct ata_port *ap,
1041				     unsigned int port_bits)
1042{
1043	unsigned int shift, hardport, port = ap->port_no;
1044	u32 disable_bits, enable_bits;
1045
1046	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1047
1048	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1049	enable_bits  = port_bits << shift;
1050	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1051}
1052
1053static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1054					  void __iomem *port_mmio,
1055					  unsigned int port_irqs)
1056{
1057	struct mv_host_priv *hpriv = ap->host->private_data;
1058	int hardport = mv_hardport_from_port(ap->port_no);
1059	void __iomem *hc_mmio = mv_hc_base_from_port(
1060				mv_host_base(ap->host), ap->port_no);
1061	u32 hc_irq_cause;
1062
1063	/* clear EDMA event indicators, if any */
1064	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1065
1066	/* clear pending irq events */
1067	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1068	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1069
1070	/* clear FIS IRQ Cause */
1071	if (IS_GEN_IIE(hpriv))
1072		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1073
1074	mv_enable_port_irqs(ap, port_irqs);
1075}
1076
1077static void mv_set_irq_coalescing(struct ata_host *host,
1078				  unsigned int count, unsigned int usecs)
1079{
1080	struct mv_host_priv *hpriv = host->private_data;
1081	void __iomem *mmio = hpriv->base, *hc_mmio;
1082	u32 coal_enable = 0;
1083	unsigned long flags;
1084	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1085	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1086							ALL_PORTS_COAL_DONE;
1087
1088	/* Disable IRQ coalescing if either threshold is zero */
1089	if (!usecs || !count) {
1090		clks = count = 0;
1091	} else {
1092		/* Respect maximum limits of the hardware */
1093		clks = usecs * COAL_CLOCKS_PER_USEC;
1094		if (clks > MAX_COAL_TIME_THRESHOLD)
1095			clks = MAX_COAL_TIME_THRESHOLD;
1096		if (count > MAX_COAL_IO_COUNT)
1097			count = MAX_COAL_IO_COUNT;
1098	}
1099
1100	spin_lock_irqsave(&host->lock, flags);
1101	mv_set_main_irq_mask(host, coal_disable, 0);
1102
1103	if (is_dual_hc && !IS_GEN_I(hpriv)) {
1104		/*
1105		 * GEN_II/GEN_IIE with dual host controllers:
1106		 * one set of global thresholds for the entire chip.
1107		 */
1108		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1109		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1110		/* clear leftover coal IRQ bit */
1111		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1112		if (count)
1113			coal_enable = ALL_PORTS_COAL_DONE;
1114		clks = count = 0; /* force clearing of regular regs below */
1115	}
1116
1117	/*
1118	 * All chips: independent thresholds for each HC on the chip.
1119	 */
1120	hc_mmio = mv_hc_base_from_port(mmio, 0);
1121	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1122	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1123	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1124	if (count)
1125		coal_enable |= PORTS_0_3_COAL_DONE;
1126	if (is_dual_hc) {
1127		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1128		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1129		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1130		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1131		if (count)
1132			coal_enable |= PORTS_4_7_COAL_DONE;
1133	}
1134
1135	mv_set_main_irq_mask(host, 0, coal_enable);
1136	spin_unlock_irqrestore(&host->lock, flags);
1137}
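/*
 * Editorial usage sketch (not in the original source): a call such as
 * mv_set_irq_coalescing(host, 4, 100) programs the per-HC (and, on
 * dual-HC GEN_II/IIE chips, the chip-wide) thresholds so a completion
 * interrupt is raised after 4 finished commands or after 100 usecs
 * (100 * COAL_CLOCKS_PER_USEC = 15000 clocks), whichever comes first.
 * Passing 0 for either argument writes zeroed thresholds and leaves the
 * coalescing bits masked, so the chip falls back to per-command DONE
 * interrupts (which mv_write_main_irq_mask() above then no longer strips).
 */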
1138
1139/**
1140 *      mv_start_edma - Enable eDMA engine
1141 *      @base: port base address
1142 *      @pp: port private data
1143 *
1144 *      Verify the local cache of the eDMA state is accurate with a
1145 *      WARN_ON.
1146 *
1147 *      LOCKING:
1148 *      Inherited from caller.
1149 */
1150static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1151			 struct mv_port_priv *pp, u8 protocol)
1152{
1153	int want_ncq = (protocol == ATA_PROT_NCQ);
1154
1155	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1156		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1157		if (want_ncq != using_ncq)
1158			mv_stop_edma(ap);
1159	}
1160	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1161		struct mv_host_priv *hpriv = ap->host->private_data;
1162
1163		mv_edma_cfg(ap, want_ncq, 1);
1164
1165		mv_set_edma_ptrs(port_mmio, hpriv, pp);
1166		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1167
1168		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1169		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1170	}
1171}
1172
1173static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1174{
1175	void __iomem *port_mmio = mv_ap_base(ap);
1176	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1177	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1178	int i;
1179
1180	/*
1181	 * Wait for the EDMA engine to finish transactions in progress.
1182	 * No idea what a good "timeout" value might be, but measurements
1183	 * indicate that it often requires hundreds of microseconds
1184	 * with two drives in-use.  So we use the 15msec value above
1185	 * as a rough guess at what even more drives might require.
1186	 */
1187	for (i = 0; i < timeout; ++i) {
1188		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1189		if ((edma_stat & empty_idle) == empty_idle)
1190			break;
1191		udelay(per_loop);
1192	}
1193	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1194}
1195
1196/**
1197 *      mv_stop_edma_engine - Disable eDMA engine
1198 *      @port_mmio: io base address
1199 *
1200 *      LOCKING:
1201 *      Inherited from caller.
1202 */
1203static int mv_stop_edma_engine(void __iomem *port_mmio)
1204{
1205	int i;
1206
1207	/* Disable eDMA.  The disable bit auto clears. */
1208	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1209
1210	/* Wait for the chip to confirm eDMA is off. */
1211	for (i = 10000; i > 0; i--) {
1212		u32 reg = readl(port_mmio + EDMA_CMD);
1213		if (!(reg & EDMA_EN))
1214			return 0;
1215		udelay(10);
1216	}
1217	return -EIO;
1218}
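/*
 * Editorial note (not in the original source): the poll above allows the
 * engine up to 10000 * 10 usecs = 100 ms to acknowledge the self-clearing
 * EDMA_DS bit before returning -EIO.  mv_stop_edma() below pairs this with
 * mv_wait_for_edma_empty_idle(), whose own wait is bounded at roughly
 * 15 ms (3000 iterations of 5 usecs).
 */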
1219
1220static int mv_stop_edma(struct ata_port *ap)
1221{
1222	void __iomem *port_mmio = mv_ap_base(ap);
1223	struct mv_port_priv *pp = ap->private_data;
1224	int err = 0;
1225
1226	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1227		return 0;
1228	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1229	mv_wait_for_edma_empty_idle(ap);
1230	if (mv_stop_edma_engine(port_mmio)) {
1231		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
1232		err = -EIO;
1233	}
1234	mv_edma_cfg(ap, 0, 0);
1235	return err;
1236}
1237
1238#ifdef ATA_DEBUG
1239static void mv_dump_mem(void __iomem *start, unsigned bytes)
1240{
1241	int b, w;
1242	for (b = 0; b < bytes; ) {
1243		DPRINTK("%p: ", start + b);
1244		for (w = 0; b < bytes && w < 4; w++) {
1245			printk("%08x ", readl(start + b));
1246			b += sizeof(u32);
1247		}
1248		printk("\n");
1249	}
1250}
1251#endif
1252
1253static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1254{
1255#ifdef ATA_DEBUG
1256	int b, w;
1257	u32 dw;
1258	for (b = 0; b < bytes; ) {
1259		DPRINTK("%02x: ", b);
1260		for (w = 0; b < bytes && w < 4; w++) {
1261			(void) pci_read_config_dword(pdev, b, &dw);
1262			printk("%08x ", dw);
1263			b += sizeof(u32);
1264		}
1265		printk("\n");
1266	}
1267#endif
1268}
1269static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1270			     struct pci_dev *pdev)
1271{
1272#ifdef ATA_DEBUG
1273	void __iomem *hc_base = mv_hc_base(mmio_base,
1274					   port >> MV_PORT_HC_SHIFT);
1275	void __iomem *port_base;
1276	int start_port, num_ports, p, start_hc, num_hcs, hc;
1277
1278	if (0 > port) {
1279		start_hc = start_port = 0;
1280		num_ports = 8;		/* should be benign for 4 port devs */
1281		num_hcs = 2;
1282	} else {
1283		start_hc = port >> MV_PORT_HC_SHIFT;
1284		start_port = port;
1285		num_ports = num_hcs = 1;
1286	}
1287	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1288		num_ports > 1 ? num_ports - 1 : start_port);
1289
1290	if (NULL != pdev) {
1291		DPRINTK("PCI config space regs:\n");
1292		mv_dump_pci_cfg(pdev, 0x68);
1293	}
1294	DPRINTK("PCI regs:\n");
1295	mv_dump_mem(mmio_base+0xc00, 0x3c);
1296	mv_dump_mem(mmio_base+0xd00, 0x34);
1297	mv_dump_mem(mmio_base+0xf00, 0x4);
1298	mv_dump_mem(mmio_base+0x1d00, 0x6c);
1299	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1300		hc_base = mv_hc_base(mmio_base, hc);
1301		DPRINTK("HC regs (HC %i):\n", hc);
1302		mv_dump_mem(hc_base, 0x1c);
1303	}
1304	for (p = start_port; p < start_port + num_ports; p++) {
1305		port_base = mv_port_base(mmio_base, p);
1306		DPRINTK("EDMA regs (port %i):\n", p);
1307		mv_dump_mem(port_base, 0x54);
1308		DPRINTK("SATA regs (port %i):\n", p);
1309		mv_dump_mem(port_base+0x300, 0x60);
1310	}
1311#endif
1312}
1313
1314static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1315{
1316	unsigned int ofs;
1317
1318	switch (sc_reg_in) {
1319	case SCR_STATUS:
1320	case SCR_CONTROL:
1321	case SCR_ERROR:
1322		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1323		break;
1324	case SCR_ACTIVE:
1325		ofs = SATA_ACTIVE;   /* active is not with the others */
1326		break;
1327	default:
1328		ofs = 0xffffffffU;
1329		break;
1330	}
1331	return ofs;
1332}
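/*
 * Editorial worked example (not in the original source), assuming the
 * standard libata numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2, SCR_ACTIVE = 3): the switch above maps SCR_STATUS to
 * 0x300, SCR_ERROR to 0x304 and SCR_CONTROL to 0x308 within the port's
 * SATA register block, while SCR_ACTIVE is special-cased to 0x350
 * (SATA_ACTIVE).  Anything else returns 0xffffffff, which the read/write
 * wrappers below treat as -EINVAL.
 */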
1333
1334static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1335{
1336	unsigned int ofs = mv_scr_offset(sc_reg_in);
1337
1338	if (ofs != 0xffffffffU) {
1339		*val = readl(mv_ap_base(link->ap) + ofs);
1340		return 0;
1341	} else
1342		return -EINVAL;
1343}
1344
1345static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1346{
1347	unsigned int ofs = mv_scr_offset(sc_reg_in);
1348
1349	if (ofs != 0xffffffffU) {
1350		void __iomem *addr = mv_ap_base(link->ap) + ofs;
1351		if (sc_reg_in == SCR_CONTROL) {
1352			/*
1353			 * Workaround for 88SX60x1 FEr SATA#26:
1354			 *
1355			 * COMRESETs have to take care not to accidentally
1356			 * put the drive to sleep when writing SCR_CONTROL.
1357			 * Setting bits 12..15 prevents this problem.
1358			 *
1359			 * So if we see an outbound COMRESET, set those bits.
1360			 * Ditto for the followup write that clears the reset.
1361			 *
1362			 * The proprietary driver does this for
1363			 * all chip versions, and so do we.
1364			 */
1365			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1366				val |= 0xf000;
1367		}
1368		writelfl(val, addr);
1369		return 0;
1370	} else
1371		return -EINVAL;
1372}
1373
1374static void mv6_dev_config(struct ata_device *adev)
1375{
1376	/*
1377	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1378	 *
1379	 * Gen-II does not support NCQ over a port multiplier
1380	 *  (no FIS-based switching).
1381	 */
1382	if (adev->flags & ATA_DFLAG_NCQ) {
1383		if (sata_pmp_attached(adev->link->ap)) {
1384			adev->flags &= ~ATA_DFLAG_NCQ;
1385			ata_dev_printk(adev, KERN_INFO,
1386				"NCQ disabled for command-based switching\n");
1387		}
1388	}
1389}
1390
1391static int mv_qc_defer(struct ata_queued_cmd *qc)
1392{
1393	struct ata_link *link = qc->dev->link;
1394	struct ata_port *ap = link->ap;
1395	struct mv_port_priv *pp = ap->private_data;
1396
1397	/*
1398	 * Don't allow new commands if we're in a delayed EH state
1399	 * for NCQ and/or FIS-based switching.
1400	 */
1401	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1402		return ATA_DEFER_PORT;
1403
1404	/* PIO commands need exclusive link: no other commands [DMA or PIO]
1405	 * can run concurrently.
1406	 * set excl_link when we want to send a PIO command in DMA mode
1407	 * or a non-NCQ command in NCQ mode.
1408	 * When we receive a command from that link, and there are no
1409	 * outstanding commands, mark a flag to clear excl_link and let
1410	 * the command go through.
1411	 */
1412	if (unlikely(ap->excl_link)) {
1413		if (link == ap->excl_link) {
1414			if (ap->nr_active_links)
1415				return ATA_DEFER_PORT;
1416			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1417			return 0;
1418		} else
1419			return ATA_DEFER_PORT;
1420	}
1421
1422	/*
1423	 * If the port is completely idle, then allow the new qc.
1424	 */
1425	if (ap->nr_active_links == 0)
1426		return 0;
1427
1428	/*
1429	 * The port is operating in host queuing mode (EDMA) with NCQ
1430	 * enabled, allow multiple NCQ commands.  EDMA also allows
1431	 * queueing multiple DMA commands but libata core currently
1432	 * doesn't allow it.
1433	 */
1434	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1435	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1436		if (ata_is_ncq(qc->tf.protocol))
1437			return 0;
1438		else {
1439			ap->excl_link = link;
1440			return ATA_DEFER_PORT;
1441		}
1442	}
1443
1444	return ATA_DEFER_PORT;
1445}
1446
1447static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1448{
1449	struct mv_port_priv *pp = ap->private_data;
1450	void __iomem *port_mmio;
1451
1452	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1453	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1454	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1455
1456	ltmode   = *old_ltmode & ~LTMODE_BIT8;
1457	haltcond = *old_haltcond | EDMA_ERR_DEV;
1458
1459	if (want_fbs) {
1460		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1461		ltmode = *old_ltmode | LTMODE_BIT8;
1462		if (want_ncq)
1463			haltcond &= ~EDMA_ERR_DEV;
1464		else
1465			fiscfg |=  FISCFG_WAIT_DEV_ERR;
1466	} else {
1467		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1468	}
1469
1470	port_mmio = mv_ap_base(ap);
1471	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1472	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1473	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1474}
1475
1476static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1477{
1478	struct mv_host_priv *hpriv = ap->host->private_data;
1479	u32 old, new;
1480
1481	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1482	old = readl(hpriv->base + GPIO_PORT_CTL);
1483	if (want_ncq)
1484		new = old | (1 << 22);
1485	else
1486		new = old & ~(1 << 22);
1487	if (new != old)
1488		writel(new, hpriv->base + GPIO_PORT_CTL);
1489}
1490
1491/**
1492 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1493 *	@ap: Port being initialized
1494 *
1495 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
1496 *
1497 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
1498 *	of basic DMA on the GEN_IIE versions of the chips.
1499 *
1500 *	This bit survives EDMA resets, and must be set for basic DMA
1501 *	to function, and should be cleared when EDMA is active.
1502 */
1503static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1504{
1505	struct mv_port_priv *pp = ap->private_data;
1506	u32 new, *old = &pp->cached.unknown_rsvd;
1507
1508	if (enable_bmdma)
1509		new = *old | 1;
1510	else
1511		new = *old & ~1;
1512	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1513}
1514
1515/*
1516 * SOC chips have an issue whereby the HDD LEDs don't always blink
1517 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1518 * of the SOC takes care of it, generating a steady blink rate when
1519 * any drive on the chip is active.
1520 *
1521 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1522 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1523 *
1524 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1525 * LED operation works then, and provides better (more accurate) feedback.
1526 *
1527 * Note that this code assumes that an SOC never has more than one HC onboard.
1528 */
1529static void mv_soc_led_blink_enable(struct ata_port *ap)
1530{
1531	struct ata_host *host = ap->host;
1532	struct mv_host_priv *hpriv = host->private_data;
1533	void __iomem *hc_mmio;
1534	u32 led_ctrl;
1535
1536	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1537		return;
1538	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1539	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1540	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1541	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1542}
1543
1544static void mv_soc_led_blink_disable(struct ata_port *ap)
1545{
1546	struct ata_host *host = ap->host;
1547	struct mv_host_priv *hpriv = host->private_data;
1548	void __iomem *hc_mmio;
1549	u32 led_ctrl;
1550	unsigned int port;
1551
1552	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1553		return;
1554
1555	/* disable led-blink only if no ports are using NCQ */
1556	for (port = 0; port < hpriv->n_ports; port++) {
1557		struct ata_port *this_ap = host->ports[port];
1558		struct mv_port_priv *pp = this_ap->private_data;
1559
1560		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1561			return;
1562	}
1563
1564	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1565	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1566	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1567	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1568}
1569
1570static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1571{
1572	u32 cfg;
1573	struct mv_port_priv *pp    = ap->private_data;
1574	struct mv_host_priv *hpriv = ap->host->private_data;
1575	void __iomem *port_mmio    = mv_ap_base(ap);
1576
1577	/* set up non-NCQ EDMA configuration */
1578	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1579	pp->pp_flags &=
1580	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1581
1582	if (IS_GEN_I(hpriv))
1583		cfg |= (1 << 8);	/* enab config burst size mask */
1584
1585	else if (IS_GEN_II(hpriv)) {
1586		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1587		mv_60x1_errata_sata25(ap, want_ncq);
1588
1589	} else if (IS_GEN_IIE(hpriv)) {
1590		int want_fbs = sata_pmp_attached(ap);
1591		/*
1592		 * Possible future enhancement:
1593		 *
1594		 * The chip can use FBS with non-NCQ, if we allow it,
1595		 * But first we need to have the error handling in place
1596		 * for this mode (datasheet section 7.3.15.4.2.3).
1597		 * So disallow non-NCQ FBS for now.
1598		 */
1599		want_fbs &= want_ncq;
1600
1601		mv_config_fbs(ap, want_ncq, want_fbs);
1602
1603		if (want_fbs) {
1604			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1605			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1606		}
1607
1608		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1609		if (want_edma) {
1610			cfg |= (1 << 22); /* enab 4-entry host queue cache */
1611			if (!IS_SOC(hpriv))
1612				cfg |= (1 << 18); /* enab early completion */
1613		}
1614		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1615			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1616		mv_bmdma_enable_iie(ap, !want_edma);
1617
1618		if (IS_SOC(hpriv)) {
1619			if (want_ncq)
1620				mv_soc_led_blink_enable(ap);
1621			else
1622				mv_soc_led_blink_disable(ap);
1623		}
1624	}
1625
1626	if (want_ncq) {
1627		cfg |= EDMA_CFG_NCQ;
1628		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1629	}
1630
1631	writelfl(cfg, port_mmio + EDMA_CFG);
1632}
1633
1634static void mv_port_free_dma_mem(struct ata_port *ap)
1635{
1636	struct mv_host_priv *hpriv = ap->host->private_data;
1637	struct mv_port_priv *pp = ap->private_data;
1638	int tag;
1639
1640	if (pp->crqb) {
1641		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1642		pp->crqb = NULL;
1643	}
1644	if (pp->crpb) {
1645		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1646		pp->crpb = NULL;
1647	}
1648	/*
1649	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1650	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1651	 */
1652	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1653		if (pp->sg_tbl[tag]) {
1654			if (tag == 0 || !IS_GEN_I(hpriv))
1655				dma_pool_free(hpriv->sg_tbl_pool,
1656					      pp->sg_tbl[tag],
1657					      pp->sg_tbl_dma[tag]);
1658			pp->sg_tbl[tag] = NULL;
1659		}
1660	}
1661}
1662
1663/**
1664 *      mv_port_start - Port specific init/start routine.
1665 *      @ap: ATA channel to manipulate
1666 *
1667 *      Allocate and point to DMA memory, init port private memory,
1668 *      zero indices.
1669 *
1670 *      LOCKING:
1671 *      Inherited from caller.
1672 */
1673static int mv_port_start(struct ata_port *ap)
1674{
1675	struct device *dev = ap->host->dev;
1676	struct mv_host_priv 

File truncated here; the remainder (through line 4423) is available in the repository linked above.