/drivers/ata/sata_mv.c
https://bitbucket.org/wisechild/galaxy-nexus · C · 4423 lines
- /*
- * sata_mv.c - Marvell SATA support
- *
- * Copyright 2008-2009: Marvell Corporation, all rights reserved.
- * Copyright 2005: EMC Corporation, all rights reserved.
- * Copyright 2005 Red Hat, Inc. All rights reserved.
- *
- * Originally written by Brett Russ.
- * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
- *
- * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
- /*
- * sata_mv TODO list:
- *
- * --> Develop a low-power-consumption strategy, and implement it.
- *
- * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
- *
- * --> [Experiment, Marvell value added] Is it possible to use target
- * mode to cross-connect two Linux boxes with Marvell cards? If so,
- * creating LibATA target mode support would be very interesting.
- *
- * Target mode, for those without docs, is the ability to directly
- * connect two SATA ports.
- */
- /*
- * 60x1-B2 errata PCI#11:
- *
- * Users of the 6041/6081 Rev.B2 chips (current is C0)
- * should be careful to insert those cards only onto PCI-X bus #0,
- * and only in device slots 0..7, not higher. The chips may not
- * work correctly otherwise (note: this is a pretty rare condition).
- */
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/pci.h>
- #include <linux/init.h>
- #include <linux/blkdev.h>
- #include <linux/delay.h>
- #include <linux/interrupt.h>
- #include <linux/dmapool.h>
- #include <linux/dma-mapping.h>
- #include <linux/device.h>
- #include <linux/clk.h>
- #include <linux/platform_device.h>
- #include <linux/ata_platform.h>
- #include <linux/mbus.h>
- #include <linux/bitops.h>
- #include <linux/gfp.h>
- #include <scsi/scsi_host.h>
- #include <scsi/scsi_cmnd.h>
- #include <scsi/scsi_device.h>
- #include <linux/libata.h>
- #define DRV_NAME "sata_mv"
- #define DRV_VERSION "1.28"
- /*
- * module options
- */
- static int msi;
- #ifdef CONFIG_PCI
- module_param(msi, int, S_IRUGO);
- MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
- #endif
- static int irq_coalescing_io_count;
- module_param(irq_coalescing_io_count, int, S_IRUGO);
- MODULE_PARM_DESC(irq_coalescing_io_count,
- "IRQ coalescing I/O count threshold (0..255)");
- static int irq_coalescing_usecs;
- module_param(irq_coalescing_usecs, int, S_IRUGO);
- MODULE_PARM_DESC(irq_coalescing_usecs,
- "IRQ coalescing time threshold in usecs");
- enum {
- /* BAR numbers below are in pci_resource_start() terms */
- MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
- MV_IO_BAR = 2, /* offset 0x18: IO space */
- MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
- MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
- MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
- /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
- COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
- MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
- MAX_COAL_IO_COUNT = 255, /* completed I/O count */
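- /*
- * Worked example: at COAL_CLOCKS_PER_USEC = 150, the 24-bit time
- * threshold saturates at (2^24 - 1) / 150 ~= 111848 usecs (~112 msec).
- */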
- MV_PCI_REG_BASE = 0,
- /*
- * Per-chip ("all ports") interrupt coalescing feature.
- * This is only for GEN_II / GEN_IIE hardware.
- *
- * Coalescing defers the interrupt until either the IO_THRESHOLD
- * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
- */
- COAL_REG_BASE = 0x18000,
- IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
- ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
- IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
- IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
- /*
- * Registers for the (unused here) transaction coalescing feature:
- */
- TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
- TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
- SATAHC0_REG_BASE = 0x20000,
- FLASH_CTL = 0x1046c,
- GPIO_PORT_CTL = 0x104f0,
- RESET_CFG = 0x180d8,
- MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
- MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
- MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
- MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
- MV_MAX_Q_DEPTH = 32,
- MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
- /* CRQB needs alignment on a 1KB boundary. Size == 1KB
- * CRPB needs alignment on a 256B boundary. Size == 256B
- * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
- */
- MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
- MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
- MV_MAX_SG_CT = 256,
- MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
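- /*
- * Worked sizes: MV_CRQB_Q_SZ = 32 B * 32 entries = 1024 B (hence the
- * 1KB alignment above); MV_CRPB_Q_SZ = 8 * 32 = 256 B; and each command
- * tag gets MV_SG_TBL_SZ = 16 B * 256 ePRDs = 4096 B of SG table.
- */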
- /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
- MV_PORT_HC_SHIFT = 2,
- MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
- /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
- MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
- /* Host Flags */
- MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
- MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
- MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
- MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
- ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
- MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
- CRQB_FLAG_READ = (1 << 0),
- CRQB_TAG_SHIFT = 1,
- CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
- CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
- CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
- CRQB_CMD_ADDR_SHIFT = 8,
- CRQB_CMD_CS = (0x2 << 11),
- CRQB_CMD_LAST = (1 << 15),
- CRPB_FLAG_STATUS_SHIFT = 8,
- CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
- CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
- EPRD_FLAG_END_OF_TBL = (1 << 31),
- /* PCI interface registers */
- MV_PCI_COMMAND = 0xc00,
- MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
- MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
- PCI_MAIN_CMD_STS = 0xd30,
- STOP_PCI_MASTER = (1 << 2),
- PCI_MASTER_EMPTY = (1 << 3),
- GLOB_SFT_RST = (1 << 4),
- MV_PCI_MODE = 0xd00,
- MV_PCI_MODE_MASK = 0x30,
- MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
- MV_PCI_DISC_TIMER = 0xd04,
- MV_PCI_MSI_TRIGGER = 0xc38,
- MV_PCI_SERR_MASK = 0xc28,
- MV_PCI_XBAR_TMOUT = 0x1d04,
- MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
- MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
- MV_PCI_ERR_ATTRIBUTE = 0x1d48,
- MV_PCI_ERR_COMMAND = 0x1d50,
- PCI_IRQ_CAUSE = 0x1d58,
- PCI_IRQ_MASK = 0x1d5c,
- PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
- PCIE_IRQ_CAUSE = 0x1900,
- PCIE_IRQ_MASK = 0x1910,
- PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
- /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
- PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
- PCI_HC_MAIN_IRQ_MASK = 0x1d64,
- SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
- SOC_HC_MAIN_IRQ_MASK = 0x20024,
- ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
- DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
- HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
- HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
- DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
- DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
- PCI_ERR = (1 << 18),
- TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
- TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
- PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
- PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
- ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
- GPIO_INT = (1 << 22),
- SELF_INT = (1 << 23),
- TWSI_INT = (1 << 24),
- HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
- HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
- HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
- /* SATAHC registers */
- HC_CFG = 0x00,
- HC_IRQ_CAUSE = 0x14,
- DMA_IRQ = (1 << 0), /* shift by port # */
- HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
- DEV_IRQ = (1 << 8), /* shift by port # */
- /*
- * Per-HC (Host-Controller) interrupt coalescing feature.
- * This is present on all chip generations.
- *
- * Coalescing defers the interrupt until either the IO_THRESHOLD
- * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
- */
- HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
- HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
- SOC_LED_CTRL = 0x2c,
- SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
- SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
- /* with dev activity LED */
- /* Shadow block registers */
- SHD_BLK = 0x100,
- SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
- /* SATA registers */
- SATA_STATUS = 0x300, /* ctrl, err regs follow status */
- SATA_ACTIVE = 0x350,
- FIS_IRQ_CAUSE = 0x364,
- FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
- LTMODE = 0x30c, /* requires read-after-write */
- LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
- PHY_MODE2 = 0x330,
- PHY_MODE3 = 0x310,
- PHY_MODE4 = 0x314, /* requires read-after-write */
- PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
- PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
- PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
- PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
- SATA_IFCTL = 0x344,
- SATA_TESTCTL = 0x348,
- SATA_IFSTAT = 0x34c,
- VENDOR_UNIQUE_FIS = 0x35c,
- FISCFG = 0x360,
- FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
- FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
- PHY_MODE9_GEN2 = 0x398,
- PHY_MODE9_GEN1 = 0x39c,
- PHYCFG_OFS = 0x3a0, /* only in 65n devices */
- MV5_PHY_MODE = 0x74,
- MV5_LTMODE = 0x30,
- MV5_PHY_CTL = 0x0C,
- SATA_IFCFG = 0x050,
- MV_M2_PREAMP_MASK = 0x7e0,
- /* Port registers */
- EDMA_CFG = 0,
- EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
- EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
- EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
- EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
- EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
- EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
- EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
- EDMA_ERR_IRQ_CAUSE = 0x8,
- EDMA_ERR_IRQ_MASK = 0xc,
- EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
- EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
- EDMA_ERR_DEV = (1 << 2), /* device error */
- EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
- EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
- EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
- EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
- EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
- EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
- EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
- EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
- EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
- EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
- EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
- EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
- EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
- EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
- EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
- EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
- EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
- EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
- EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
- EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
- EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
- EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
- EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
- EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
- EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
- EDMA_ERR_OVERRUN_5 = (1 << 5),
- EDMA_ERR_UNDERRUN_5 = (1 << 6),
- EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
- EDMA_ERR_LNK_CTRL_RX_1 |
- EDMA_ERR_LNK_CTRL_RX_3 |
- EDMA_ERR_LNK_CTRL_TX,
- EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
- EDMA_ERR_PRD_PAR |
- EDMA_ERR_DEV_DCON |
- EDMA_ERR_DEV_CON |
- EDMA_ERR_SERR |
- EDMA_ERR_SELF_DIS |
- EDMA_ERR_CRQB_PAR |
- EDMA_ERR_CRPB_PAR |
- EDMA_ERR_INTRL_PAR |
- EDMA_ERR_IORDY |
- EDMA_ERR_LNK_CTRL_RX_2 |
- EDMA_ERR_LNK_DATA_RX |
- EDMA_ERR_LNK_DATA_TX |
- EDMA_ERR_TRANS_PROTO,
- EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
- EDMA_ERR_PRD_PAR |
- EDMA_ERR_DEV_DCON |
- EDMA_ERR_DEV_CON |
- EDMA_ERR_OVERRUN_5 |
- EDMA_ERR_UNDERRUN_5 |
- EDMA_ERR_SELF_DIS_5 |
- EDMA_ERR_CRQB_PAR |
- EDMA_ERR_CRPB_PAR |
- EDMA_ERR_INTRL_PAR |
- EDMA_ERR_IORDY,
- EDMA_REQ_Q_BASE_HI = 0x10,
- EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
- EDMA_REQ_Q_OUT_PTR = 0x18,
- EDMA_REQ_Q_PTR_SHIFT = 5,
- EDMA_RSP_Q_BASE_HI = 0x1c,
- EDMA_RSP_Q_IN_PTR = 0x20,
- EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
- EDMA_RSP_Q_PTR_SHIFT = 3,
- EDMA_CMD = 0x28, /* EDMA command register */
- EDMA_EN = (1 << 0), /* enable EDMA */
- EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
- EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
- EDMA_STATUS = 0x30, /* EDMA engine status */
- EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
- EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
- EDMA_IORDY_TMOUT = 0x34,
- EDMA_ARB_CFG = 0x38,
- EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
- EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
- BMDMA_CMD = 0x224, /* bmdma command register */
- BMDMA_STATUS = 0x228, /* bmdma status register */
- BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
- BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
- /* Host private flags (hp_flags) */
- MV_HP_FLAG_MSI = (1 << 0),
- MV_HP_ERRATA_50XXB0 = (1 << 1),
- MV_HP_ERRATA_50XXB2 = (1 << 2),
- MV_HP_ERRATA_60X1B2 = (1 << 3),
- MV_HP_ERRATA_60X1C0 = (1 << 4),
- MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
- MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
- MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
- MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
- MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
- MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
- MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
- /* Port private flags (pp_flags) */
- MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
- MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
- MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
- MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
- MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
- };
- #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
- #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
- #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
- #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
- #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
- #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
- #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
- enum {
- /* DMA boundary 0xffff is required by the s/g splitting
- * we need on /length/ in mv_fill_sg().
- */
- MV_DMA_BOUNDARY = 0xffffU,
- /* mask of register bits containing lower 32 bits
- * of EDMA request queue DMA address
- */
- EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
- /* ditto, for response queue */
- EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
- };
- enum chip_type {
- chip_504x,
- chip_508x,
- chip_5080,
- chip_604x,
- chip_608x,
- chip_6042,
- chip_7042,
- chip_soc,
- };
- /* Command ReQuest Block: 32B */
- struct mv_crqb {
- __le32 sg_addr;
- __le32 sg_addr_hi;
- __le16 ctrl_flags;
- __le16 ata_cmd[11];
- };
- struct mv_crqb_iie {
- __le32 addr;
- __le32 addr_hi;
- __le32 flags;
- __le32 len;
- __le32 ata_cmd[4];
- };
- /* Command ResPonse Block: 8B */
- struct mv_crpb {
- __le16 id;
- __le16 flags;
- __le32 tmstmp;
- };
- /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
- struct mv_sg {
- __le32 addr;
- __le32 flags_size;
- __le32 addr_hi;
- __le32 reserved;
- };
- /*
- * We keep a local cache of a few frequently accessed port
- * registers here, to avoid having to read them (very slow)
- * when switching between EDMA and non-EDMA modes.
- */
- struct mv_cached_regs {
- u32 fiscfg;
- u32 ltmode;
- u32 haltcond;
- u32 unknown_rsvd;
- };
- struct mv_port_priv {
- struct mv_crqb *crqb;
- dma_addr_t crqb_dma;
- struct mv_crpb *crpb;
- dma_addr_t crpb_dma;
- struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
- dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
- unsigned int req_idx;
- unsigned int resp_idx;
- u32 pp_flags;
- struct mv_cached_regs cached;
- unsigned int delayed_eh_pmp_map;
- };
- struct mv_port_signal {
- u32 amps;
- u32 pre;
- };
- struct mv_host_priv {
- u32 hp_flags;
- unsigned int board_idx;
- u32 main_irq_mask;
- struct mv_port_signal signal[8];
- const struct mv_hw_ops *ops;
- int n_ports;
- void __iomem *base;
- void __iomem *main_irq_cause_addr;
- void __iomem *main_irq_mask_addr;
- u32 irq_cause_offset;
- u32 irq_mask_offset;
- u32 unmask_all_irqs;
- #if defined(CONFIG_HAVE_CLK)
- struct clk *clk;
- #endif
- /*
- * These consistent DMA memory pools give us guaranteed
- * alignment for hardware-accessed data structures,
- * and less memory waste in accomplishing the alignment.
- */
- struct dma_pool *crqb_pool;
- struct dma_pool *crpb_pool;
- struct dma_pool *sg_tbl_pool;
- };
- struct mv_hw_ops {
- void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int port);
- void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
- void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
- void __iomem *mmio);
- int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int n_hc);
- void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
- void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
- };
- static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
- static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
- static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
- static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
- static int mv_port_start(struct ata_port *ap);
- static void mv_port_stop(struct ata_port *ap);
- static int mv_qc_defer(struct ata_queued_cmd *qc);
- static void mv_qc_prep(struct ata_queued_cmd *qc);
- static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
- static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
- static int mv_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
- static void mv_eh_freeze(struct ata_port *ap);
- static void mv_eh_thaw(struct ata_port *ap);
- static void mv6_dev_config(struct ata_device *dev);
- static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int port);
- static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
- static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
- void __iomem *mmio);
- static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int n_hc);
- static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
- static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
- static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int port);
- static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
- static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
- void __iomem *mmio);
- static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int n_hc);
- static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
- static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
- void __iomem *mmio);
- static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
- void __iomem *mmio);
- static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
- void __iomem *mmio, unsigned int n_hc);
- static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
- void __iomem *mmio);
- static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
- static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
- void __iomem *mmio, unsigned int port);
- static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
- static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int port_no);
- static int mv_stop_edma(struct ata_port *ap);
- static int mv_stop_edma_engine(void __iomem *port_mmio);
- static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
- static void mv_pmp_select(struct ata_port *ap, int pmp);
- static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
- static int mv_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
- static void mv_pmp_error_handler(struct ata_port *ap);
- static void mv_process_crpb_entries(struct ata_port *ap,
- struct mv_port_priv *pp);
- static void mv_sff_irq_clear(struct ata_port *ap);
- static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
- static void mv_bmdma_setup(struct ata_queued_cmd *qc);
- static void mv_bmdma_start(struct ata_queued_cmd *qc);
- static void mv_bmdma_stop(struct ata_queued_cmd *qc);
- static u8 mv_bmdma_status(struct ata_port *ap);
- static u8 mv_sff_check_status(struct ata_port *ap);
- /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
- * because we have to allow room for worst case splitting of
- * PRDs for 64K boundaries in mv_fill_sg().
- */
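- /*
- * Example: one sg entry that crosses a 64K boundary must be emitted
- * as two ePRDs, one per side of the boundary, so capping user entries
- * at MV_MAX_SG_CT / 2 = 128 guarantees at most MV_MAX_SG_CT ePRDs.
- */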
- static struct scsi_host_template mv5_sht = {
- ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = MV_MAX_SG_CT / 2,
- .dma_boundary = MV_DMA_BOUNDARY,
- };
- static struct scsi_host_template mv6_sht = {
- ATA_NCQ_SHT(DRV_NAME),
- .can_queue = MV_MAX_Q_DEPTH - 1,
- .sg_tablesize = MV_MAX_SG_CT / 2,
- .dma_boundary = MV_DMA_BOUNDARY,
- };
- static struct ata_port_operations mv5_ops = {
- .inherits = &ata_sff_port_ops,
- .lost_interrupt = ATA_OP_NULL,
- .qc_defer = mv_qc_defer,
- .qc_prep = mv_qc_prep,
- .qc_issue = mv_qc_issue,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .scr_read = mv5_scr_read,
- .scr_write = mv5_scr_write,
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
- };
- static struct ata_port_operations mv6_ops = {
- .inherits = &ata_bmdma_port_ops,
- .lost_interrupt = ATA_OP_NULL,
- .qc_defer = mv_qc_defer,
- .qc_prep = mv_qc_prep,
- .qc_issue = mv_qc_issue,
- .dev_config = mv6_dev_config,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .softreset = mv_softreset,
- .pmp_hardreset = mv_pmp_hardreset,
- .pmp_softreset = mv_softreset,
- .error_handler = mv_pmp_error_handler,
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
- .sff_check_status = mv_sff_check_status,
- .sff_irq_clear = mv_sff_irq_clear,
- .check_atapi_dma = mv_check_atapi_dma,
- .bmdma_setup = mv_bmdma_setup,
- .bmdma_start = mv_bmdma_start,
- .bmdma_stop = mv_bmdma_stop,
- .bmdma_status = mv_bmdma_status,
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
- };
- static struct ata_port_operations mv_iie_ops = {
- .inherits = &mv6_ops,
- .dev_config = ATA_OP_NULL,
- .qc_prep = mv_qc_prep_iie,
- };
- static const struct ata_port_info mv_port_info[] = {
- { /* chip_504x */
- .flags = MV_GEN_I_FLAGS,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv5_ops,
- },
- { /* chip_508x */
- .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv5_ops,
- },
- { /* chip_5080 */
- .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv5_ops,
- },
- { /* chip_604x */
- .flags = MV_GEN_II_FLAGS,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv6_ops,
- },
- { /* chip_608x */
- .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv6_ops,
- },
- { /* chip_6042 */
- .flags = MV_GEN_IIE_FLAGS,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv_iie_ops,
- },
- { /* chip_7042 */
- .flags = MV_GEN_IIE_FLAGS,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv_iie_ops,
- },
- { /* chip_soc */
- .flags = MV_GEN_IIE_FLAGS,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &mv_iie_ops,
- },
- };
- static const struct pci_device_id mv_pci_tbl[] = {
- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
- /* RocketRAID 1720/174x have different identifiers */
- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
- /* Adaptec 1430SA */
- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
- /* Marvell 7042 support */
- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
- /* Highpoint RocketRAID PCIe series */
- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
- { } /* terminate list */
- };
- static const struct mv_hw_ops mv5xxx_ops = {
- .phy_errata = mv5_phy_errata,
- .enable_leds = mv5_enable_leds,
- .read_preamp = mv5_read_preamp,
- .reset_hc = mv5_reset_hc,
- .reset_flash = mv5_reset_flash,
- .reset_bus = mv5_reset_bus,
- };
- static const struct mv_hw_ops mv6xxx_ops = {
- .phy_errata = mv6_phy_errata,
- .enable_leds = mv6_enable_leds,
- .read_preamp = mv6_read_preamp,
- .reset_hc = mv6_reset_hc,
- .reset_flash = mv6_reset_flash,
- .reset_bus = mv_reset_pci_bus,
- };
- static const struct mv_hw_ops mv_soc_ops = {
- .phy_errata = mv6_phy_errata,
- .enable_leds = mv_soc_enable_leds,
- .read_preamp = mv_soc_read_preamp,
- .reset_hc = mv_soc_reset_hc,
- .reset_flash = mv_soc_reset_flash,
- .reset_bus = mv_soc_reset_bus,
- };
- static const struct mv_hw_ops mv_soc_65n_ops = {
- .phy_errata = mv_soc_65n_phy_errata,
- .enable_leds = mv_soc_enable_leds,
- .reset_hc = mv_soc_reset_hc,
- .reset_flash = mv_soc_reset_flash,
- .reset_bus = mv_soc_reset_bus,
- };
- /*
- * Functions
- */
- static inline void writelfl(unsigned long data, void __iomem *addr)
- {
- writel(data, addr);
- (void) readl(addr); /* flush to avoid PCI posted write */
- }
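- /*
- * Example: writelfl(EDMA_EN, port_mmio + EDMA_CMD) ensures the enable
- * bit has actually reached the chip before the caller proceeds,
- * instead of lingering in a posted-write buffer along the PCI path.
- */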
- static inline unsigned int mv_hc_from_port(unsigned int port)
- {
- return port >> MV_PORT_HC_SHIFT;
- }
- static inline unsigned int mv_hardport_from_port(unsigned int port)
- {
- return port & MV_PORT_MASK;
- }
- /*
- * Consolidate some rather tricky bit shift calculations.
- * This is hot-path stuff, so not a function.
- * Simple code, with two return values, so macro rather than inline.
- *
- * port is the sole input, in range 0..7.
- * shift is one output, for use with main_irq_cause / main_irq_mask registers.
- * hardport is the other output, in range 0..3.
- *
- * Note that port and hardport may be the same variable in some cases.
- */
- #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
- { \
- shift = mv_hc_from_port(port) * HC_SHIFT; \
- hardport = mv_hardport_from_port(port); \
- shift += hardport * 2; \
- }
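- /*
- * Worked example: port 5 sits on HC1, so shift starts at 1 * HC_SHIFT = 9;
- * hardport = 5 & 3 = 1, so shift becomes 9 + 1 * 2 = 11. DONE_IRQ and
- * ERR_IRQ for port 5 are then bits 12 and 11 of the main irq registers.
- */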
- static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
- {
- return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
- }
- static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
- unsigned int port)
- {
- return mv_hc_base(base, mv_hc_from_port(port));
- }
- static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
- {
- return mv_hc_base_from_port(base, port) +
- MV_SATAHC_ARBTR_REG_SZ +
- (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
- }
- static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
- {
- void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
- unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
- return hc_mmio + ofs;
- }
- static inline void __iomem *mv_host_base(struct ata_host *host)
- {
- struct mv_host_priv *hpriv = host->private_data;
- return hpriv->base;
- }
- static inline void __iomem *mv_ap_base(struct ata_port *ap)
- {
- return mv_port_base(mv_host_base(ap->host), ap->port_no);
- }
- static inline int mv_get_hc_count(unsigned long port_flags)
- {
- return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
- }
- /**
- * mv_save_cached_regs - (re-)initialize cached port registers
- * @ap: the port whose registers we are caching
- *
- * Initialize the local cache of port registers,
- * so that reading them over and over again can
- * be avoided on the hotter paths of this driver.
- * This saves a few microseconds each time we switch
- * to/from EDMA mode to perform (e.g.) a drive cache flush.
- */
- static void mv_save_cached_regs(struct ata_port *ap)
- {
- void __iomem *port_mmio = mv_ap_base(ap);
- struct mv_port_priv *pp = ap->private_data;
- pp->cached.fiscfg = readl(port_mmio + FISCFG);
- pp->cached.ltmode = readl(port_mmio + LTMODE);
- pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
- pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
- }
- /**
- * mv_write_cached_reg - write to a cached port register
- * @addr: hardware address of the register
- * @old: pointer to cached value of the register
- * @new: new value for the register
- *
- * Write a new value to a cached register,
- * but only if the value is different from before.
- */
- static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
- {
- if (new != *old) {
- unsigned long laddr;
- *old = new;
- /*
- * Workaround for 88SX60x1-B2 FEr SATA#13:
- * Read-after-write is needed to prevent generating 64-bit
- * write cycles on the PCI bus for SATA interface registers
- * at offsets ending in 0x4 or 0xc.
- *
- * Looks like a lot of fuss, but it avoids an unnecessary
- * +1 usec read-after-write delay for unaffected registers.
- */
- laddr = (long)addr & 0xffff;
- if (laddr >= 0x300 && laddr <= 0x33c) {
- laddr &= 0x000f;
- if (laddr == 0x4 || laddr == 0xc) {
- writelfl(new, addr); /* read after write */
- return;
- }
- }
- writel(new, addr); /* unaffected by the errata */
- }
- }
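- /*
- * Example: within 0x300..0x33c, offsets ending in 0x4 or 0xc include
- * LTMODE (0x30c) and PHY_MODE4 (0x314), the same two registers marked
- * "requires read-after-write" in the enum above.
- */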
- static void mv_set_edma_ptrs(void __iomem *port_mmio,
- struct mv_host_priv *hpriv,
- struct mv_port_priv *pp)
- {
- u32 index;
- /*
- * initialize request queue
- */
- pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
- index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
- WARN_ON(pp->crqb_dma & 0x3ff);
- writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
- writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
- port_mmio + EDMA_REQ_Q_IN_PTR);
- writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
- /*
- * initialize response queue
- */
- pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
- index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
- WARN_ON(pp->crpb_dma & 0xff);
- writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
- writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
- writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
- port_mmio + EDMA_RSP_Q_OUT_PTR);
- }
- static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
- {
- /*
- * When writing to the main_irq_mask in hardware,
- * we must ensure exclusivity between the interrupt coalescing bits
- * and the corresponding individual port DONE_IRQ bits.
- *
- * Note that this register is really an "IRQ enable" register,
- * not an "IRQ mask" register as Marvell's naming might suggest.
- */
- if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
- mask &= ~DONE_IRQ_0_3;
- if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
- mask &= ~DONE_IRQ_4_7;
- writelfl(mask, hpriv->main_irq_mask_addr);
- }
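- /*
- * Example: enabling ALL_PORTS_COAL_DONE clears DONE_IRQ_0_3 (0xaa) and
- * DONE_IRQ_4_7 from the mask, so each completion on ports 0..7 is
- * reported only via the coalescing event, never a second time.
- */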
- static void mv_set_main_irq_mask(struct ata_host *host,
- u32 disable_bits, u32 enable_bits)
- {
- struct mv_host_priv *hpriv = host->private_data;
- u32 old_mask, new_mask;
- old_mask = hpriv->main_irq_mask;
- new_mask = (old_mask & ~disable_bits) | enable_bits;
- if (new_mask != old_mask) {
- hpriv->main_irq_mask = new_mask;
- mv_write_main_irq_mask(new_mask, hpriv);
- }
- }
- static void mv_enable_port_irqs(struct ata_port *ap,
- unsigned int port_bits)
- {
- unsigned int shift, hardport, port = ap->port_no;
- u32 disable_bits, enable_bits;
- MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
- disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
- enable_bits = port_bits << shift;
- mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
- }
- static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
- void __iomem *port_mmio,
- unsigned int port_irqs)
- {
- struct mv_host_priv *hpriv = ap->host->private_data;
- int hardport = mv_hardport_from_port(ap->port_no);
- void __iomem *hc_mmio = mv_hc_base_from_port(
- mv_host_base(ap->host), ap->port_no);
- u32 hc_irq_cause;
- /* clear EDMA event indicators, if any */
- writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
- /* clear pending irq events */
- hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
- writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
- /* clear FIS IRQ Cause */
- if (IS_GEN_IIE(hpriv))
- writelfl(0, port_mmio + FIS_IRQ_CAUSE);
- mv_enable_port_irqs(ap, port_irqs);
- }
- static void mv_set_irq_coalescing(struct ata_host *host,
- unsigned int count, unsigned int usecs)
- {
- struct mv_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->base, *hc_mmio;
- u32 coal_enable = 0;
- unsigned long flags;
- unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
- const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
- ALL_PORTS_COAL_DONE;
- /* Disable IRQ coalescing if either threshold is zero */
- if (!usecs || !count) {
- clks = count = 0;
- } else {
- /* Respect maximum limits of the hardware */
- clks = usecs * COAL_CLOCKS_PER_USEC;
- if (clks > MAX_COAL_TIME_THRESHOLD)
- clks = MAX_COAL_TIME_THRESHOLD;
- if (count > MAX_COAL_IO_COUNT)
- count = MAX_COAL_IO_COUNT;
- }
- spin_lock_irqsave(&host->lock, flags);
- mv_set_main_irq_mask(host, coal_disable, 0);
- if (is_dual_hc && !IS_GEN_I(hpriv)) {
- /*
- * GEN_II/GEN_IIE with dual host controllers:
- * one set of global thresholds for the entire chip.
- */
- writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
- writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
- /* clear leftover coal IRQ bit */
- writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
- if (count)
- coal_enable = ALL_PORTS_COAL_DONE;
- clks = count = 0; /* force clearing of regular regs below */
- }
- /*
- * All chips: independent thresholds for each HC on the chip.
- */
- hc_mmio = mv_hc_base_from_port(mmio, 0);
- writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
- writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
- writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
- if (count)
- coal_enable |= PORTS_0_3_COAL_DONE;
- if (is_dual_hc) {
- hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
- writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
- writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
- writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
- if (count)
- coal_enable |= PORTS_4_7_COAL_DONE;
- }
- mv_set_main_irq_mask(host, 0, coal_enable);
- spin_unlock_irqrestore(&host->lock, flags);
- }
- /**
- * mv_start_edma - Enable eDMA engine
- * @ap: ATA channel to manipulate
- * @port_mmio: port base address
- * @pp: port private data
- * @protocol: taskfile protocol of the command about to be issued
- *
- * Enable EDMA if it is not already running, first stopping and
- * reconfiguring it when the cached NCQ state does not match the
- * needs of @protocol.
- *
- * LOCKING:
- * Inherited from caller.
- */
- static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
- struct mv_port_priv *pp, u8 protocol)
- {
- int want_ncq = (protocol == ATA_PROT_NCQ);
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
- if (want_ncq != using_ncq)
- mv_stop_edma(ap);
- }
- if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
- struct mv_host_priv *hpriv = ap->host->private_data;
- mv_edma_cfg(ap, want_ncq, 1);
- mv_set_edma_ptrs(port_mmio, hpriv, pp);
- mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
- writelfl(EDMA_EN, port_mmio + EDMA_CMD);
- pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
- }
- }
- static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
- {
- void __iomem *port_mmio = mv_ap_base(ap);
- const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
- const int per_loop = 5, timeout = (15 * 1000 / per_loop);
- int i;
- /*
- * Wait for the EDMA engine to finish transactions in progress.
- * No idea what a good "timeout" value might be, but measurements
- * indicate that it often requires hundreds of microseconds
- * with two drives in-use. So we use the 15msec value above
- * as a rough guess at what even more drives might require.
- */
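- /*
- * Worked numbers: timeout = 15 * 1000 / 5 = 3000 polls of 5 usecs
- * each, i.e. a 15 msec ceiling on the wait.
- */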
- for (i = 0; i < timeout; ++i) {
- u32 edma_stat = readl(port_mmio + EDMA_STATUS);
- if ((edma_stat & empty_idle) == empty_idle)
- break;
- udelay(per_loop);
- }
- /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
- }
- /**
- * mv_stop_edma_engine - Disable eDMA engine
- * @port_mmio: io base address
- *
- * LOCKING:
- * Inherited from caller.
- */
- static int mv_stop_edma_engine(void __iomem *port_mmio)
- {
- int i;
- /* Disable eDMA. The disable bit auto clears. */
- writelfl(EDMA_DS, port_mmio + EDMA_CMD);
- /* Wait for the chip to confirm eDMA is off. */
- for (i = 10000; i > 0; i--) {
- u32 reg = readl(port_mmio + EDMA_CMD);
- if (!(reg & EDMA_EN))
- return 0;
- udelay(10);
- }
- return -EIO;
- }
- static int mv_stop_edma(struct ata_port *ap)
- {
- void __iomem *port_mmio = mv_ap_base(ap);
- struct mv_port_priv *pp = ap->private_data;
- int err = 0;
- if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
- return 0;
- pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
- mv_wait_for_edma_empty_idle(ap);
- if (mv_stop_edma_engine(port_mmio)) {
- ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
- err = -EIO;
- }
- mv_edma_cfg(ap, 0, 0);
- return err;
- }
- #ifdef ATA_DEBUG
- static void mv_dump_mem(void __iomem *start, unsigned bytes)
- {
- int b, w;
- for (b = 0; b < bytes; ) {
- DPRINTK("%p: ", start + b);
- for (w = 0; b < bytes && w < 4; w++) {
- printk("%08x ", readl(start + b));
- b += sizeof(u32);
- }
- printk("\n");
- }
- }
- #endif
- static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
- {
- #ifdef ATA_DEBUG
- int b, w;
- u32 dw;
- for (b = 0; b < bytes; ) {
- DPRINTK("%02x: ", b);
- for (w = 0; b < bytes && w < 4; w++) {
- (void) pci_read_config_dword(pdev, b, &dw);
- printk("%08x ", dw);
- b += sizeof(u32);
- }
- printk("\n");
- }
- #endif
- }
- static void mv_dump_all_regs(void __iomem *mmio_base, int port,
- struct pci_dev *pdev)
- {
- #ifdef ATA_DEBUG
- void __iomem *hc_base = mv_hc_base(mmio_base,
- port >> MV_PORT_HC_SHIFT);
- void __iomem *port_base;
- int start_port, num_ports, p, start_hc, num_hcs, hc;
- if (port < 0) {
- start_hc = start_port = 0;
- num_ports = 8; /* should be benign for 4 port devs */
- num_hcs = 2;
- } else {
- start_hc = port >> MV_PORT_HC_SHIFT;
- start_port = port;
- num_ports = num_hcs = 1;
- }
- DPRINTK("All registers for port(s) %u-%u:\n", start_port,
- num_ports > 1 ? num_ports - 1 : start_port);
- if (NULL != pdev) {
- DPRINTK("PCI config space regs:\n");
- mv_dump_pci_cfg(pdev, 0x68);
- }
- DPRINTK("PCI regs:\n");
- mv_dump_mem(mmio_base+0xc00, 0x3c);
- mv_dump_mem(mmio_base+0xd00, 0x34);
- mv_dump_mem(mmio_base+0xf00, 0x4);
- mv_dump_mem(mmio_base+0x1d00, 0x6c);
- for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
- hc_base = mv_hc_base(mmio_base, hc);
- DPRINTK("HC regs (HC %i):\n", hc);
- mv_dump_mem(hc_base, 0x1c);
- }
- for (p = start_port; p < start_port + num_ports; p++) {
- port_base = mv_port_base(mmio_base, p);
- DPRINTK("EDMA regs (port %i):\n", p);
- mv_dump_mem(port_base, 0x54);
- DPRINTK("SATA regs (port %i):\n", p);
- mv_dump_mem(port_base+0x300, 0x60);
- }
- #endif
- }
- static unsigned int mv_scr_offset(unsigned int sc_reg_in)
- {
- unsigned int ofs;
- switch (sc_reg_in) {
- case SCR_STATUS:
- case SCR_CONTROL:
- case SCR_ERROR:
- ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
- break;
- case SCR_ACTIVE:
- ofs = SATA_ACTIVE; /* active is not with the others */
- break;
- default:
- ofs = 0xffffffffU;
- break;
- }
- return ofs;
- }
- static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
- {
- unsigned int ofs = mv_scr_offset(sc_reg_in);
- if (ofs != 0xffffffffU) {
- *val = readl(mv_ap_base(link->ap) + ofs);
- return 0;
- } else
- return -EINVAL;
- }
- static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
- {
- unsigned int ofs = mv_scr_offset(sc_reg_in);
- if (ofs != 0xffffffffU) {
- void __iomem *addr = mv_ap_base(link->ap) + ofs;
- if (sc_reg_in == SCR_CONTROL) {
- /*
- * Workaround for 88SX60x1 FEr SATA#26:
- *
- * COMRESETs have to take care not to accidentally
- * put the drive to sleep when writing SCR_CONTROL.
- * Setting bits 12..15 prevents this problem.
- *
- * So if we see an outbound COMRESET, set those bits.
- * Ditto for the followup write that clears the reset.
- *
- * The proprietary driver does this for
- * all chip versions, and so do we.
- */
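- /* Example: a COMRESET write of 0x301 thus goes out as 0xf301. */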
- if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
- val |= 0xf000;
- }
- writelfl(val, addr);
- return 0;
- } else
- return -EINVAL;
- }
- static void mv6_dev_config(struct ata_device *adev)
- {
- /*
- * Deal with Gen-II ("mv6") hardware quirks/restrictions:
- *
- * Gen-II does not support NCQ over a port multiplier
- * (no FIS-based switching).
- */
- if (adev->flags & ATA_DFLAG_NCQ) {
- if (sata_pmp_attached(adev->link->ap)) {
- adev->flags &= ~ATA_DFLAG_NCQ;
- ata_dev_printk(adev, KERN_INFO,
- "NCQ disabled for command-based switching\n");
- }
- }
- }
- static int mv_qc_defer(struct ata_queued_cmd *qc)
- {
- struct ata_link *link = qc->dev->link;
- struct ata_port *ap = link->ap;
- struct mv_port_priv *pp = ap->private_data;
- /*
- * Don't allow new commands if we're in a delayed EH state
- * for NCQ and/or FIS-based switching.
- */
- if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
- return ATA_DEFER_PORT;
- /* PIO commands need an exclusive link: no other commands [DMA or PIO]
- * can run concurrently.
- * Set excl_link when we want to send a PIO command in DMA mode
- * or a non-NCQ command in NCQ mode.
- * When we receive a command from that link, and there are no
- * outstanding commands, mark a flag to clear excl_link and let
- * the command go through.
- */
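- /*
- * Example scenario (illustrative): with NCQ writes in flight, a
- * FLUSH CACHE (non-NCQ) arrives; excl_link is set and the flush is
- * deferred until the NCQ queue drains, then allowed through alone.
- */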
- if (unlikely(ap->excl_link)) {
- if (link == ap->excl_link) {
- if (ap->nr_active_links)
- return ATA_DEFER_PORT;
- qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
- return 0;
- } else
- return ATA_DEFER_PORT;
- }
- /*
- * If the port is completely idle, then allow the new qc.
- */
- if (ap->nr_active_links == 0)
- return 0;
- /*
- * The port is operating in host queuing mode (EDMA) with NCQ
- * enabled, allow multiple NCQ commands. EDMA also allows
- * queueing multiple DMA commands but libata core currently
- * doesn't allow it.
- */
- if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
- (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
- if (ata_is_ncq(qc->tf.protocol))
- return 0;
- else {
- ap->excl_link = link;
- return ATA_DEFER_PORT;
- }
- }
- return ATA_DEFER_PORT;
- }
- static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
- {
- struct mv_port_priv *pp = ap->private_data;
- void __iomem *port_mmio;
- u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
- u32 ltmode, *old_ltmode = &pp->cached.ltmode;
- u32 haltcond, *old_haltcond = &pp->cached.haltcond;
- ltmode = *old_ltmode & ~LTMODE_BIT8;
- haltcond = *old_haltcond | EDMA_ERR_DEV;
- if (want_fbs) {
- fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
- ltmode = *old_ltmode | LTMODE_BIT8;
- if (want_ncq)
- haltcond &= ~EDMA_ERR_DEV;
- else
- fiscfg |= FISCFG_WAIT_DEV_ERR;
- } else {
- fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
- }
- port_mmio = mv_ap_base(ap);
- mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
- mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
- mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
- }
- static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
- {
- struct mv_host_priv *hpriv = ap->host->private_data;
- u32 old, new;
- /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
- old = readl(hpriv->base + GPIO_PORT_CTL);
- if (want_ncq)
- new = old | (1 << 22);
- else
- new = old & ~(1 << 22);
- if (new != old)
- writel(new, hpriv->base + GPIO_PORT_CTL);
- }
- /**
- * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
- * @ap: Port being initialized
- *
- * There are two DMA modes on these chips: basic DMA, and EDMA.
- *
- * Bit-0 of the "EDMA RESERVED" register enables/disables use
- * of basic DMA on the GEN_IIE versions of the chips.
- *
- * This bit survives EDMA resets, and must be set for basic DMA
- * to function, and should be cleared when EDMA is active.
- */
- static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
- {
- struct mv_port_priv *pp = ap->private_data;
- u32 new, *old = &pp->cached.unknown_rsvd;
- if (enable_bmdma)
- new = *old | 1;
- else
- new = *old & ~1;
- mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
- }
- /*
- * SOC chips have an issue whereby the HDD LEDs don't always blink
- * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
- * of the SOC takes care of it, generating a steady blink rate when
- * any drive on the chip is active.
- *
- * Unfortunately, the blink mode is a global hardware setting for the SOC,
- * so we must use it whenever at least one port on the SOC has NCQ enabled.
- *
- * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
- * LED operation works then, and provides better (more accurate) feedback.
- *
- * Note that this code assumes that an SOC never has more than one HC onboard.
- */
- static void mv_soc_led_blink_enable(struct ata_port *ap)
- {
- struct ata_host *host = ap->host;
- struct mv_host_priv *hpriv = host->private_data;
- void __iomem *hc_mmio;
- u32 led_ctrl;
- if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
- return;
- hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
- hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
- led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
- writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
- }
- static void mv_soc_led_blink_disable(struct ata_port *ap)
- {
- struct ata_host *host = ap->host;
- struct mv_host_priv *hpriv = host->private_data;
- void __iomem *hc_mmio;
- u32 led_ctrl;
- unsigned int port;
- if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
- return;
- /* disable led-blink only if no ports are using NCQ */
- for (port = 0; port < hpriv->n_ports; port++) {
- struct ata_port *this_ap = host->ports[port];
- struct mv_port_priv *pp = this_ap->private_data;
- if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
- return;
- }
- hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
- hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
- led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
- writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
- }
- static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
- {
- u32 cfg;
- struct mv_port_priv *pp = ap->private_data;
- struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = mv_ap_base(ap);
- /* set up non-NCQ EDMA configuration */
- cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
- pp->pp_flags &=
- ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
- if (IS_GEN_I(hpriv))
- cfg |= (1 << 8); /* enab config burst size mask */
- else if (IS_GEN_II(hpriv)) {
- cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
- mv_60x1_errata_sata25(ap, want_ncq);
- } else if (IS_GEN_IIE(hpriv)) {
- int want_fbs = sata_pmp_attached(ap);
- /*
- * Possible future enhancement:
- *
- * The chip can use FBS with non-NCQ, if we allow it,
- * but first we need to have the error handling in place
- * for that mode (datasheet section 7.3.15.4.2.3),
- * so disallow non-NCQ FBS for now.
- */
- want_fbs &= want_ncq;
- mv_config_fbs(ap, want_ncq, want_fbs);
- if (want_fbs) {
- pp->pp_flags |= MV_PP_FLAG_FBS_EN;
- cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
- }
- cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
- if (want_edma) {
- cfg |= (1 << 22); /* enab 4-entry host queue cache */
- if (!IS_SOC(hpriv))
- cfg |= (1 << 18); /* enab early completion */
- }
- if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
- cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
- mv_bmdma_enable_iie(ap, !want_edma);
- if (IS_SOC(hpriv)) {
- if (want_ncq)
- mv_soc_led_blink_enable(ap);
- else
- mv_soc_led_blink_disable(ap);
- }
- }
- if (want_ncq) {
- cfg |= EDMA_CFG_NCQ;
- pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
- }
- writelfl(cfg, port_mmio + EDMA_CFG);
- }
- static void mv_port_free_dma_mem(struct ata_port *ap)
- {
- struct mv_host_priv *hpriv = ap->host->private_data;
- struct mv_port_priv *pp = ap->private_data;
- int tag;
- if (pp->crqb) {
- dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
- pp->crqb = NULL;
- }
- if (pp->crpb) {
- dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
- pp->crpb = NULL;
- }
- /*
- * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
- * For later hardware, we have one unique sg_tbl per NCQ tag.
- */
- for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
- if (pp->sg_tbl[tag]) {
- if (tag == 0 || !IS_GEN_I(hpriv))
- dma_pool_free(hpriv->sg_tbl_pool,
- pp->sg_tbl[tag],
- pp->sg_tbl_dma[tag]);
- pp->sg_tbl[tag] = NULL;
- }
- }
- }
- /**
- * mv_port_start - Port specific init/start routine.
- * @ap: ATA channel to manipulate
- *
- * Allocate and point to DMA memory, init port private memory,
- * zero indices.
- *
- * LOCKING:
- * Inherited from caller.
- */
- static int mv_port_start(struct ata_port *ap)
- {
- struct device *dev = ap->host->dev;
- struct mv_host_priv…