PageRenderTime 59ms CodeModel.GetById 9ms app.highlight 36ms RepoModel.GetById 0ms app.codeStats 1ms

/drivers/net/forcedeth.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
C | 6010 lines | 4811 code | 707 blank | 492 comment | 928 complexity | 70cf9487e9803ef5670962268cc00f5c MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated, but you can click here to view the full file

   1/*
   2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
   3 *
   4 * Note: This driver is a cleanroom reimplementation based on reverse
   5 *      engineered documentation written by Carl-Daniel Hailfinger
   6 *      and Andrew de Quincey.
   7 *
   8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
   9 * trademarks of NVIDIA Corporation in the United States and other
  10 * countries.
  11 *
  12 * Copyright (C) 2003,4,5 Manfred Spraul
  13 * Copyright (C) 2004 Andrew de Quincey (wol support)
  14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  15 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
  16 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
  17 *
  18 * This program is free software; you can redistribute it and/or modify
  19 * it under the terms of the GNU General Public License as published by
  20 * the Free Software Foundation; either version 2 of the License, or
  21 * (at your option) any later version.
  22 *
  23 * This program is distributed in the hope that it will be useful,
  24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  26 * GNU General Public License for more details.
  27 *
  28 * You should have received a copy of the GNU General Public License
  29 * along with this program; if not, write to the Free Software
  30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  31 *
  32 * Known bugs:
  33 * We suspect that on some hardware no TX done interrupts are generated.
  34 * This means recovery from netif_stop_queue only happens if the hw timer
  35 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
  36 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
  37 * If your hardware reliably generates tx done interrupts, then you can remove
  38 * DEV_NEED_TIMERIRQ from the driver_data flags.
  39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  40 * superfluous timer interrupts from the nic.
  41 */
  42
  43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  44
  45#define FORCEDETH_VERSION		"0.64"
  46#define DRV_NAME			"forcedeth"
  47
  48#include <linux/module.h>
  49#include <linux/types.h>
  50#include <linux/pci.h>
  51#include <linux/interrupt.h>
  52#include <linux/netdevice.h>
  53#include <linux/etherdevice.h>
  54#include <linux/delay.h>
  55#include <linux/sched.h>
  56#include <linux/spinlock.h>
  57#include <linux/ethtool.h>
  58#include <linux/timer.h>
  59#include <linux/skbuff.h>
  60#include <linux/mii.h>
  61#include <linux/random.h>
  62#include <linux/init.h>
  63#include <linux/if_vlan.h>
  64#include <linux/dma-mapping.h>
  65#include <linux/slab.h>
  66#include <linux/uaccess.h>
  67#include <linux/prefetch.h>
  68#include  <linux/io.h>
  69
  70#include <asm/irq.h>
  71#include <asm/system.h>
  72
  73#define TX_WORK_PER_LOOP  64
  74#define RX_WORK_PER_LOOP  64
  75
  76/*
  77 * Hardware access:
  78 */
  79
  80#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
  81#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
  82#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
  83#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
  84#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
  85#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and striping */
  86#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
  87#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
  88#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
  89#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
  90#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
  91#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
  92#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
  93#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
  94#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
  95#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
  96#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
  97#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
  98#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
  99#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
 100#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
 101#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
 102#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, expect for some revs */
 103#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
 104#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
 105#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
 106#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
 107
 108enum {
 109	NvRegIrqStatus = 0x000,
 110#define NVREG_IRQSTAT_MIIEVENT	0x040
 111#define NVREG_IRQSTAT_MASK		0x83ff
 112	NvRegIrqMask = 0x004,
 113#define NVREG_IRQ_RX_ERROR		0x0001
 114#define NVREG_IRQ_RX			0x0002
 115#define NVREG_IRQ_RX_NOBUF		0x0004
 116#define NVREG_IRQ_TX_ERR		0x0008
 117#define NVREG_IRQ_TX_OK			0x0010
 118#define NVREG_IRQ_TIMER			0x0020
 119#define NVREG_IRQ_LINK			0x0040
 120#define NVREG_IRQ_RX_FORCED		0x0080
 121#define NVREG_IRQ_TX_FORCED		0x0100
 122#define NVREG_IRQ_RECOVER_ERROR		0x8200
 123#define NVREG_IRQMASK_THROUGHPUT	0x00df
 124#define NVREG_IRQMASK_CPU		0x0060
 125#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
 126#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
 127#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
 128
 129	NvRegUnknownSetupReg6 = 0x008,
 130#define NVREG_UNKSETUP6_VAL		3
 131
 132/*
 133 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 134 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 135 */
 136	NvRegPollingInterval = 0x00c,
 137#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
 138#define NVREG_POLL_DEFAULT_CPU	13
 139	NvRegMSIMap0 = 0x020,
 140	NvRegMSIMap1 = 0x024,
 141	NvRegMSIIrqMask = 0x030,
 142#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 143	NvRegMisc1 = 0x080,
 144#define NVREG_MISC1_PAUSE_TX	0x01
 145#define NVREG_MISC1_HD		0x02
 146#define NVREG_MISC1_FORCE	0x3b0f3c
 147
 148	NvRegMacReset = 0x34,
 149#define NVREG_MAC_RESET_ASSERT	0x0F3
 150	NvRegTransmitterControl = 0x084,
 151#define NVREG_XMITCTL_START	0x01
 152#define NVREG_XMITCTL_MGMT_ST	0x40000000
 153#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
 154#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
 155#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
 156#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
 157#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
 158#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
 159#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
 160#define NVREG_XMITCTL_HOST_LOADED	0x00004000
 161#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
 162#define NVREG_XMITCTL_DATA_START	0x00100000
 163#define NVREG_XMITCTL_DATA_READY	0x00010000
 164#define NVREG_XMITCTL_DATA_ERROR	0x00020000
 165	NvRegTransmitterStatus = 0x088,
 166#define NVREG_XMITSTAT_BUSY	0x01
 167
 168	NvRegPacketFilterFlags = 0x8c,
 169#define NVREG_PFF_PAUSE_RX	0x08
 170#define NVREG_PFF_ALWAYS	0x7F0000
 171#define NVREG_PFF_PROMISC	0x80
 172#define NVREG_PFF_MYADDR	0x20
 173#define NVREG_PFF_LOOPBACK	0x10
 174
 175	NvRegOffloadConfig = 0x90,
 176#define NVREG_OFFLOAD_HOMEPHY	0x601
 177#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
 178	NvRegReceiverControl = 0x094,
 179#define NVREG_RCVCTL_START	0x01
 180#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
 181	NvRegReceiverStatus = 0x98,
 182#define NVREG_RCVSTAT_BUSY	0x01
 183
 184	NvRegSlotTime = 0x9c,
 185#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
 186#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
 187#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
 188#define NVREG_SLOTTIME_HALF		0x0000ff00
 189#define NVREG_SLOTTIME_DEFAULT		0x00007f00
 190#define NVREG_SLOTTIME_MASK		0x000000ff
 191
 192	NvRegTxDeferral = 0xA0,
 193#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
 194#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
 195#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
 196#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
 197#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
 198#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
 199	NvRegRxDeferral = 0xA4,
 200#define NVREG_RX_DEFERRAL_DEFAULT	0x16
 201	NvRegMacAddrA = 0xA8,
 202	NvRegMacAddrB = 0xAC,
 203	NvRegMulticastAddrA = 0xB0,
 204#define NVREG_MCASTADDRA_FORCE	0x01
 205	NvRegMulticastAddrB = 0xB4,
 206	NvRegMulticastMaskA = 0xB8,
 207#define NVREG_MCASTMASKA_NONE		0xffffffff
 208	NvRegMulticastMaskB = 0xBC,
 209#define NVREG_MCASTMASKB_NONE		0xffff
 210
 211	NvRegPhyInterface = 0xC0,
 212#define PHY_RGMII		0x10000000
 213	NvRegBackOffControl = 0xC4,
 214#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
 215#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
 216#define NVREG_BKOFFCTRL_SELECT			24
 217#define NVREG_BKOFFCTRL_GEAR			12
 218
 219	NvRegTxRingPhysAddr = 0x100,
 220	NvRegRxRingPhysAddr = 0x104,
 221	NvRegRingSizes = 0x108,
 222#define NVREG_RINGSZ_TXSHIFT 0
 223#define NVREG_RINGSZ_RXSHIFT 16
 224	NvRegTransmitPoll = 0x10c,
 225#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
 226	NvRegLinkSpeed = 0x110,
 227#define NVREG_LINKSPEED_FORCE 0x10000
 228#define NVREG_LINKSPEED_10	1000
 229#define NVREG_LINKSPEED_100	100
 230#define NVREG_LINKSPEED_1000	50
 231#define NVREG_LINKSPEED_MASK	(0xFFF)
 232	NvRegUnknownSetupReg5 = 0x130,
 233#define NVREG_UNKSETUP5_BIT31	(1<<31)
 234	NvRegTxWatermark = 0x13c,
 235#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
 236#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
 237#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
 238	NvRegTxRxControl = 0x144,
 239#define NVREG_TXRXCTL_KICK	0x0001
 240#define NVREG_TXRXCTL_BIT1	0x0002
 241#define NVREG_TXRXCTL_BIT2	0x0004
 242#define NVREG_TXRXCTL_IDLE	0x0008
 243#define NVREG_TXRXCTL_RESET	0x0010
 244#define NVREG_TXRXCTL_RXCHECK	0x0400
 245#define NVREG_TXRXCTL_DESC_1	0
 246#define NVREG_TXRXCTL_DESC_2	0x002100
 247#define NVREG_TXRXCTL_DESC_3	0xc02200
 248#define NVREG_TXRXCTL_VLANSTRIP 0x00040
 249#define NVREG_TXRXCTL_VLANINS	0x00080
 250	NvRegTxRingPhysAddrHigh = 0x148,
 251	NvRegRxRingPhysAddrHigh = 0x14C,
 252	NvRegTxPauseFrame = 0x170,
 253#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
 254#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
 255#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
 256#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
 257	NvRegTxPauseFrameLimit = 0x174,
 258#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
 259	NvRegMIIStatus = 0x180,
 260#define NVREG_MIISTAT_ERROR		0x0001
 261#define NVREG_MIISTAT_LINKCHANGE	0x0008
 262#define NVREG_MIISTAT_MASK_RW		0x0007
 263#define NVREG_MIISTAT_MASK_ALL		0x000f
 264	NvRegMIIMask = 0x184,
 265#define NVREG_MII_LINKCHANGE		0x0008
 266
 267	NvRegAdapterControl = 0x188,
 268#define NVREG_ADAPTCTL_START	0x02
 269#define NVREG_ADAPTCTL_LINKUP	0x04
 270#define NVREG_ADAPTCTL_PHYVALID	0x40000
 271#define NVREG_ADAPTCTL_RUNNING	0x100000
 272#define NVREG_ADAPTCTL_PHYSHIFT	24
 273	NvRegMIISpeed = 0x18c,
 274#define NVREG_MIISPEED_BIT8	(1<<8)
 275#define NVREG_MIIDELAY	5
 276	NvRegMIIControl = 0x190,
 277#define NVREG_MIICTL_INUSE	0x08000
 278#define NVREG_MIICTL_WRITE	0x00400
 279#define NVREG_MIICTL_ADDRSHIFT	5
 280	NvRegMIIData = 0x194,
 281	NvRegTxUnicast = 0x1a0,
 282	NvRegTxMulticast = 0x1a4,
 283	NvRegTxBroadcast = 0x1a8,
 284	NvRegWakeUpFlags = 0x200,
 285#define NVREG_WAKEUPFLAGS_VAL		0x7770
 286#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
 287#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
 288#define NVREG_WAKEUPFLAGS_D3SHIFT	12
 289#define NVREG_WAKEUPFLAGS_D2SHIFT	8
 290#define NVREG_WAKEUPFLAGS_D1SHIFT	4
 291#define NVREG_WAKEUPFLAGS_D0SHIFT	0
 292#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
 293#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
 294#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
 295#define NVREG_WAKEUPFLAGS_ENABLE	0x1111
 296
 297	NvRegMgmtUnitGetVersion = 0x204,
 298#define NVREG_MGMTUNITGETVERSION	0x01
 299	NvRegMgmtUnitVersion = 0x208,
 300#define NVREG_MGMTUNITVERSION		0x08
 301	NvRegPowerCap = 0x268,
 302#define NVREG_POWERCAP_D3SUPP	(1<<30)
 303#define NVREG_POWERCAP_D2SUPP	(1<<26)
 304#define NVREG_POWERCAP_D1SUPP	(1<<25)
 305	NvRegPowerState = 0x26c,
 306#define NVREG_POWERSTATE_POWEREDUP	0x8000
 307#define NVREG_POWERSTATE_VALID		0x0100
 308#define NVREG_POWERSTATE_MASK		0x0003
 309#define NVREG_POWERSTATE_D0		0x0000
 310#define NVREG_POWERSTATE_D1		0x0001
 311#define NVREG_POWERSTATE_D2		0x0002
 312#define NVREG_POWERSTATE_D3		0x0003
 313	NvRegMgmtUnitControl = 0x278,
 314#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
 315	NvRegTxCnt = 0x280,
 316	NvRegTxZeroReXmt = 0x284,
 317	NvRegTxOneReXmt = 0x288,
 318	NvRegTxManyReXmt = 0x28c,
 319	NvRegTxLateCol = 0x290,
 320	NvRegTxUnderflow = 0x294,
 321	NvRegTxLossCarrier = 0x298,
 322	NvRegTxExcessDef = 0x29c,
 323	NvRegTxRetryErr = 0x2a0,
 324	NvRegRxFrameErr = 0x2a4,
 325	NvRegRxExtraByte = 0x2a8,
 326	NvRegRxLateCol = 0x2ac,
 327	NvRegRxRunt = 0x2b0,
 328	NvRegRxFrameTooLong = 0x2b4,
 329	NvRegRxOverflow = 0x2b8,
 330	NvRegRxFCSErr = 0x2bc,
 331	NvRegRxFrameAlignErr = 0x2c0,
 332	NvRegRxLenErr = 0x2c4,
 333	NvRegRxUnicast = 0x2c8,
 334	NvRegRxMulticast = 0x2cc,
 335	NvRegRxBroadcast = 0x2d0,
 336	NvRegTxDef = 0x2d4,
 337	NvRegTxFrame = 0x2d8,
 338	NvRegRxCnt = 0x2dc,
 339	NvRegTxPause = 0x2e0,
 340	NvRegRxPause = 0x2e4,
 341	NvRegRxDropFrame = 0x2e8,
 342	NvRegVlanControl = 0x300,
 343#define NVREG_VLANCONTROL_ENABLE	0x2000
 344	NvRegMSIXMap0 = 0x3e0,
 345	NvRegMSIXMap1 = 0x3e4,
 346	NvRegMSIXIrqStatus = 0x3f0,
 347
 348	NvRegPowerState2 = 0x600,
 349#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
 350#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 351#define NVREG_POWERSTATE2_PHY_RESET		0x0004
 352#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
 353};
 354
 355/* Big endian: should work, but is untested */
/* Legacy (DESC_VER_1/2) hardware descriptor: 32-bit buffer address plus a
 * combined flags/length word. Layout is fixed by the NIC — do not reorder. */
struct ring_desc {
	__le32 buf;	/* DMA address of the packet buffer (little-endian) */
	__le32 flaglen;	/* NV_TX*/NV_RX* flags in the high bits, length in the low bits */
};
 360
/* Extended (DESC_VER_3) hardware descriptor: 64-bit buffer address split into
 * high/low words, plus a tx vlan word. Layout is fixed by the NIC. */
struct ring_desc_ex {
	__le32 bufhigh;	/* upper 32 bits of the buffer DMA address */
	__le32 buflow;	/* lower 32 bits of the buffer DMA address */
	__le32 txvlan;	/* vlan tag to insert on tx (NV_TX3_VLAN_TAG_PRESENT) */
	__le32 flaglen;	/* flags in the high bits, length in the low bits */
};
 367
/* A descriptor ring is viewed either as legacy or extended descriptors,
 * depending on the descriptor version selected at probe time. */
union ring_type {
	struct ring_desc *orig;		/* DESC_VER_1 / DESC_VER_2 */
	struct ring_desc_ex *ex;	/* DESC_VER_3 */
};
 372
 373#define FLAG_MASK_V1 0xffff0000
 374#define FLAG_MASK_V2 0xffffc000
 375#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
 376#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
 377
 378#define NV_TX_LASTPACKET	(1<<16)
 379#define NV_TX_RETRYERROR	(1<<19)
 380#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
 381#define NV_TX_FORCED_INTERRUPT	(1<<24)
 382#define NV_TX_DEFERRED		(1<<26)
 383#define NV_TX_CARRIERLOST	(1<<27)
 384#define NV_TX_LATECOLLISION	(1<<28)
 385#define NV_TX_UNDERFLOW		(1<<29)
 386#define NV_TX_ERROR		(1<<30)
 387#define NV_TX_VALID		(1<<31)
 388
 389#define NV_TX2_LASTPACKET	(1<<29)
 390#define NV_TX2_RETRYERROR	(1<<18)
 391#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
 392#define NV_TX2_FORCED_INTERRUPT	(1<<30)
 393#define NV_TX2_DEFERRED		(1<<25)
 394#define NV_TX2_CARRIERLOST	(1<<26)
 395#define NV_TX2_LATECOLLISION	(1<<27)
 396#define NV_TX2_UNDERFLOW	(1<<28)
 397/* error and valid are the same for both */
 398#define NV_TX2_ERROR		(1<<30)
 399#define NV_TX2_VALID		(1<<31)
 400#define NV_TX2_TSO		(1<<28)
 401#define NV_TX2_TSO_SHIFT	14
 402#define NV_TX2_TSO_MAX_SHIFT	14
 403#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
 404#define NV_TX2_CHECKSUM_L3	(1<<27)
 405#define NV_TX2_CHECKSUM_L4	(1<<26)
 406
 407#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
 408
 409#define NV_RX_DESCRIPTORVALID	(1<<16)
 410#define NV_RX_MISSEDFRAME	(1<<17)
 411#define NV_RX_SUBSTRACT1	(1<<18)
 412#define NV_RX_ERROR1		(1<<23)
 413#define NV_RX_ERROR2		(1<<24)
 414#define NV_RX_ERROR3		(1<<25)
 415#define NV_RX_ERROR4		(1<<26)
 416#define NV_RX_CRCERR		(1<<27)
 417#define NV_RX_OVERFLOW		(1<<28)
 418#define NV_RX_FRAMINGERR	(1<<29)
 419#define NV_RX_ERROR		(1<<30)
 420#define NV_RX_AVAIL		(1<<31)
 421#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
 422
 423#define NV_RX2_CHECKSUMMASK	(0x1C000000)
 424#define NV_RX2_CHECKSUM_IP	(0x10000000)
 425#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
 426#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
 427#define NV_RX2_DESCRIPTORVALID	(1<<29)
 428#define NV_RX2_SUBSTRACT1	(1<<25)
 429#define NV_RX2_ERROR1		(1<<18)
 430#define NV_RX2_ERROR2		(1<<19)
 431#define NV_RX2_ERROR3		(1<<20)
 432#define NV_RX2_ERROR4		(1<<21)
 433#define NV_RX2_CRCERR		(1<<22)
 434#define NV_RX2_OVERFLOW		(1<<23)
 435#define NV_RX2_FRAMINGERR	(1<<24)
 436/* error and avail are the same for both */
 437#define NV_RX2_ERROR		(1<<30)
 438#define NV_RX2_AVAIL		(1<<31)
 439#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
 440
 441#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
 442#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 443
 444/* Miscellaneous hardware related defines: */
 445#define NV_PCI_REGSZ_VER1	0x270
 446#define NV_PCI_REGSZ_VER2	0x2d4
 447#define NV_PCI_REGSZ_VER3	0x604
 448#define NV_PCI_REGSZ_MAX	0x604
 449
 450/* various timeout delays: all in usec */
 451#define NV_TXRX_RESET_DELAY	4
 452#define NV_TXSTOP_DELAY1	10
 453#define NV_TXSTOP_DELAY1MAX	500000
 454#define NV_TXSTOP_DELAY2	100
 455#define NV_RXSTOP_DELAY1	10
 456#define NV_RXSTOP_DELAY1MAX	500000
 457#define NV_RXSTOP_DELAY2	100
 458#define NV_SETUP5_DELAY		5
 459#define NV_SETUP5_DELAYMAX	50000
 460#define NV_POWERUP_DELAY	5
 461#define NV_POWERUP_DELAYMAX	5000
 462#define NV_MIIBUSY_DELAY	50
 463#define NV_MIIPHY_DELAY	10
 464#define NV_MIIPHY_DELAYMAX	10000
 465#define NV_MAC_RESET_DELAY	64
 466
 467#define NV_WAKEUPPATTERNS	5
 468#define NV_WAKEUPMASKENTRIES	4
 469
 470/* General driver defaults */
 471#define NV_WATCHDOG_TIMEO	(5*HZ)
 472
 473#define RX_RING_DEFAULT		512
 474#define TX_RING_DEFAULT		256
 475#define RX_RING_MIN		128
 476#define TX_RING_MIN		64
 477#define RING_MAX_DESC_VER_1	1024
 478#define RING_MAX_DESC_VER_2_3	16384
 479
 480/* rx/tx mac addr + type + vlan + align + slack*/
 481#define NV_RX_HEADERS		(64)
 482/* even more slack. */
 483#define NV_RX_ALLOC_PAD		(64)
 484
 485/* maximum mtu size */
 486#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
 487#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
 488
 489#define OOM_REFILL	(1+HZ/20)
 490#define POLL_WAIT	(1+HZ/100)
 491#define LINK_TIMEOUT	(3*HZ)
 492#define STATS_INTERVAL	(10*HZ)
 493
 494/*
 495 * desc_ver values:
 496 * The nic supports three different descriptor types:
 497 * - DESC_VER_1: Original
 498 * - DESC_VER_2: support for jumbo frames.
 499 * - DESC_VER_3: 64-bit format.
 500 */
 501#define DESC_VER_1	1
 502#define DESC_VER_2	2
 503#define DESC_VER_3	3
 504
 505/* PHY defines */
 506#define PHY_OUI_MARVELL		0x5043
 507#define PHY_OUI_CICADA		0x03f1
 508#define PHY_OUI_VITESSE		0x01c1
 509#define PHY_OUI_REALTEK		0x0732
 510#define PHY_OUI_REALTEK2	0x0020
 511#define PHYID1_OUI_MASK	0x03ff
 512#define PHYID1_OUI_SHFT	6
 513#define PHYID2_OUI_MASK	0xfc00
 514#define PHYID2_OUI_SHFT	10
 515#define PHYID2_MODEL_MASK		0x03f0
 516#define PHY_MODEL_REALTEK_8211		0x0110
 517#define PHY_REV_MASK			0x0001
 518#define PHY_REV_REALTEK_8211B		0x0000
 519#define PHY_REV_REALTEK_8211C		0x0001
 520#define PHY_MODEL_REALTEK_8201		0x0200
 521#define PHY_MODEL_MARVELL_E3016		0x0220
 522#define PHY_MARVELL_E3016_INITMASK	0x0300
 523#define PHY_CICADA_INIT1	0x0f000
 524#define PHY_CICADA_INIT2	0x0e00
 525#define PHY_CICADA_INIT3	0x01000
 526#define PHY_CICADA_INIT4	0x0200
 527#define PHY_CICADA_INIT5	0x0004
 528#define PHY_CICADA_INIT6	0x02000
 529#define PHY_VITESSE_INIT_REG1	0x1f
 530#define PHY_VITESSE_INIT_REG2	0x10
 531#define PHY_VITESSE_INIT_REG3	0x11
 532#define PHY_VITESSE_INIT_REG4	0x12
 533#define PHY_VITESSE_INIT_MSK1	0xc
 534#define PHY_VITESSE_INIT_MSK2	0x0180
 535#define PHY_VITESSE_INIT1	0x52b5
 536#define PHY_VITESSE_INIT2	0xaf8a
 537#define PHY_VITESSE_INIT3	0x8
 538#define PHY_VITESSE_INIT4	0x8f8a
 539#define PHY_VITESSE_INIT5	0xaf86
 540#define PHY_VITESSE_INIT6	0x8f86
 541#define PHY_VITESSE_INIT7	0xaf82
 542#define PHY_VITESSE_INIT8	0x0100
 543#define PHY_VITESSE_INIT9	0x8f82
 544#define PHY_VITESSE_INIT10	0x0
 545#define PHY_REALTEK_INIT_REG1	0x1f
 546#define PHY_REALTEK_INIT_REG2	0x19
 547#define PHY_REALTEK_INIT_REG3	0x13
 548#define PHY_REALTEK_INIT_REG4	0x14
 549#define PHY_REALTEK_INIT_REG5	0x18
 550#define PHY_REALTEK_INIT_REG6	0x11
 551#define PHY_REALTEK_INIT_REG7	0x01
 552#define PHY_REALTEK_INIT1	0x0000
 553#define PHY_REALTEK_INIT2	0x8e00
 554#define PHY_REALTEK_INIT3	0x0001
 555#define PHY_REALTEK_INIT4	0xad17
 556#define PHY_REALTEK_INIT5	0xfb54
 557#define PHY_REALTEK_INIT6	0xf5c7
 558#define PHY_REALTEK_INIT7	0x1000
 559#define PHY_REALTEK_INIT8	0x0003
 560#define PHY_REALTEK_INIT9	0x0008
 561#define PHY_REALTEK_INIT10	0x0005
 562#define PHY_REALTEK_INIT11	0x0200
 563#define PHY_REALTEK_INIT_MSK1	0x0003
 564
 565#define PHY_GIGABIT	0x0100
 566
 567#define PHY_TIMEOUT	0x1
 568#define PHY_ERROR	0x2
 569
 570#define PHY_100	0x1
 571#define PHY_1000	0x2
 572#define PHY_HALF	0x100
 573
 574#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
 575#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
 576#define NV_PAUSEFRAME_RX_ENABLE  0x0004
 577#define NV_PAUSEFRAME_TX_ENABLE  0x0008
 578#define NV_PAUSEFRAME_RX_REQ     0x0010
 579#define NV_PAUSEFRAME_TX_REQ     0x0020
 580#define NV_PAUSEFRAME_AUTONEG    0x0040
 581
 582/* MSI/MSI-X defines */
 583#define NV_MSI_X_MAX_VECTORS  8
 584#define NV_MSI_X_VECTORS_MASK 0x000f
 585#define NV_MSI_CAPABLE        0x0010
 586#define NV_MSI_X_CAPABLE      0x0020
 587#define NV_MSI_ENABLED        0x0040
 588#define NV_MSI_X_ENABLED      0x0080
 589
 590#define NV_MSI_X_VECTOR_ALL   0x0
 591#define NV_MSI_X_VECTOR_RX    0x0
 592#define NV_MSI_X_VECTOR_TX    0x1
 593#define NV_MSI_X_VECTOR_OTHER 0x2
 594
 595#define NV_MSI_PRIV_OFFSET 0x68
 596#define NV_MSI_PRIV_VALUE  0xffffffff
 597
 598#define NV_RESTART_TX         0x1
 599#define NV_RESTART_RX         0x2
 600
 601#define NV_TX_LIMIT_COUNT     16
 602
 603#define NV_DYNAMIC_THRESHOLD        4
 604#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
 605
 606/* statistics */
/* Fixed-width name buffer for one ethtool statistic or self-test string. */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
 610
 611static const struct nv_ethtool_str nv_estats_str[] = {
 612	{ "tx_bytes" },
 613	{ "tx_zero_rexmt" },
 614	{ "tx_one_rexmt" },
 615	{ "tx_many_rexmt" },
 616	{ "tx_late_collision" },
 617	{ "tx_fifo_errors" },
 618	{ "tx_carrier_errors" },
 619	{ "tx_excess_deferral" },
 620	{ "tx_retry_error" },
 621	{ "rx_frame_error" },
 622	{ "rx_extra_byte" },
 623	{ "rx_late_collision" },
 624	{ "rx_runt" },
 625	{ "rx_frame_too_long" },
 626	{ "rx_over_errors" },
 627	{ "rx_crc_errors" },
 628	{ "rx_frame_align_error" },
 629	{ "rx_length_error" },
 630	{ "rx_unicast" },
 631	{ "rx_multicast" },
 632	{ "rx_broadcast" },
 633	{ "rx_packets" },
 634	{ "rx_errors_total" },
 635	{ "tx_errors_total" },
 636
 637	/* version 2 stats */
 638	{ "tx_deferral" },
 639	{ "tx_packets" },
 640	{ "rx_bytes" },
 641	{ "tx_pause" },
 642	{ "rx_pause" },
 643	{ "rx_drop_frame" },
 644
 645	/* version 3 stats */
 646	{ "tx_unicast" },
 647	{ "tx_multicast" },
 648	{ "tx_broadcast" }
 649};
 650
 651struct nv_ethtool_stats {
 652	u64 tx_bytes;
 653	u64 tx_zero_rexmt;
 654	u64 tx_one_rexmt;
 655	u64 tx_many_rexmt;
 656	u64 tx_late_collision;
 657	u64 tx_fifo_errors;
 658	u64 tx_carrier_errors;
 659	u64 tx_excess_deferral;
 660	u64 tx_retry_error;
 661	u64 rx_frame_error;
 662	u64 rx_extra_byte;
 663	u64 rx_late_collision;
 664	u64 rx_runt;
 665	u64 rx_frame_too_long;
 666	u64 rx_over_errors;
 667	u64 rx_crc_errors;
 668	u64 rx_frame_align_error;
 669	u64 rx_length_error;
 670	u64 rx_unicast;
 671	u64 rx_multicast;
 672	u64 rx_broadcast;
 673	u64 rx_packets;
 674	u64 rx_errors_total;
 675	u64 tx_errors_total;
 676
 677	/* version 2 stats */
 678	u64 tx_deferral;
 679	u64 tx_packets;
 680	u64 rx_bytes;
 681	u64 tx_pause;
 682	u64 rx_pause;
 683	u64 rx_drop_frame;
 684
 685	/* version 3 stats */
 686	u64 tx_unicast;
 687	u64 tx_multicast;
 688	u64 tx_broadcast;
 689};
 690
 691#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
 692#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
 693#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
 694
 695/* diagnostics */
 696#define NV_TEST_COUNT_BASE 3
 697#define NV_TEST_COUNT_EXTENDED 4
 698
 699static const struct nv_ethtool_str nv_etests_str[] = {
 700	{ "link      (online/offline)" },
 701	{ "register  (offline)       " },
 702	{ "interrupt (offline)       " },
 703	{ "loopback  (offline)       " }
 704};
 705
/* One entry of the register self-test: a register offset and the mask of
 * bits expected to read back as written. */
struct register_test {
	__u32 reg;	/* register offset (NvReg*) */
	__u32 mask;	/* read/write-testable bits */
};
 710
/* Registers exercised by the ethtool register self-test.
 * The table is terminated by an all-zero sentinel entry. */
static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};
 720
/* Per-ring-slot bookkeeping: the skb occupying a descriptor and its DMA
 * mapping, plus tx-side linkage for multi-descriptor packets. */
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;			/* DMA address of the mapped data */
	unsigned int dma_len:31;	/* length of the DMA mapping */
	unsigned int dma_single:1;	/* NOTE(review): presumably 1 for
					 * dma_map_single vs page mappings —
					 * confirm at the unmap sites */
	struct ring_desc_ex *first_tx_desc;	/* first descriptor of this tx packet */
	struct nv_skb_map *next_tx_ctx;		/* next context slot in the tx ring */
};
 729
 730/*
 731 * SMP locking:
 732 * All hardware access under netdev_priv(dev)->lock, except the performance
 733 * critical parts:
 734 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 735 *	by the arch code for interrupts.
 736 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 737 *	needs netdev_priv(dev)->lock :-(
 738 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 739 */
 740
 741/* in dev: base, irq */
 742struct fe_priv {
 743	spinlock_t lock;
 744
 745	struct net_device *dev;
 746	struct napi_struct napi;
 747
 748	/* General data:
 749	 * Locking: spin_lock(&np->lock); */
 750	struct nv_ethtool_stats estats;
 751	int in_shutdown;
 752	u32 linkspeed;
 753	int duplex;
 754	int autoneg;
 755	int fixed_mode;
 756	int phyaddr;
 757	int wolenabled;
 758	unsigned int phy_oui;
 759	unsigned int phy_model;
 760	unsigned int phy_rev;
 761	u16 gigabit;
 762	int intr_test;
 763	int recover_error;
 764	int quiet_count;
 765
 766	/* General data: RO fields */
 767	dma_addr_t ring_addr;
 768	struct pci_dev *pci_dev;
 769	u32 orig_mac[2];
 770	u32 events;
 771	u32 irqmask;
 772	u32 desc_ver;
 773	u32 txrxctl_bits;
 774	u32 vlanctl_bits;
 775	u32 driver_data;
 776	u32 device_id;
 777	u32 register_size;
 778	u32 mac_in_use;
 779	int mgmt_version;
 780	int mgmt_sema;
 781
 782	void __iomem *base;
 783
 784	/* rx specific fields.
 785	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 786	 */
 787	union ring_type get_rx, put_rx, first_rx, last_rx;
 788	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
 789	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
 790	struct nv_skb_map *rx_skb;
 791
 792	union ring_type rx_ring;
 793	unsigned int rx_buf_sz;
 794	unsigned int pkt_limit;
 795	struct timer_list oom_kick;
 796	struct timer_list nic_poll;
 797	struct timer_list stats_poll;
 798	u32 nic_poll_irq;
 799	int rx_ring_size;
 800
 801	/* media detection workaround.
 802	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 803	 */
 804	int need_linktimer;
 805	unsigned long link_timeout;
 806	/*
 807	 * tx specific fields.
 808	 */
 809	union ring_type get_tx, put_tx, first_tx, last_tx;
 810	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
 811	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
 812	struct nv_skb_map *tx_skb;
 813
 814	union ring_type tx_ring;
 815	u32 tx_flags;
 816	int tx_ring_size;
 817	int tx_limit;
 818	u32 tx_pkts_in_progress;
 819	struct nv_skb_map *tx_change_owner;
 820	struct nv_skb_map *tx_end_flip;
 821	int tx_stop;
 822
 823	/* vlan fields */
 824	struct vlan_group *vlangrp;
 825
 826	/* msi/msi-x fields */
 827	u32 msi_flags;
 828	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 829
 830	/* flow control */
 831	u32 pause_flags;
 832
 833	/* power saved state */
 834	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
 835
 836	/* for different msi-x irq type */
 837	char name_rx[IFNAMSIZ + 3];       /* -rx    */
 838	char name_tx[IFNAMSIZ + 3];       /* -tx    */
 839	char name_other[IFNAMSIZ + 6];    /* -other */
 840};
 841
 842/*
 843 * Maximum number of loops until we assume that a bit in the irq mask
 844 * is stuck. Overridable with module param.
 845 */
 846static int max_interrupt_work = 4;
 847
 848/*
 849 * Optimization can be either throuput mode or cpu mode
 850 *
 851 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 852 * CPU Mode: Interrupts are controlled by a timer.
 853 */
 854enum {
 855	NV_OPTIMIZATION_MODE_THROUGHPUT,
 856	NV_OPTIMIZATION_MODE_CPU,
 857	NV_OPTIMIZATION_MODE_DYNAMIC
 858};
 859static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
 860
 861/*
 862 * Poll interval for timer irq
 863 *
 864 * This interval determines how frequent an interrupt is generated.
 865 * The is value is determined by [(time_in_micro_secs * 100) / (2^10)]
 866 * Min = 0, and Max = 65535
 867 */
 868static int poll_interval = -1;
 869
 870/*
 871 * MSI interrupts
 872 */
 873enum {
 874	NV_MSI_INT_DISABLED,
 875	NV_MSI_INT_ENABLED
 876};
 877static int msi = NV_MSI_INT_ENABLED;
 878
 879/*
 880 * MSIX interrupts
 881 */
 882enum {
 883	NV_MSIX_INT_DISABLED,
 884	NV_MSIX_INT_ENABLED
 885};
 886static int msix = NV_MSIX_INT_ENABLED;
 887
 888/*
 889 * DMA 64bit
 890 */
 891enum {
 892	NV_DMA_64BIT_DISABLED,
 893	NV_DMA_64BIT_ENABLED
 894};
 895static int dma_64bit = NV_DMA_64BIT_ENABLED;
 896
 897/*
 898 * Crossover Detection
 899 * Realtek 8201 phy + some OEM boards do not work properly.
 900 */
 901enum {
 902	NV_CROSSOVER_DETECTION_DISABLED,
 903	NV_CROSSOVER_DETECTION_ENABLED
 904};
 905static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
 906
 907/*
 908 * Power down phy when interface is down (persists through reboot;
 909 * older Linux and other OSes may not power it up again)
 910 */
 911static int phy_power_down;
 912
 913static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 914{
 915	return netdev_priv(dev);
 916}
 917
 918static inline u8 __iomem *get_hwbase(struct net_device *dev)
 919{
 920	return ((struct fe_priv *)netdev_priv(dev))->base;
 921}
 922
/* Flush posted PCI writes by performing a read-back from the device. */
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
 928
 929static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 930{
 931	return le32_to_cpu(prd->flaglen)
 932		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 933}
 934
 935static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 936{
 937	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 938}
 939
 940static bool nv_optimized(struct fe_priv *np)
 941{
 942	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 943		return false;
 944	return true;
 945}
 946
 947static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 948		     int delay, int delaymax)
 949{
 950	u8 __iomem *base = get_hwbase(dev);
 951
 952	pci_push(base);
 953	do {
 954		udelay(delay);
 955		delaymax -= delay;
 956		if (delaymax < 0)
 957			return 1;
 958	} while ((readl(base + offset) & mask) != target);
 959	return 0;
 960}
 961
#define NV_SETUP_RX_RING 0x01	/* setup_hw_rings(): program rx ring base */
#define NV_SETUP_TX_RING 0x02	/* setup_hw_rings(): program tx ring base */
 964
/* dma_low - low 32 bits of a DMA address (truncating conversion) */
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}
 969
/* dma_high - high 32 bits of a DMA address; 0 when dma_addr_t is 32bit.
 * The double shift avoids undefined behavior of a >>32 on 32bit dma_addr_t.
 */
static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
 974
/*
 * setup_hw_rings - program the rx/tx descriptor ring base addresses.
 * @rxtx_flags: NV_SETUP_RX_RING and/or NV_SETUP_TX_RING
 *
 * Both rings live in one DMA allocation: the tx ring starts
 * rx_ring_size descriptors after the rx ring.  The legacy descriptor
 * formats program only 32bit addresses; the optimized (ex) format also
 * programs the high 32 bits.
 */
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
 996
/*
 * free_rings - release the descriptor rings and the skb bookkeeping arrays.
 *
 * The rx and tx rings share a single coherent DMA allocation whose
 * element size depends on the descriptor format in use; kfree(NULL)
 * is a no-op, so the skb arrays need no guard.
 */
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}
1013
1014static int using_multi_irqs(struct net_device *dev)
1015{
1016	struct fe_priv *np = get_nvpriv(dev);
1017
1018	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1019	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
1020	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1021		return 0;
1022	else
1023		return 1;
1024}
1025
/*
 * nv_txrx_gate - gate (@gate true) or ungate the device clocks via
 * the NVREG_POWERSTATE2_GATE_CLOCKS bit.
 *
 * Only touches the hw when this driver owns the MAC and the device
 * advertises power control (DEV_HAS_POWER_CNTRL).
 */
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}
1042
/*
 * nv_enable_irq - re-enable the device's interrupt line(s).
 *
 * Single-vector setups (legacy/MSI, or MSI-X with one vector) enable
 * one irq; multi-vector MSI-X enables the rx, tx and other vectors
 * individually.  Mirrors nv_disable_irq().
 */
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
1058
/*
 * nv_disable_irq - disable the device's interrupt line(s).
 *
 * Counterpart of nv_enable_irq(); same single- vs multi-vector split.
 */
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
1074
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	/* unmask the requested interrupt sources at the device level */
	writel(mask, base + NvRegIrqMask);
}
1082
/*
 * nv_disable_hw_interrupts - mask interrupt sources at the device.
 *
 * With MSI-X the mask register acts as XOR (see note above), so the
 * caller's @mask is written; otherwise everything is masked with 0,
 * including the separate MSI mask register when MSI is enabled.
 */
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
1096
1097static void nv_napi_enable(struct net_device *dev)
1098{
1099	struct fe_priv *np = get_nvpriv(dev);
1100
1101	napi_enable(&np->napi);
1102}
1103
1104static void nv_napi_disable(struct net_device *dev)
1105{
1106	struct fe_priv *np = get_nvpriv(dev);
1107
1108	napi_disable(&np->napi);
1109}
1110
#define MII_READ	(-1)	/* passed as @value to mii_rw() to request a read */
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 *
 * @addr:   phy address
 * @miireg: mii register number
 * @value:  value to write, or MII_READ to perform a read
 *
 * Returns the value read, 0 for a successful write, or -1 on failure
 * (timeout waiting for the mii unit, or a hw-reported read error).
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	/* clear stale status bits before starting the transaction */
	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		/* mii unit busy: abort the pending transaction and wait */
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		/* data must be set up before the control write starts it */
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
1151
/*
 * phy_reset - reset the phy and wait for the reset to complete.
 * @bmcr_setup: extra BMCR bits to program together with BMCR_RESET
 *
 * Returns 0 on success, -1 when the BMCR write fails or BMCR_RESET
 * does not deassert within the polling budget.
 */
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
1175
/*
 * init_realtek_8211b - vendor errata init sequence for the Realtek
 * 8211B phy: a fixed table of register/value pairs written in order.
 *
 * Returns 0 on success, PHY_ERROR if any mii write fails.
 */
static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}
1199
/*
 * init_realtek_8211c - init sequence for the Realtek 8211C phy.
 *
 * Pulses a hw phy reset through NvRegPowerState2, then programs
 * vendor-specific registers.  Returns 0 on success, PHY_ERROR if any
 * mii write fails.
 */
static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}
1234
/*
 * init_realtek_8201 - init fix for the Realtek 8201 phy.
 *
 * Only acts on boards flagged DEV_NEED_PHY_INIT_FIX; sets an extra
 * vendor bit in PHY_REALTEK_INIT_REG6.  Returns 0 on success,
 * PHY_ERROR if the mii write fails.
 */
static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}
1250
/*
 * init_realtek_8201_cross - disable crossover detection on the
 * Realtek 8201 phy (some OEM boards misbehave with it enabled; see
 * the phy_cross module parameter).
 *
 * Only acts when phy_cross == NV_CROSSOVER_DETECTION_DISABLED.
 * Returns 0 on success, PHY_ERROR if any mii write fails.
 */
static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}
1273
/*
 * init_cicada - vendor init for Cicada phys.
 *
 * Applies extra register setup when the interface is RGMII, then a
 * common tweak to MII_SREVISION.  Returns 0 on success, PHY_ERROR if
 * any mii write fails.
 */
static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}
1297
/*
 * init_vitesse - vendor init sequence for Vitesse phys.
 *
 * An opaque sequence of vendor-specific register reads/modifies/writes
 * (including read-back-then-rewrite steps whose purpose is not
 * documented here); the order of operations is significant.
 * Returns 0 on success, PHY_ERROR if any mii write fails.
 */
static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}
1359
1360static int phy_init(struct net_device *dev)
1361{
1362	struct fe_priv *np = get_nvpriv(dev);
1363	u8 __iomem *base = get_hwbase(dev);
1364	u32 phyinterface;
1365	u32 mii_status, mii_control, mii_control_1000, reg;
1366
1367	/* phy errata for E3016 phy */
1368	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1369		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1370		reg &= ~PHY_MARVELL_E3016_INITMASK;
1371		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1372			netdev_info(dev, "%s: phy write to errata reg failed\n",
1373				    pci_name(np->pci_dev));
1374			return PHY_ERROR;
1375		}
1376	}
1377	if (np->phy_oui == PHY_OUI_REALTEK) {
1378		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1379		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1380			if (init_realtek_8211b(dev, np)) {
1381				netdev_info(dev, "%s: phy init failed\n",
1382					    pci_name(np->pci_dev));
1383				return PHY_ERROR;
1384			}
1385		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1386			   np->phy_rev == PHY_REV_REALTEK_8211C) {
1387			if (init_realtek_8211c(dev, np)) {
1388				netdev_info(dev, "%s: phy init failed\n",
1389					    pci_name(np->pci_dev));
1390				return PHY_ERROR;
1391			}
1392		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1393			if (init_realtek_8201(dev, np)) {
1394				netdev_info(dev, "%s: phy init failed\n",
1395					    pci_name(np->pci_dev));
1396				return PHY_ERROR;
1397			}
1398		}
1399	}
1400
1401	/* set advertise register */
1402	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1403	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1404		ADVERTISE_100HALF | ADVERTISE_100FULL |
1405		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1406	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1407		netdev_info(dev, "%s: phy write to advertise failed\n",
1408			    pci_name(np->pci_dev));
1409		return PHY_ERROR;
1410	}
1411
1412	/* get phy interface type */
1413	phyinterface = readl(base + NvRegPhyInterface);
1414
1415	/* see if gigabit phy */
1416	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1417	if (mii_status & PHY_GIGABIT) {
1418		np->gigabit = PHY_GIGABIT;
1419		mii_control_1000 = mii_rw(dev, np->phyaddr,
1420					  MII_CTRL1000, MII_READ);
1421		mii_control_1000 &= ~ADVERTISE_1000HALF;
1422		if (phyinterface & PHY_RGMII)
1423			mii_control_1000 |= ADVERTISE_1000FULL;
1424		else
1425			mii_control_1000 &= ~ADVERTISE_1000FULL;
1426
1427		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1428			netdev_info(dev, "%s: phy init failed\n",
1429				    pci_name(np->pci_dev));
1430			return PHY_ERROR;
1431		}
1432	} else
1433		np->gigabit = 0;
1434
1435	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1436	mii_control |= BMCR_ANENABLE;
1437
1438	if (np->phy_oui == PHY_OUI_REALTEK &&
1439	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
1440	    np->phy_rev == PHY_REV_REALTEK_8211C) {
1441		/* start autoneg since we already performed hw reset above */
1442		mii_control |= BMCR_ANRESTART;
1443		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1444			netdev_info(dev, "%s: phy init failed\n",
1445				    pci_name(np->pci_dev));
1446			return PHY_ERROR;
1447		}
1448	} else {
1449		/* reset the phy
1450		 * (certain phys need bmcr to be setup with reset)
1451		 */
1452		if (phy_reset(dev, mii_control)) {
1453			netdev_info(dev, "%s: phy reset failed\n",
1454				    pci_name(np->pci_dev));
1455			return PHY_ERROR;
1456		}
1457	}
1458
1459	/* phy vendor specific configuration */
1460	if ((np->phy_oui == PHY_OUI_CICADA)) {
1461		if (init_cicada(dev, np, phyinterface)) {
1462			netdev_info(dev, "%s: phy init failed\n",
1463				    pci_name(np->pci_dev));
1464			return PHY_ERROR;
1465		}
1466	} else if (np->phy_oui == PHY_OUI_VITESSE) {
1467		if (init_vitesse(dev, np)) {
1468			netdev_info(dev, "%s: phy init failed\n",
1469				    pci_name(np->pci_dev));
1470			return PHY_ERROR;
1471		}
1472	} else if (np->phy_oui == PHY_OUI_REALTEK) {
1473		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1474		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1475			/* reset could have cleared these out, set them back */
1476			if (init_realtek_8211b(dev, np)) {
1477				netdev_info(dev, "%s: phy init failed\n",
1478					    pci_name(np->pci_dev));
1479				return PHY_ERROR;
1480			}
1481		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1482			if (init_realtek_8201(dev, np) ||
1483			    init_realtek_8201_cross(dev, np)) {
1484				netdev_info(dev, "%s: phy init failed\n",
1485					    pci_name(np->pci_dev));
1486				return PHY_ERROR;
1487			}
1488		}
1489	}
1490
1491	/* some phys clear out pause advertisement on reset, set it back */
1492	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1493
1494	/* restart auto negotiation, power down phy */
1495	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1496	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1497	if (phy_power_down)
1498		mii_control |= BMCR_PDOWN;
1499	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1500		return PHY_ERROR;
1501
1502	return 0;
1503}
1504
/*
 * nv_start_rx - (re)start the receiver.
 *
 * If the receiver is already running and we own the mac it is stopped
 * first; the current link speed is programmed before the receiver is
 * started.  When the mac is shared, the RX_PATH_EN bit is cleared on
 * start (NOTE(review): the bit is set on stop and cleared on start,
 * so it presumably acts as a path-disable/bypass — confirm).
 */
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}
1525
/*
 * nv_stop_rx - stop the receiver and wait for it to go idle.
 *
 * When we own the mac the START bit is cleared and the link speed
 * register zeroed; when the mac is shared only RX_PATH_EN is toggled.
 * Logs a warning if ReceiverStatus stays busy past the poll budget.
 */
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
1546
/*
 * nv_start_tx - start the transmitter.
 *
 * When the mac is shared, the TX_PATH_EN bit is cleared on start
 * (mirrors the rx path handling; see nv_start_rx()).
 */
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
1559
/*
 * nv_stop_tx - stop the transmitter and wait for it to go idle.
 *
 * When we own the mac the START bit is cleared and the transmit poll
 * register is reduced to its MAC_ADDR_REV bit; when the mac is shared
 * only TX_PATH_EN is toggled.  Logs a warning if TransmitterStatus
 * stays busy past the poll budget.
 */
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
1581
/* nv_start_rxtx - start receiver then transmitter */
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}
1587
/* nv_stop_rxtx - stop receiver then transmitter */
static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
1593
/*
 * nv_txrx_reset - pulse the tx/rx reset bit in NvRegTxRxControl,
 * preserving the driver's txrxctl bits, with the required delay
 * between assert and deassert.
 */
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
1605
/*
 * nv_mac_reset - pulse a full MAC reset.
 *
 * The mac address and transmit poll registers are cleared by the
 * reset, so they are saved beforehand and restored afterwards.
 */
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
1635
1636static void nv_get_hw_stats(struct net_device *dev)
1637{
1638	struct fe_priv *np = netdev_priv(dev);
1639	u8 __iomem *base = get_hwbase(dev);
1640
1641	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1642	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1643	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1644	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1645	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1646	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1647	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1648	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1649	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1650	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1651	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1652	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1653	np->estats.rx_runt += readl(base + NvRegRxRunt);
1654	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1655	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1656	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1657	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1658	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1659	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1660	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1661	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1662	np->estats.rx_packets =
1663		np->estats.rx_unicast +
1664		np->estats.rx_multicast +
1665		np->estats.rx_broadcast;
1666	np->estats.rx_errors_total =
1667		np->estats.rx_crc_errors +
1668		np->estats.rx_over_errors +
1669		np->estats.rx_frame_error +
1670		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1671		np->estats.rx_late_collision +
1672		np->estats.rx_runt +
1673		np->estats.rx_frame_too_long;
1674	np->estats.tx_errors_total =
1675		np->estats.tx_late_collision +
1676		np->estats.tx_fifo_errors +
1677		np->estats.tx_carrier_errors +
1678		np->estats.tx_excess_deferral +
1679		np->estats.tx_retry_error;
1680
1681	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1682		np->estats.tx_def

Large files files are truncated, but you can click here to view the full file