/drivers/net/forcedeth.c

Source: https://bitbucket.org/slukk/jb-tsm-kernel-4.2 (large file; listing truncated)

/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/system.h>

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64

/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */

enum {
        NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
        NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
        NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
        /*
         * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
         * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
         */
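        /*
         * Worked example, derived from the conversion formula given for the
         * poll_interval module parameter below (reg = usec * 100 / 2^10):
         * reg = 97 -> 97 * 1024 / 100 = 993 usec, i.e. roughly 1 ms.
         */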
        NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
        NvRegMSIMap0 = 0x020,
        NvRegMSIMap1 = 0x024,
        NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
        NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
        NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
        NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
        NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
        NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
        NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
        NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
        NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
        NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
        NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
        NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
        NvRegMacAddrA = 0xA8,
        NvRegMacAddrB = 0xAC,
        NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
        NvRegMulticastAddrB = 0xB4,
        NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
        NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
        NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
        NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12
        NvRegTxRingPhysAddr = 0x100,
        NvRegRxRingPhysAddr = 0x104,
        NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
        NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
        NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
        NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
        NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
        NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
        NvRegTxRingPhysAddrHigh = 0x148,
        NvRegRxRingPhysAddrHigh = 0x14C,
        NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
        NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
        NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
        NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
        NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
        NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
        NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
        NvRegMIIData = 0x194,
        NvRegTxUnicast = 0x1a0,
        NvRegTxMulticast = 0x1a4,
        NvRegTxBroadcast = 0x1a8,
        NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
        NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
        NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
        NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
        NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
        NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
        NvRegTxCnt = 0x280,
        NvRegTxZeroReXmt = 0x284,
        NvRegTxOneReXmt = 0x288,
        NvRegTxManyReXmt = 0x28c,
        NvRegTxLateCol = 0x290,
        NvRegTxUnderflow = 0x294,
        NvRegTxLossCarrier = 0x298,
        NvRegTxExcessDef = 0x29c,
        NvRegTxRetryErr = 0x2a0,
        NvRegRxFrameErr = 0x2a4,
        NvRegRxExtraByte = 0x2a8,
        NvRegRxLateCol = 0x2ac,
        NvRegRxRunt = 0x2b0,
        NvRegRxFrameTooLong = 0x2b4,
        NvRegRxOverflow = 0x2b8,
        NvRegRxFCSErr = 0x2bc,
        NvRegRxFrameAlignErr = 0x2c0,
        NvRegRxLenErr = 0x2c4,
        NvRegRxUnicast = 0x2c8,
        NvRegRxMulticast = 0x2cc,
        NvRegRxBroadcast = 0x2d0,
        NvRegTxDef = 0x2d4,
        NvRegTxFrame = 0x2d8,
        NvRegRxCnt = 0x2dc,
        NvRegTxPause = 0x2e0,
        NvRegRxPause = 0x2e4,
        NvRegRxDropFrame = 0x2e8,
        NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
        NvRegMSIXMap0 = 0x3e0,
        NvRegMSIXMap1 = 0x3e4,
        NvRegMSIXIrqStatus = 0x3f0,
        NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
        __le32 buf;
        __le32 flaglen;
};

struct ring_desc_ex {
        __le32 bufhigh;
        __le32 buflow;
        __le32 txvlan;
        __le32 flaglen;
};

union ring_type {
        struct ring_desc *orig;
        struct ring_desc_ex *ex;
};
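
/*
 * Note (see nv_optimized() below): the .orig layout is used with
 * DESC_VER_1 and DESC_VER_2, the extended .ex layout with DESC_VER_3.
 */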

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048

/* statistics */
struct nv_ethtool_str {
        char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
        { "tx_bytes" },
        { "tx_zero_rexmt" },
        { "tx_one_rexmt" },
        { "tx_many_rexmt" },
        { "tx_late_collision" },
        { "tx_fifo_errors" },
        { "tx_carrier_errors" },
        { "tx_excess_deferral" },
        { "tx_retry_error" },
        { "rx_frame_error" },
        { "rx_extra_byte" },
        { "rx_late_collision" },
        { "rx_runt" },
        { "rx_frame_too_long" },
        { "rx_over_errors" },
        { "rx_crc_errors" },
        { "rx_frame_align_error" },
        { "rx_length_error" },
        { "rx_unicast" },
        { "rx_multicast" },
        { "rx_broadcast" },
        { "rx_packets" },
        { "rx_errors_total" },
        { "tx_errors_total" },

        /* version 2 stats */
        { "tx_deferral" },
        { "tx_packets" },
        { "rx_bytes" },
        { "tx_pause" },
        { "rx_pause" },
        { "rx_drop_frame" },

        /* version 3 stats */
        { "tx_unicast" },
        { "tx_multicast" },
        { "tx_broadcast" }
};

struct nv_ethtool_stats {
        u64 tx_bytes;
        u64 tx_zero_rexmt;
        u64 tx_one_rexmt;
        u64 tx_many_rexmt;
        u64 tx_late_collision;
        u64 tx_fifo_errors;
        u64 tx_carrier_errors;
        u64 tx_excess_deferral;
        u64 tx_retry_error;
        u64 rx_frame_error;
        u64 rx_extra_byte;
        u64 rx_late_collision;
        u64 rx_runt;
        u64 rx_frame_too_long;
        u64 rx_over_errors;
        u64 rx_crc_errors;
        u64 rx_frame_align_error;
        u64 rx_length_error;
        u64 rx_unicast;
        u64 rx_multicast;
        u64 rx_broadcast;
        u64 rx_packets;
        u64 rx_errors_total;
        u64 tx_errors_total;

        /* version 2 stats */
        u64 tx_deferral;
        u64 tx_packets;
        u64 rx_bytes;
        u64 tx_pause;
        u64 rx_pause;
        u64 rx_drop_frame;

        /* version 3 stats */
        u64 tx_unicast;
        u64 tx_multicast;
        u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
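
/*
 * Worked out from struct nv_ethtool_stats above: the struct holds 33 u64
 * counters in total, so V3 = 33, V2 = 33 - 3 (the version 3 block) = 30,
 * and V1 = 30 - 6 (the version 2 block) = 24.
 */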

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
        { "link (online/offline)" },
        { "register (offline) " },
        { "interrupt (offline) " },
        { "loopback (offline) " }
};

struct register_test {
        __u32 reg;
        __u32 mask;
};

static const struct register_test nv_registers_test[] = {
        { NvRegUnknownSetupReg6, 0x01 },
        { NvRegMisc1, 0x03c },
        { NvRegOffloadConfig, 0x03ff },
        { NvRegMulticastAddrA, 0xffffffff },
        { NvRegTxWatermark, 0x0ff },
        { NvRegWakeUpFlags, 0x07777 },
        { 0, 0 }
};

struct nv_skb_map {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int dma_len:31;
        unsigned int dma_single:1;
        struct ring_desc_ex *first_tx_desc;
        struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
        spinlock_t lock;

        struct net_device *dev;
        struct napi_struct napi;

        /* General data:
         * Locking: spin_lock(&np->lock); */
        struct nv_ethtool_stats estats;
        int in_shutdown;
        u32 linkspeed;
        int duplex;
        int autoneg;
        int fixed_mode;
        int phyaddr;
        int wolenabled;
        unsigned int phy_oui;
        unsigned int phy_model;
        unsigned int phy_rev;
        u16 gigabit;
        int intr_test;
        int recover_error;
        int quiet_count;

        /* General data: RO fields */
        dma_addr_t ring_addr;
        struct pci_dev *pci_dev;
        u32 orig_mac[2];
        u32 events;
        u32 irqmask;
        u32 desc_ver;
        u32 txrxctl_bits;
        u32 vlanctl_bits;
        u32 driver_data;
        u32 device_id;
        u32 register_size;
        u32 mac_in_use;
        int mgmt_version;
        int mgmt_sema;

        void __iomem *base;

        /* rx specific fields.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        union ring_type get_rx, put_rx, first_rx, last_rx;
        struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
        struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
        struct nv_skb_map *rx_skb;

        union ring_type rx_ring;
        unsigned int rx_buf_sz;
        unsigned int pkt_limit;
        struct timer_list oom_kick;
        struct timer_list nic_poll;
        struct timer_list stats_poll;
        u32 nic_poll_irq;
        int rx_ring_size;

        /* media detection workaround.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
        int need_linktimer;
        unsigned long link_timeout;

        /*
         * tx specific fields.
         */
        union ring_type get_tx, put_tx, first_tx, last_tx;
        struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
        struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
        struct nv_skb_map *tx_skb;

        union ring_type tx_ring;
        u32 tx_flags;
        int tx_ring_size;
        int tx_limit;
        u32 tx_pkts_in_progress;
        struct nv_skb_map *tx_change_owner;
        struct nv_skb_map *tx_end_flip;
        int tx_stop;

        /* vlan fields */
        struct vlan_group *vlangrp;

        /* msi/msi-x fields */
        u32 msi_flags;
        struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

        /* flow control */
        u32 pause_flags;

        /* power saved state */
        u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

        /* for different msi-x irq type */
        char name_rx[IFNAMSIZ + 3]; /* -rx */
        char name_tx[IFNAMSIZ + 3]; /* -tx */
        char name_other[IFNAMSIZ + 6]; /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be throughput mode, cpu mode, or dynamic mode.
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 * Dynamic Mode: The driver switches between the two at runtime based on load.
 */
enum {
        NV_OPTIMIZATION_MODE_THROUGHPUT,
        NV_OPTIMIZATION_MODE_CPU,
        NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
        NV_MSI_INT_DISABLED,
        NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
        NV_MSIX_INT_DISABLED,
        NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
        NV_DMA_64BIT_DISABLED,
        NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
        NV_CROSSOVER_DETECTION_DISABLED,
        NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
        return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
        return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
        /* force out pending posted writes */
        readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
        return le32_to_cpu(prd->flaglen)
                & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
        return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                return false;
        return true;
}
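
/*
 * reg_delay() below polls the register at 'offset' every 'delay' usec
 * until the masked value equals 'target'; it returns 0 on success and 1
 * once more than 'delaymax' usec have elapsed without the hardware
 * reaching the expected state.
 */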
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
                     int delay, int delaymax)
{
        u8 __iomem *base = get_hwbase(dev);

        pci_push(base);
        do {
                udelay(delay);
                delaymax -= delay;
                if (delaymax < 0)
                        return 1;
        } while ((readl(base + offset) & mask) != target);
        return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
        return addr;
}
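
/*
 * The double shift in dma_high() avoids undefined behavior: a direct
 * ">> 32" would be undefined C when dma_addr_t is only 32 bits wide.
 */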
static inline u32 dma_high(dma_addr_t addr)
{
        return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
}

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);

        if (!nv_optimized(np)) {
                if (rxtx_flags & NV_SETUP_RX_RING)
                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
                if (rxtx_flags & NV_SETUP_TX_RING)
                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
        } else {
                if (rxtx_flags & NV_SETUP_RX_RING) {
                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
                        writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
                }
                if (rxtx_flags & NV_SETUP_TX_RING) {
                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
                        writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
                }
        }
}
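
/*
 * Note on the address arithmetic above (matching the combined allocation
 * released in free_rings() below): rx and tx descriptors live in one
 * contiguous DMA block, rx ring first, so the tx ring base is
 * ring_addr + rx_ring_size * sizeof(descriptor).
 */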

static void free_rings(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        if (!nv_optimized(np)) {
                if (np->rx_ring.orig)
                        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                                            np->rx_ring.orig, np->ring_addr);
        } else {
                if (np->rx_ring.ex)
                        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                                            np->rx_ring.ex, np->ring_addr);
        }
        kfree(np->rx_skb);
        kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
            ((np->msi_flags & NV_MSI_X_ENABLED) &&
             ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
                return 0;
        else
                return 1;
}
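
/*
 * The separate rx/tx/other vectors below are only used when MSI-X is
 * enabled with more than one vector; with legacy, MSI, or single-vector
 * MSI-X interrupts everything is funneled through one interrupt line.
 */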

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 powerstate;

        if (!np->mac_in_use &&
            (np->driver_data & DEV_HAS_POWER_CNTRL)) {
                powerstate = readl(base + NvRegPowerState2);
                if (gate)
                        powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
                else
                        powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
                writel(powerstate, base + NvRegPowerState2);
        }
}

static void nv_enable_irq(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        if (!using_multi_irqs(dev)) {
                if (np->msi_flags & NV_MSI_X_ENABLED)
                        enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
                else
                        enable_irq(np->pci_dev->irq);
        } else {
                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
        }
}

static void nv_disable_irq(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        if (!using_multi_irqs(dev)) {
                if (np->msi_flags & NV_MSI_X_ENABLED)
                        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
                else
                        disable_irq(np->pci_dev->irq);
        } else {
                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
        }
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
        u8 __iomem *base = get_hwbase(dev);

        writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);

        if (np->msi_flags & NV_MSI_X_ENABLED) {
                writel(mask, base + NvRegIrqMask);
        } else {
                if (np->msi_flags & NV_MSI_ENABLED)
                        writel(0, base + NvRegMSIIrqMask);
                writel(0, base + NvRegIrqMask);
        }
}

static void nv_napi_enable(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        napi_disable(&np->napi);
}

#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
        u8 __iomem *base = get_hwbase(dev);
        u32 reg;
        int retval;

        writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

        reg = readl(base + NvRegMIIControl);
        if (reg & NVREG_MIICTL_INUSE) {
                writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
                udelay(NV_MIIBUSY_DELAY);
        }

        reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
        if (value != MII_READ) {
                writel(value, base + NvRegMIIData);
                reg |= NVREG_MIICTL_WRITE;
        }
        writel(reg, base + NvRegMIIControl);

        if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
                      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
                retval = -1;
        } else if (value != MII_READ) {
                /* it was a write operation - fewer failures are detectable */
                retval = 0;
        } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
                retval = -1;
        } else {
                retval = readl(base + NvRegMIIData);
        }

        return retval;
}
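
/*
 * Usage sketch (mirroring how phy_init() below talks to the PHY): a read
 * is requested by passing MII_READ as the value, e.g.
 *   mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 * which returns the register contents or -1 on error; writes return 0 on
 * success and -1 on timeout.
 */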

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
        struct fe_priv *np = netdev_priv(dev);
        u32 miicontrol;
        unsigned int tries = 0;

        miicontrol = BMCR_RESET | bmcr_setup;
        if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
                return -1;

        /* wait for 500ms */
        msleep(500);

        /* must wait till reset is deasserted */
        while (miicontrol & BMCR_RESET) {
                usleep_range(10000, 20000);
                miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
                /* FIXME: 100 tries seem excessive */
                if (tries++ > 100)
                        return -1;
        }
        return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
        static const struct {
                int reg;
                int init;
        } ri[] = {
                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
                { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
                { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
                { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
                { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(ri); i++) {
                if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
                        return PHY_ERROR;
        }

        return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
        u32 reg;
        u8 __iomem *base = get_hwbase(dev);
        u32 powerstate = readl(base + NvRegPowerState2);

        /* need to perform hw phy reset */
        powerstate |= NVREG_POWERSTATE2_PHY_RESET;
        writel(powerstate, base + NvRegPowerState2);
        msleep(25);

        powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
        writel(powerstate, base + NvRegPowerState2);
        msleep(25);

        reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
        reg |= PHY_REALTEK_INIT9;
        if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
                return PHY_ERROR;
        reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
        if (!(reg & PHY_REALTEK_INIT11)) {
                reg |= PHY_REALTEK_INIT11;
                if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
                        return PHY_ERROR;
        }
        if (mii_rw(dev, np->phyaddr,
                   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
                return PHY_ERROR;

        return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
        u32 phy_reserved;

        if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
                phy_reserved = mii_rw(dev, np->phyaddr,
                                      PHY_REALTEK_INIT_REG6, MII_READ);
                phy_reserved |= PHY_REALTEK_INIT7;
                if (mii_rw(dev, np->phyaddr,
                           PHY_REALTEK_INIT_REG6, phy_reserved))
                        return PHY_ERROR;
        }

        return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
        u32 phy_reserved;

        if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
                if (mii_rw(dev, np->phyaddr,
                           PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
                        return PHY_ERROR;
                phy_reserved = mii_rw(dev, np->phyaddr,
                                      PHY_REALTEK_INIT_REG2, MII_READ);
                phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
                phy_reserved |= PHY_REALTEK_INIT3;
                if (mii_rw(dev, np->phyaddr,
                           PHY_REALTEK_INIT_REG2, phy_reserved))
                        return PHY_ERROR;
                if (mii_rw(dev, np->phyaddr,
                           PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
                        return PHY_ERROR;
        }

        return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
                       u32 phyinterface)
{
        u32 phy_reserved;

        if (phyinterface & PHY_RGMII) {
                phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
                phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
                phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
                if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
                        return PHY_ERROR;
                phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
                phy_reserved |= PHY_CICADA_INIT5;
                if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
                        return PHY_ERROR;
        }
        phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
        phy_reserved |= PHY_CICADA_INIT6;
        if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
                return PHY_ERROR;

        return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
        u32 phy_reserved;

        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG4, MII_READ);
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG3, MII_READ);
        phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
        phy_reserved |= PHY_VITESSE_INIT3;
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG4, MII_READ);
        phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
        phy_reserved |= PHY_VITESSE_INIT3;
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG3, MII_READ);
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG4, MII_READ);
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
                return PHY_ERROR;
        phy_reserved = mii_rw(dev, np->phyaddr,
                              PHY_VITESSE_INIT_REG3, MII_READ);
        phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
        phy_reserved |= PHY_VITESSE_INIT8;
        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
                return PHY_ERROR;
        if (mii_rw(dev, np->phyaddr,
                   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
                return PHY_ERROR;

        return 0;
}

static int phy_init(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 phyinterface;
        u32 mii_status, mii_control, mii_control_1000, reg;

        /* phy errata for E3016 phy */
        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
                reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
                reg &= ~PHY_MARVELL_E3016_INITMASK;
                if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
                        netdev_info(dev, "%s: phy write to errata reg failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        }
        if (np->phy_oui == PHY_OUI_REALTEK) {
                if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
                    np->phy_rev == PHY_REV_REALTEK_8211B) {
                        if (init_realtek_8211b(dev, np)) {
                                netdev_info(dev, "%s: phy init failed\n",
                                            pci_name(np->pci_dev));
                                return PHY_ERROR;
                        }
                } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
                           np->phy_rev == PHY_REV_REALTEK_8211C) {
                        if (init_realtek_8211c(dev, np)) {
                                netdev_info(dev, "%s: phy init failed\n",
                                            pci_name(np->pci_dev));
                                return PHY_ERROR;
                        }
                } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
                        if (init_realtek_8201(dev, np)) {
                                netdev_info(dev, "%s: phy init failed\n",
                                            pci_name(np->pci_dev));
                                return PHY_ERROR;
                        }
                }
        }

        /* set advertise register */
        reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
                ADVERTISE_100HALF | ADVERTISE_100FULL |
                ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
        if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
                netdev_info(dev, "%s: phy write to advertise failed\n",
                            pci_name(np->pci_dev));
                return PHY_ERROR;
        }

        /* get phy interface type */
        phyinterface = readl(base + NvRegPhyInterface);

        /* see if gigabit phy */
        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
        if (mii_status & PHY_GIGABIT) {
                np->gigabit = PHY_GIGABIT;
                mii_control_1000 = mii_rw(dev, np->phyaddr,
                                          MII_CTRL1000, MII_READ);
                mii_control_1000 &= ~ADVERTISE_1000HALF;
                if (phyinterface & PHY_RGMII)
                        mii_control_1000 |= ADVERTISE_1000FULL;
                else
                        mii_control_1000 &= ~ADVERTISE_1000FULL;

                if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
                        netdev_info(dev, "%s: phy init failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        } else
                np->gigabit = 0;

        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        mii_control |= BMCR_ANENABLE;

        if (np->phy_oui == PHY_OUI_REALTEK &&
            np->phy_model == PHY_MODEL_REALTEK_8211 &&
            np->phy_rev == PHY_REV_REALTEK_8211C) {
                /* start autoneg since we already performed hw reset above */
                mii_control |= BMCR_ANRESTART;
                if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
                        netdev_info(dev, "%s: phy init failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        } else {
                /* reset the phy
                 * (certain phys need bmcr to be setup with reset)
                 */
                if (phy_reset(dev, mii_control)) {
                        netdev_info(dev, "%s: phy reset failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        }

        /* phy vendor specific configuration */
        if (np->phy_oui == PHY_OUI_CICADA) {
                if (init_cicada(dev, np, phyinterface)) {
                        netdev_info(dev, "%s: phy init failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        } else if (np->phy_oui == PHY_OUI_VITESSE) {
                if (init_vitesse(dev, np)) {
                        netdev_info(dev, "%s: phy init failed\n",
                                    pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
        } else if (np->phy_oui == PHY_OUI_REALTEK) {
                if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
                    np->phy_rev == PHY_REV_REALTEK_8211B) {
                        /* reset could have cleared these out, set them back */
                        if (init_realtek_8211b(dev, np)) {
                                netdev_info(dev, "%s: phy init failed\n",
                                            pci_name(np->pci_dev));
                                return PHY_ERROR;
                        }
                } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
                        if (init_realtek_8201(dev, np) ||
                            init_realtek_8201_cross(dev, np)) {
                                netdev_info(dev, "%s: phy init failed\n",
                                            pci_name(np->pci_dev));
                                return PHY_ERROR;
                        }
                }
        }

        /* some phys clear out pause advertisement on reset, set it back */
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

        /* restart auto negotiation, power down phy */
        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
        if (phy_power_down)
                mii_control |= BMCR_PDOWN;
        if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
                return PHY_ERROR;

        return 0;
}

static void nv_start_rx(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 rx_ctrl = readl(base + NvRegReceiverControl);

        /* Already running? Stop it. */
        if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
                rx_ctrl &= ~NVREG_RCVCTL_START;
                writel(rx_ctrl, base + NvRegReceiverControl);
                pci_push(base);
        }
        writel(np->linkspeed, base + NvRegLinkSpeed);
        pci_push(base);
        rx_ctrl |= NVREG_RCVCTL_START;
        if (np->mac_in_use)
                rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
        writel(rx_ctrl, base + NvRegReceiverControl);
        pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 rx_ctrl = readl(base + NvRegReceiverControl);

        if (!np->mac_in_use)
                rx_ctrl &= ~NVREG_RCVCTL_START;
        else
                rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
        writel(rx_ctrl, base + NvRegReceiverControl);
        if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
                      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
                netdev_info(dev, "%s: ReceiverStatus remained busy\n",
                            __func__);

        udelay(NV_RXSTOP_DELAY2);
        if (!np->mac_in_use)
                writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 tx_ctrl = readl(base + NvRegTransmitterControl);

        tx_ctrl |= NVREG_XMITCTL_START;
        if (np->mac_in_use)
                tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
        writel(tx_ctrl, base + NvRegTransmitterControl);
        pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 tx_ctrl = readl(base + NvRegTransmitterControl);

        if (!np->mac_in_use)
                tx_ctrl &= ~NVREG_XMITCTL_START;
        else
                tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
        writel(tx_ctrl, base + NvRegTransmitterControl);
        if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
                      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
                netdev_info(dev, "%s: TransmitterStatus remained busy\n",
                            __func__);

        udelay(NV_TXSTOP_DELAY2);
        if (!np->mac_in_use)
                writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
                       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
        nv_start_rx(dev);
        nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
        nv_stop_rx(dev);
        nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);

        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
        udelay(NV_TXRX_RESET_DELAY);
        writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
        u32 temp1, temp2, temp3;

        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);

        /* save registers since they will be cleared on reset */
        temp1 = readl(base + NvRegMacAddrA);
        temp2 = readl(base + NvRegMacAddrB);
        temp3 = readl(base + NvRegTransmitPoll);

        writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
        pci_push(base);
        udelay(NV_MAC_RESET_DELAY);
        writel(0, base + NvRegMacReset);
        pci_push(base);
        udelay(NV_MAC_RESET_DELAY);

        /* restore saved registers */
        writel(temp1, base + NvRegMacAddrA);
        writel(temp2, base + NvRegMacAddrB);
        writel(temp3, base + NvRegTransmitPoll);

        writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);

        np->estats.tx_bytes += readl(base + NvRegTxCnt);
        np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
        np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
        np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
        np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
        np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
        np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
        np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
        np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
        np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
        np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
        np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
        np->estats.rx_runt += readl(base + NvRegRxRunt);
        np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
        np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
        np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
        np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
        np->estats.rx_length_error += readl(base + NvRegRxLenErr);
        np->estats.rx_unicast += readl(base + NvRegRxUnicast);
        np->estats.rx_multicast += readl(base + NvRegRxMulticast);
        np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
        np->estats.rx_packets =
                np->estats.rx_unicast +
                np->estats.rx_multicast +
                np->estats.rx_broadcast;
        np->estats.rx_errors_total =
                np->estats.rx_crc_errors +
                np->estats.rx_over_errors +
                np->estats.rx_frame_error +
                (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
                np->estats.rx_late_collision +
                np->estats.rx_runt +
                np->estats.rx_frame_too_long;
        np->estats.tx_errors_total =
                np->estats.tx_late_collision +
                np->estats.tx_fifo_errors +
                np->estats.tx_carrier_errors +
                np->estats.tx_excess_deferral +
                np->estats.tx_retry_error;

        if (np->driver_data & DEV_HAS_STATISTICS_V2) {
                np->estats.tx_def