PageRenderTime 92ms CodeModel.GetById 28ms RepoModel.GetById 0ms app.codeStats 1ms

/drivers/net/ethernet/micrel/ksz884x.c

http://github.com/mirrors/linux
C | 7258 lines | 4600 code | 1016 blank | 1642 comment | 551 complexity | 7327a5821464bade45f1b97a615821cb MD5 | raw file
Possible License(s): AGPL-1.0, GPL-2.0, LGPL-2.0
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /**
  3. * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
  4. *
  5. * Copyright (c) 2009-2010 Micrel, Inc.
  6. * Tristram Ha <Tristram.Ha@micrel.com>
  7. */
  8. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9. #include <linux/init.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/ioport.h>
  14. #include <linux/pci.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/mii.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/ethtool.h>
  19. #include <linux/etherdevice.h>
  20. #include <linux/in.h>
  21. #include <linux/ip.h>
  22. #include <linux/if_vlan.h>
  23. #include <linux/crc32.h>
  24. #include <linux/sched.h>
  25. #include <linux/slab.h>
  26. /* DMA Registers */
  27. #define KS_DMA_TX_CTRL 0x0000
  28. #define DMA_TX_ENABLE 0x00000001
  29. #define DMA_TX_CRC_ENABLE 0x00000002
  30. #define DMA_TX_PAD_ENABLE 0x00000004
  31. #define DMA_TX_LOOPBACK 0x00000100
  32. #define DMA_TX_FLOW_ENABLE 0x00000200
  33. #define DMA_TX_CSUM_IP 0x00010000
  34. #define DMA_TX_CSUM_TCP 0x00020000
  35. #define DMA_TX_CSUM_UDP 0x00040000
  36. #define DMA_TX_BURST_SIZE 0x3F000000
  37. #define KS_DMA_RX_CTRL 0x0004
  38. #define DMA_RX_ENABLE 0x00000001
  39. #define KS884X_DMA_RX_MULTICAST 0x00000002
  40. #define DMA_RX_PROMISCUOUS 0x00000004
  41. #define DMA_RX_ERROR 0x00000008
  42. #define DMA_RX_UNICAST 0x00000010
  43. #define DMA_RX_ALL_MULTICAST 0x00000020
  44. #define DMA_RX_BROADCAST 0x00000040
  45. #define DMA_RX_FLOW_ENABLE 0x00000200
  46. #define DMA_RX_CSUM_IP 0x00010000
  47. #define DMA_RX_CSUM_TCP 0x00020000
  48. #define DMA_RX_CSUM_UDP 0x00040000
  49. #define DMA_RX_BURST_SIZE 0x3F000000
  50. #define DMA_BURST_SHIFT 24
  51. #define DMA_BURST_DEFAULT 8
  52. #define KS_DMA_TX_START 0x0008
  53. #define KS_DMA_RX_START 0x000C
  54. #define DMA_START 0x00000001
  55. #define KS_DMA_TX_ADDR 0x0010
  56. #define KS_DMA_RX_ADDR 0x0014
  57. #define DMA_ADDR_LIST_MASK 0xFFFFFFFC
  58. #define DMA_ADDR_LIST_SHIFT 2
  59. /* MTR0 */
  60. #define KS884X_MULTICAST_0_OFFSET 0x0020
  61. #define KS884X_MULTICAST_1_OFFSET 0x0021
  62. #define KS884X_MULTICAST_2_OFFSET 0x0022
  63. #define KS884x_MULTICAST_3_OFFSET 0x0023
  64. /* MTR1 */
  65. #define KS884X_MULTICAST_4_OFFSET 0x0024
  66. #define KS884X_MULTICAST_5_OFFSET 0x0025
  67. #define KS884X_MULTICAST_6_OFFSET 0x0026
  68. #define KS884X_MULTICAST_7_OFFSET 0x0027
  69. /* Interrupt Registers */
  70. /* INTEN */
  71. #define KS884X_INTERRUPTS_ENABLE 0x0028
  72. /* INTST */
  73. #define KS884X_INTERRUPTS_STATUS 0x002C
  74. #define KS884X_INT_RX_STOPPED 0x02000000
  75. #define KS884X_INT_TX_STOPPED 0x04000000
  76. #define KS884X_INT_RX_OVERRUN 0x08000000
  77. #define KS884X_INT_TX_EMPTY 0x10000000
  78. #define KS884X_INT_RX 0x20000000
  79. #define KS884X_INT_TX 0x40000000
  80. #define KS884X_INT_PHY 0x80000000
  81. #define KS884X_INT_RX_MASK \
  82. (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
  83. #define KS884X_INT_TX_MASK \
  84. (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
  85. #define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
  86. /* MAC Additional Station Address */
  87. /* MAAL0 */
  88. #define KS_ADD_ADDR_0_LO 0x0080
  89. /* MAAH0 */
  90. #define KS_ADD_ADDR_0_HI 0x0084
  91. /* MAAL1 */
  92. #define KS_ADD_ADDR_1_LO 0x0088
  93. /* MAAH1 */
  94. #define KS_ADD_ADDR_1_HI 0x008C
  95. /* MAAL2 */
  96. #define KS_ADD_ADDR_2_LO 0x0090
  97. /* MAAH2 */
  98. #define KS_ADD_ADDR_2_HI 0x0094
  99. /* MAAL3 */
  100. #define KS_ADD_ADDR_3_LO 0x0098
  101. /* MAAH3 */
  102. #define KS_ADD_ADDR_3_HI 0x009C
  103. /* MAAL4 */
  104. #define KS_ADD_ADDR_4_LO 0x00A0
  105. /* MAAH4 */
  106. #define KS_ADD_ADDR_4_HI 0x00A4
  107. /* MAAL5 */
  108. #define KS_ADD_ADDR_5_LO 0x00A8
  109. /* MAAH5 */
  110. #define KS_ADD_ADDR_5_HI 0x00AC
  111. /* MAAL6 */
  112. #define KS_ADD_ADDR_6_LO 0x00B0
  113. /* MAAH6 */
  114. #define KS_ADD_ADDR_6_HI 0x00B4
  115. /* MAAL7 */
  116. #define KS_ADD_ADDR_7_LO 0x00B8
  117. /* MAAH7 */
  118. #define KS_ADD_ADDR_7_HI 0x00BC
  119. /* MAAL8 */
  120. #define KS_ADD_ADDR_8_LO 0x00C0
  121. /* MAAH8 */
  122. #define KS_ADD_ADDR_8_HI 0x00C4
  123. /* MAAL9 */
  124. #define KS_ADD_ADDR_9_LO 0x00C8
  125. /* MAAH9 */
  126. #define KS_ADD_ADDR_9_HI 0x00CC
  127. /* MAAL10 */
  128. #define KS_ADD_ADDR_A_LO 0x00D0
  129. /* MAAH10 */
  130. #define KS_ADD_ADDR_A_HI 0x00D4
  131. /* MAAL11 */
  132. #define KS_ADD_ADDR_B_LO 0x00D8
  133. /* MAAH11 */
  134. #define KS_ADD_ADDR_B_HI 0x00DC
  135. /* MAAL12 */
  136. #define KS_ADD_ADDR_C_LO 0x00E0
  137. /* MAAH12 */
  138. #define KS_ADD_ADDR_C_HI 0x00E4
  139. /* MAAL13 */
  140. #define KS_ADD_ADDR_D_LO 0x00E8
  141. /* MAAH13 */
  142. #define KS_ADD_ADDR_D_HI 0x00EC
  143. /* MAAL14 */
  144. #define KS_ADD_ADDR_E_LO 0x00F0
  145. /* MAAH14 */
  146. #define KS_ADD_ADDR_E_HI 0x00F4
  147. /* MAAL15 */
  148. #define KS_ADD_ADDR_F_LO 0x00F8
  149. /* MAAH15 */
  150. #define KS_ADD_ADDR_F_HI 0x00FC
  151. #define ADD_ADDR_HI_MASK 0x0000FFFF
  152. #define ADD_ADDR_ENABLE 0x80000000
  153. #define ADD_ADDR_INCR 8
  154. /* Miscellaneous Registers */
  155. /* MARL */
  156. #define KS884X_ADDR_0_OFFSET 0x0200
  157. #define KS884X_ADDR_1_OFFSET 0x0201
  158. /* MARM */
  159. #define KS884X_ADDR_2_OFFSET 0x0202
  160. #define KS884X_ADDR_3_OFFSET 0x0203
  161. /* MARH */
  162. #define KS884X_ADDR_4_OFFSET 0x0204
  163. #define KS884X_ADDR_5_OFFSET 0x0205
  164. /* OBCR */
  165. #define KS884X_BUS_CTRL_OFFSET 0x0210
  166. #define BUS_SPEED_125_MHZ 0x0000
  167. #define BUS_SPEED_62_5_MHZ 0x0001
  168. #define BUS_SPEED_41_66_MHZ 0x0002
  169. #define BUS_SPEED_25_MHZ 0x0003
  170. /* EEPCR */
  171. #define KS884X_EEPROM_CTRL_OFFSET 0x0212
  172. #define EEPROM_CHIP_SELECT 0x0001
  173. #define EEPROM_SERIAL_CLOCK 0x0002
  174. #define EEPROM_DATA_OUT 0x0004
  175. #define EEPROM_DATA_IN 0x0008
  176. #define EEPROM_ACCESS_ENABLE 0x0010
  177. /* MBIR */
  178. #define KS884X_MEM_INFO_OFFSET 0x0214
  179. #define RX_MEM_TEST_FAILED 0x0008
  180. #define RX_MEM_TEST_FINISHED 0x0010
  181. #define TX_MEM_TEST_FAILED 0x0800
  182. #define TX_MEM_TEST_FINISHED 0x1000
  183. /* GCR */
  184. #define KS884X_GLOBAL_CTRL_OFFSET 0x0216
  185. #define GLOBAL_SOFTWARE_RESET 0x0001
  186. #define KS8841_POWER_MANAGE_OFFSET 0x0218
  187. /* WFCR */
  188. #define KS8841_WOL_CTRL_OFFSET 0x021A
  189. #define KS8841_WOL_MAGIC_ENABLE 0x0080
  190. #define KS8841_WOL_FRAME3_ENABLE 0x0008
  191. #define KS8841_WOL_FRAME2_ENABLE 0x0004
  192. #define KS8841_WOL_FRAME1_ENABLE 0x0002
  193. #define KS8841_WOL_FRAME0_ENABLE 0x0001
  194. /* WF0 */
  195. #define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
  196. #define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
  197. #define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
  198. /* IACR */
  199. #define KS884X_IACR_P 0x04A0
  200. #define KS884X_IACR_OFFSET KS884X_IACR_P
  201. /* IADR1 */
  202. #define KS884X_IADR1_P 0x04A2
  203. #define KS884X_IADR2_P 0x04A4
  204. #define KS884X_IADR3_P 0x04A6
  205. #define KS884X_IADR4_P 0x04A8
  206. #define KS884X_IADR5_P 0x04AA
  207. #define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
  208. #define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
  209. #define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
  210. #define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
  211. #define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
  212. #define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
  213. #define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
  214. #define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
  215. #define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
  216. #define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
  217. #define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
  218. /* P1MBCR */
  219. #define KS884X_P1MBCR_P 0x04D0
  220. #define KS884X_P1MBSR_P 0x04D2
  221. #define KS884X_PHY1ILR_P 0x04D4
  222. #define KS884X_PHY1IHR_P 0x04D6
  223. #define KS884X_P1ANAR_P 0x04D8
  224. #define KS884X_P1ANLPR_P 0x04DA
  225. /* P2MBCR */
  226. #define KS884X_P2MBCR_P 0x04E0
  227. #define KS884X_P2MBSR_P 0x04E2
  228. #define KS884X_PHY2ILR_P 0x04E4
  229. #define KS884X_PHY2IHR_P 0x04E6
  230. #define KS884X_P2ANAR_P 0x04E8
  231. #define KS884X_P2ANLPR_P 0x04EA
  232. #define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
  233. #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
  234. #define KS884X_PHY_CTRL_OFFSET 0x00
  235. /* Mode Control Register */
  236. #define PHY_REG_CTRL 0
  237. #define PHY_RESET 0x8000
  238. #define PHY_LOOPBACK 0x4000
  239. #define PHY_SPEED_100MBIT 0x2000
  240. #define PHY_AUTO_NEG_ENABLE 0x1000
  241. #define PHY_POWER_DOWN 0x0800
  242. #define PHY_MII_DISABLE 0x0400
  243. #define PHY_AUTO_NEG_RESTART 0x0200
  244. #define PHY_FULL_DUPLEX 0x0100
  245. #define PHY_COLLISION_TEST 0x0080
  246. #define PHY_HP_MDIX 0x0020
  247. #define PHY_FORCE_MDIX 0x0010
  248. #define PHY_AUTO_MDIX_DISABLE 0x0008
  249. #define PHY_REMOTE_FAULT_DISABLE 0x0004
  250. #define PHY_TRANSMIT_DISABLE 0x0002
  251. #define PHY_LED_DISABLE 0x0001
  252. #define KS884X_PHY_STATUS_OFFSET 0x02
  253. /* Mode Status Register */
  254. #define PHY_REG_STATUS 1
  255. #define PHY_100BT4_CAPABLE 0x8000
  256. #define PHY_100BTX_FD_CAPABLE 0x4000
  257. #define PHY_100BTX_CAPABLE 0x2000
  258. #define PHY_10BT_FD_CAPABLE 0x1000
  259. #define PHY_10BT_CAPABLE 0x0800
  260. #define PHY_MII_SUPPRESS_CAPABLE 0x0040
  261. #define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
  262. #define PHY_REMOTE_FAULT 0x0010
  263. #define PHY_AUTO_NEG_CAPABLE 0x0008
  264. #define PHY_LINK_STATUS 0x0004
  265. #define PHY_JABBER_DETECT 0x0002
  266. #define PHY_EXTENDED_CAPABILITY 0x0001
  267. #define KS884X_PHY_ID_1_OFFSET 0x04
  268. #define KS884X_PHY_ID_2_OFFSET 0x06
  269. /* PHY Identifier Registers */
  270. #define PHY_REG_ID_1 2
  271. #define PHY_REG_ID_2 3
  272. #define KS884X_PHY_AUTO_NEG_OFFSET 0x08
  273. /* Auto-Negotiation Advertisement Register */
  274. #define PHY_REG_AUTO_NEGOTIATION 4
  275. #define PHY_AUTO_NEG_NEXT_PAGE 0x8000
  276. #define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
  277. /* Not supported. */
  278. #define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
  279. #define PHY_AUTO_NEG_SYM_PAUSE 0x0400
  280. #define PHY_AUTO_NEG_100BT4 0x0200
  281. #define PHY_AUTO_NEG_100BTX_FD 0x0100
  282. #define PHY_AUTO_NEG_100BTX 0x0080
  283. #define PHY_AUTO_NEG_10BT_FD 0x0040
  284. #define PHY_AUTO_NEG_10BT 0x0020
  285. #define PHY_AUTO_NEG_SELECTOR 0x001F
  286. #define PHY_AUTO_NEG_802_3 0x0001
  287. #define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
  288. #define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
  289. /* Auto-Negotiation Link Partner Ability Register */
  290. #define PHY_REG_REMOTE_CAPABILITY 5
  291. #define PHY_REMOTE_NEXT_PAGE 0x8000
  292. #define PHY_REMOTE_ACKNOWLEDGE 0x4000
  293. #define PHY_REMOTE_REMOTE_FAULT 0x2000
  294. #define PHY_REMOTE_SYM_PAUSE 0x0400
  295. #define PHY_REMOTE_100BTX_FD 0x0100
  296. #define PHY_REMOTE_100BTX 0x0080
  297. #define PHY_REMOTE_10BT_FD 0x0040
  298. #define PHY_REMOTE_10BT 0x0020
  299. /* P1VCT */
  300. #define KS884X_P1VCT_P 0x04F0
  301. #define KS884X_P1PHYCTRL_P 0x04F2
  302. /* P2VCT */
  303. #define KS884X_P2VCT_P 0x04F4
  304. #define KS884X_P2PHYCTRL_P 0x04F6
  305. #define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
  306. #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
  307. #define KS884X_PHY_LINK_MD_OFFSET 0x00
  308. #define PHY_START_CABLE_DIAG 0x8000
  309. #define PHY_CABLE_DIAG_RESULT 0x6000
  310. #define PHY_CABLE_STAT_NORMAL 0x0000
  311. #define PHY_CABLE_STAT_OPEN 0x2000
  312. #define PHY_CABLE_STAT_SHORT 0x4000
  313. #define PHY_CABLE_STAT_FAILED 0x6000
  314. #define PHY_CABLE_10M_SHORT 0x1000
  315. #define PHY_CABLE_FAULT_COUNTER 0x01FF
  316. #define KS884X_PHY_PHY_CTRL_OFFSET 0x02
  317. #define PHY_STAT_REVERSED_POLARITY 0x0020
  318. #define PHY_STAT_MDIX 0x0010
  319. #define PHY_FORCE_LINK 0x0008
  320. #define PHY_POWER_SAVING_DISABLE 0x0004
  321. #define PHY_REMOTE_LOOPBACK 0x0002
  322. /* SIDER */
  323. #define KS884X_SIDER_P 0x0400
  324. #define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
  325. #define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
  326. #define REG_FAMILY_ID 0x88
  327. #define REG_CHIP_ID_41 0x8810
  328. #define REG_CHIP_ID_42 0x8800
  329. #define KS884X_CHIP_ID_MASK_41 0xFF10
  330. #define KS884X_CHIP_ID_MASK 0xFFF0
  331. #define KS884X_CHIP_ID_SHIFT 4
  332. #define KS884X_REVISION_MASK 0x000E
  333. #define KS884X_REVISION_SHIFT 1
  334. #define KS8842_START 0x0001
  335. #define CHIP_IP_41_M 0x8810
  336. #define CHIP_IP_42_M 0x8800
  337. #define CHIP_IP_61_M 0x8890
  338. #define CHIP_IP_62_M 0x8880
  339. #define CHIP_IP_41_P 0x8850
  340. #define CHIP_IP_42_P 0x8840
  341. #define CHIP_IP_61_P 0x88D0
  342. #define CHIP_IP_62_P 0x88C0
  343. /* SGCR1 */
  344. #define KS8842_SGCR1_P 0x0402
  345. #define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
  346. #define SWITCH_PASS_ALL 0x8000
  347. #define SWITCH_TX_FLOW_CTRL 0x2000
  348. #define SWITCH_RX_FLOW_CTRL 0x1000
  349. #define SWITCH_CHECK_LENGTH 0x0800
  350. #define SWITCH_AGING_ENABLE 0x0400
  351. #define SWITCH_FAST_AGING 0x0200
  352. #define SWITCH_AGGR_BACKOFF 0x0100
  353. #define SWITCH_PASS_PAUSE 0x0008
  354. #define SWITCH_LINK_AUTO_AGING 0x0001
  355. /* SGCR2 */
  356. #define KS8842_SGCR2_P 0x0404
  357. #define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
  358. #define SWITCH_VLAN_ENABLE 0x8000
  359. #define SWITCH_IGMP_SNOOP 0x4000
  360. #define IPV6_MLD_SNOOP_ENABLE 0x2000
  361. #define IPV6_MLD_SNOOP_OPTION 0x1000
  362. #define PRIORITY_SCHEME_SELECT 0x0800
  363. #define SWITCH_MIRROR_RX_TX 0x0100
  364. #define UNICAST_VLAN_BOUNDARY 0x0080
  365. #define MULTICAST_STORM_DISABLE 0x0040
  366. #define SWITCH_BACK_PRESSURE 0x0020
  367. #define FAIR_FLOW_CTRL 0x0010
  368. #define NO_EXC_COLLISION_DROP 0x0008
  369. #define SWITCH_HUGE_PACKET 0x0004
  370. #define SWITCH_LEGAL_PACKET 0x0002
  371. #define SWITCH_BUF_RESERVE 0x0001
  372. /* SGCR3 */
  373. #define KS8842_SGCR3_P 0x0406
  374. #define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
  375. #define BROADCAST_STORM_RATE_LO 0xFF00
  376. #define SWITCH_REPEATER 0x0080
  377. #define SWITCH_HALF_DUPLEX 0x0040
  378. #define SWITCH_FLOW_CTRL 0x0020
  379. #define SWITCH_10_MBIT 0x0010
  380. #define SWITCH_REPLACE_NULL_VID 0x0008
  381. #define BROADCAST_STORM_RATE_HI 0x0007
  382. #define BROADCAST_STORM_RATE 0x07FF
  383. /* SGCR4 */
  384. #define KS8842_SGCR4_P 0x0408
  385. /* SGCR5 */
  386. #define KS8842_SGCR5_P 0x040A
  387. #define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
  388. #define LED_MODE 0x8200
  389. #define LED_SPEED_DUPLEX_ACT 0x0000
  390. #define LED_SPEED_DUPLEX_LINK_ACT 0x8000
  391. #define LED_DUPLEX_10_100 0x0200
  392. /* SGCR6 */
  393. #define KS8842_SGCR6_P 0x0410
  394. #define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
  395. #define KS8842_PRIORITY_MASK 3
  396. #define KS8842_PRIORITY_SHIFT 2
  397. /* SGCR7 */
  398. #define KS8842_SGCR7_P 0x0412
  399. #define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
  400. #define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
  401. #define SWITCH_UNK_DEF_PORT_3 0x0004
  402. #define SWITCH_UNK_DEF_PORT_2 0x0002
  403. #define SWITCH_UNK_DEF_PORT_1 0x0001
  404. /* MACAR1 */
  405. #define KS8842_MACAR1_P 0x0470
  406. #define KS8842_MACAR2_P 0x0472
  407. #define KS8842_MACAR3_P 0x0474
  408. #define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
  409. #define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
  410. #define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
  411. #define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
  412. #define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
  413. #define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
  414. /* TOSR1 */
  415. #define KS8842_TOSR1_P 0x0480
  416. #define KS8842_TOSR2_P 0x0482
  417. #define KS8842_TOSR3_P 0x0484
  418. #define KS8842_TOSR4_P 0x0486
  419. #define KS8842_TOSR5_P 0x0488
  420. #define KS8842_TOSR6_P 0x048A
  421. #define KS8842_TOSR7_P 0x0490
  422. #define KS8842_TOSR8_P 0x0492
  423. #define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
  424. #define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
  425. #define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
  426. #define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
  427. #define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
  428. #define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
  429. #define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
  430. #define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
  431. /* P1CR1 */
  432. #define KS8842_P1CR1_P 0x0500
  433. #define KS8842_P1CR2_P 0x0502
  434. #define KS8842_P1VIDR_P 0x0504
  435. #define KS8842_P1CR3_P 0x0506
  436. #define KS8842_P1IRCR_P 0x0508
  437. #define KS8842_P1ERCR_P 0x050A
  438. #define KS884X_P1SCSLMD_P 0x0510
  439. #define KS884X_P1CR4_P 0x0512
  440. #define KS884X_P1SR_P 0x0514
  441. /* P2CR1 */
  442. #define KS8842_P2CR1_P 0x0520
  443. #define KS8842_P2CR2_P 0x0522
  444. #define KS8842_P2VIDR_P 0x0524
  445. #define KS8842_P2CR3_P 0x0526
  446. #define KS8842_P2IRCR_P 0x0528
  447. #define KS8842_P2ERCR_P 0x052A
  448. #define KS884X_P2SCSLMD_P 0x0530
  449. #define KS884X_P2CR4_P 0x0532
  450. #define KS884X_P2SR_P 0x0534
  451. /* P3CR1 */
  452. #define KS8842_P3CR1_P 0x0540
  453. #define KS8842_P3CR2_P 0x0542
  454. #define KS8842_P3VIDR_P 0x0544
  455. #define KS8842_P3CR3_P 0x0546
  456. #define KS8842_P3IRCR_P 0x0548
  457. #define KS8842_P3ERCR_P 0x054A
  458. #define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
  459. #define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
  460. #define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
  461. #define PORT_CTRL_ADDR(port, addr) \
  462. (addr = KS8842_PORT_1_CTRL_1 + (port) * \
  463. (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
  464. #define KS8842_PORT_CTRL_1_OFFSET 0x00
  465. #define PORT_BROADCAST_STORM 0x0080
  466. #define PORT_DIFFSERV_ENABLE 0x0040
  467. #define PORT_802_1P_ENABLE 0x0020
  468. #define PORT_BASED_PRIORITY_MASK 0x0018
  469. #define PORT_BASED_PRIORITY_BASE 0x0003
  470. #define PORT_BASED_PRIORITY_SHIFT 3
  471. #define PORT_BASED_PRIORITY_0 0x0000
  472. #define PORT_BASED_PRIORITY_1 0x0008
  473. #define PORT_BASED_PRIORITY_2 0x0010
  474. #define PORT_BASED_PRIORITY_3 0x0018
  475. #define PORT_INSERT_TAG 0x0004
  476. #define PORT_REMOVE_TAG 0x0002
  477. #define PORT_PRIO_QUEUE_ENABLE 0x0001
  478. #define KS8842_PORT_CTRL_2_OFFSET 0x02
  479. #define PORT_INGRESS_VLAN_FILTER 0x4000
  480. #define PORT_DISCARD_NON_VID 0x2000
  481. #define PORT_FORCE_FLOW_CTRL 0x1000
  482. #define PORT_BACK_PRESSURE 0x0800
  483. #define PORT_TX_ENABLE 0x0400
  484. #define PORT_RX_ENABLE 0x0200
  485. #define PORT_LEARN_DISABLE 0x0100
  486. #define PORT_MIRROR_SNIFFER 0x0080
  487. #define PORT_MIRROR_RX 0x0040
  488. #define PORT_MIRROR_TX 0x0020
  489. #define PORT_USER_PRIORITY_CEILING 0x0008
  490. #define PORT_VLAN_MEMBERSHIP 0x0007
  491. #define KS8842_PORT_CTRL_VID_OFFSET 0x04
  492. #define PORT_DEFAULT_VID 0x0001
  493. #define KS8842_PORT_CTRL_3_OFFSET 0x06
  494. #define PORT_INGRESS_LIMIT_MODE 0x000C
  495. #define PORT_INGRESS_ALL 0x0000
  496. #define PORT_INGRESS_UNICAST 0x0004
  497. #define PORT_INGRESS_MULTICAST 0x0008
  498. #define PORT_INGRESS_BROADCAST 0x000C
  499. #define PORT_COUNT_IFG 0x0002
  500. #define PORT_COUNT_PREAMBLE 0x0001
  501. #define KS8842_PORT_IN_RATE_OFFSET 0x08
  502. #define KS8842_PORT_OUT_RATE_OFFSET 0x0A
  503. #define PORT_PRIORITY_RATE 0x0F
  504. #define PORT_PRIORITY_RATE_SHIFT 4
  505. #define KS884X_PORT_LINK_MD 0x10
  506. #define PORT_CABLE_10M_SHORT 0x8000
  507. #define PORT_CABLE_DIAG_RESULT 0x6000
  508. #define PORT_CABLE_STAT_NORMAL 0x0000
  509. #define PORT_CABLE_STAT_OPEN 0x2000
  510. #define PORT_CABLE_STAT_SHORT 0x4000
  511. #define PORT_CABLE_STAT_FAILED 0x6000
  512. #define PORT_START_CABLE_DIAG 0x1000
  513. #define PORT_FORCE_LINK 0x0800
  514. #define PORT_POWER_SAVING_DISABLE 0x0400
  515. #define PORT_PHY_REMOTE_LOOPBACK 0x0200
  516. #define PORT_CABLE_FAULT_COUNTER 0x01FF
  517. #define KS884X_PORT_CTRL_4_OFFSET 0x12
  518. #define PORT_LED_OFF 0x8000
  519. #define PORT_TX_DISABLE 0x4000
  520. #define PORT_AUTO_NEG_RESTART 0x2000
  521. #define PORT_REMOTE_FAULT_DISABLE 0x1000
  522. #define PORT_POWER_DOWN 0x0800
  523. #define PORT_AUTO_MDIX_DISABLE 0x0400
  524. #define PORT_FORCE_MDIX 0x0200
  525. #define PORT_LOOPBACK 0x0100
  526. #define PORT_AUTO_NEG_ENABLE 0x0080
  527. #define PORT_FORCE_100_MBIT 0x0040
  528. #define PORT_FORCE_FULL_DUPLEX 0x0020
  529. #define PORT_AUTO_NEG_SYM_PAUSE 0x0010
  530. #define PORT_AUTO_NEG_100BTX_FD 0x0008
  531. #define PORT_AUTO_NEG_100BTX 0x0004
  532. #define PORT_AUTO_NEG_10BT_FD 0x0002
  533. #define PORT_AUTO_NEG_10BT 0x0001
  534. #define KS884X_PORT_STATUS_OFFSET 0x14
  535. #define PORT_HP_MDIX 0x8000
  536. #define PORT_REVERSED_POLARITY 0x2000
  537. #define PORT_RX_FLOW_CTRL 0x0800
  538. #define PORT_TX_FLOW_CTRL 0x1000
  539. #define PORT_STATUS_SPEED_100MBIT 0x0400
  540. #define PORT_STATUS_FULL_DUPLEX 0x0200
  541. #define PORT_REMOTE_FAULT 0x0100
  542. #define PORT_MDIX_STATUS 0x0080
  543. #define PORT_AUTO_NEG_COMPLETE 0x0040
  544. #define PORT_STATUS_LINK_GOOD 0x0020
  545. #define PORT_REMOTE_SYM_PAUSE 0x0010
  546. #define PORT_REMOTE_100BTX_FD 0x0008
  547. #define PORT_REMOTE_100BTX 0x0004
  548. #define PORT_REMOTE_10BT_FD 0x0002
  549. #define PORT_REMOTE_10BT 0x0001
  550. /*
  551. #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
  552. #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
  553. #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
  554. #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
  555. #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
  556. #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
  557. */
  558. #define STATIC_MAC_TABLE_ADDR 0x0000FFFF
  559. #define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
  560. #define STATIC_MAC_TABLE_VALID 0x00080000
  561. #define STATIC_MAC_TABLE_OVERRIDE 0x00100000
  562. #define STATIC_MAC_TABLE_USE_FID 0x00200000
  563. #define STATIC_MAC_TABLE_FID 0x03C00000
  564. #define STATIC_MAC_FWD_PORTS_SHIFT 16
  565. #define STATIC_MAC_FID_SHIFT 22
  566. /*
  567. #define VLAN_TABLE_VID 00-00000000-00000FFF
  568. #define VLAN_TABLE_FID 00-00000000-0000F000
  569. #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
  570. #define VLAN_TABLE_VALID 00-00000000-00080000
  571. */
  572. #define VLAN_TABLE_VID 0x00000FFF
  573. #define VLAN_TABLE_FID 0x0000F000
  574. #define VLAN_TABLE_MEMBERSHIP 0x00070000
  575. #define VLAN_TABLE_VALID 0x00080000
  576. #define VLAN_TABLE_FID_SHIFT 12
  577. #define VLAN_TABLE_MEMBERSHIP_SHIFT 16
  578. /*
  579. #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
  580. #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
  581. #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
  582. #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
  583. #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
  584. #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
  585. #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
  586. #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
  587. */
  588. #define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
  589. #define DYNAMIC_MAC_TABLE_FID 0x000F0000
  590. #define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
  591. #define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
  592. #define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
  593. #define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
  594. #define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
  595. #define DYNAMIC_MAC_TABLE_RESERVED 0x78
  596. #define DYNAMIC_MAC_TABLE_NOT_READY 0x80
  597. #define DYNAMIC_MAC_FID_SHIFT 16
  598. #define DYNAMIC_MAC_SRC_PORT_SHIFT 20
  599. #define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
  600. #define DYNAMIC_MAC_ENTRIES_SHIFT 24
  601. #define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
  602. /*
  603. #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
  604. #define MIB_COUNTER_VALID 00-00000000-40000000
  605. #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
  606. */
  607. #define MIB_COUNTER_VALUE 0x3FFFFFFF
  608. #define MIB_COUNTER_VALID 0x40000000
  609. #define MIB_COUNTER_OVERFLOW 0x80000000
  610. #define MIB_PACKET_DROPPED 0x0000FFFF
  611. #define KS_MIB_PACKET_DROPPED_TX_0 0x100
  612. #define KS_MIB_PACKET_DROPPED_TX_1 0x101
  613. #define KS_MIB_PACKET_DROPPED_TX 0x102
  614. #define KS_MIB_PACKET_DROPPED_RX_0 0x103
  615. #define KS_MIB_PACKET_DROPPED_RX_1 0x104
  616. #define KS_MIB_PACKET_DROPPED_RX 0x105
  617. /* Change default LED mode. */
  618. #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
  619. #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
  620. #define MAX_ETHERNET_BODY_SIZE 1500
  621. #define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
  622. #define MAX_ETHERNET_PACKET_SIZE \
  623. (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
  624. #define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
  625. #define MAX_RX_BUF_SIZE (1912 + 4)
  626. #define ADDITIONAL_ENTRIES 16
  627. #define MAX_MULTICAST_LIST 32
  628. #define HW_MULTICAST_SIZE 8
  629. #define HW_TO_DEV_PORT(port) (port - 1)
  630. enum {
  631. media_connected,
  632. media_disconnected
  633. };
  634. enum {
  635. OID_COUNTER_UNKOWN,
  636. OID_COUNTER_FIRST,
  637. /* total transmit errors */
  638. OID_COUNTER_XMIT_ERROR,
  639. /* total receive errors */
  640. OID_COUNTER_RCV_ERROR,
  641. OID_COUNTER_LAST
  642. };
  643. /*
  644. * Hardware descriptor definitions
  645. */
  646. #define DESC_ALIGNMENT 16
  647. #define BUFFER_ALIGNMENT 8
  648. #define NUM_OF_RX_DESC 64
  649. #define NUM_OF_TX_DESC 64
  650. #define KS_DESC_RX_FRAME_LEN 0x000007FF
  651. #define KS_DESC_RX_FRAME_TYPE 0x00008000
  652. #define KS_DESC_RX_ERROR_CRC 0x00010000
  653. #define KS_DESC_RX_ERROR_RUNT 0x00020000
  654. #define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
  655. #define KS_DESC_RX_ERROR_PHY 0x00080000
  656. #define KS884X_DESC_RX_PORT_MASK 0x00300000
  657. #define KS_DESC_RX_MULTICAST 0x01000000
  658. #define KS_DESC_RX_ERROR 0x02000000
  659. #define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
  660. #define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
  661. #define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
  662. #define KS_DESC_RX_LAST 0x20000000
  663. #define KS_DESC_RX_FIRST 0x40000000
  664. #define KS_DESC_RX_ERROR_COND \
  665. (KS_DESC_RX_ERROR_CRC | \
  666. KS_DESC_RX_ERROR_RUNT | \
  667. KS_DESC_RX_ERROR_PHY | \
  668. KS_DESC_RX_ERROR_TOO_LONG)
  669. #define KS_DESC_HW_OWNED 0x80000000
  670. #define KS_DESC_BUF_SIZE 0x000007FF
  671. #define KS884X_DESC_TX_PORT_MASK 0x00300000
  672. #define KS_DESC_END_OF_RING 0x02000000
  673. #define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
  674. #define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
  675. #define KS_DESC_TX_CSUM_GEN_IP 0x10000000
  676. #define KS_DESC_TX_LAST 0x20000000
  677. #define KS_DESC_TX_FIRST 0x40000000
  678. #define KS_DESC_TX_INTERRUPT 0x80000000
  679. #define KS_DESC_PORT_SHIFT 20
  680. #define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
  681. #define KS_DESC_TX_MASK \
  682. (KS_DESC_TX_INTERRUPT | \
  683. KS_DESC_TX_FIRST | \
  684. KS_DESC_TX_LAST | \
  685. KS_DESC_TX_CSUM_GEN_IP | \
  686. KS_DESC_TX_CSUM_GEN_TCP | \
  687. KS_DESC_TX_CSUM_GEN_UDP | \
  688. KS_DESC_BUF_SIZE)
  689. struct ksz_desc_rx_stat {
  690. #ifdef __BIG_ENDIAN_BITFIELD
  691. u32 hw_owned:1;
  692. u32 first_desc:1;
  693. u32 last_desc:1;
  694. u32 csum_err_ip:1;
  695. u32 csum_err_tcp:1;
  696. u32 csum_err_udp:1;
  697. u32 error:1;
  698. u32 multicast:1;
  699. u32 src_port:4;
  700. u32 err_phy:1;
  701. u32 err_too_long:1;
  702. u32 err_runt:1;
  703. u32 err_crc:1;
  704. u32 frame_type:1;
  705. u32 reserved1:4;
  706. u32 frame_len:11;
  707. #else
  708. u32 frame_len:11;
  709. u32 reserved1:4;
  710. u32 frame_type:1;
  711. u32 err_crc:1;
  712. u32 err_runt:1;
  713. u32 err_too_long:1;
  714. u32 err_phy:1;
  715. u32 src_port:4;
  716. u32 multicast:1;
  717. u32 error:1;
  718. u32 csum_err_udp:1;
  719. u32 csum_err_tcp:1;
  720. u32 csum_err_ip:1;
  721. u32 last_desc:1;
  722. u32 first_desc:1;
  723. u32 hw_owned:1;
  724. #endif
  725. };
  726. struct ksz_desc_tx_stat {
  727. #ifdef __BIG_ENDIAN_BITFIELD
  728. u32 hw_owned:1;
  729. u32 reserved1:31;
  730. #else
  731. u32 reserved1:31;
  732. u32 hw_owned:1;
  733. #endif
  734. };
  735. struct ksz_desc_rx_buf {
  736. #ifdef __BIG_ENDIAN_BITFIELD
  737. u32 reserved4:6;
  738. u32 end_of_ring:1;
  739. u32 reserved3:14;
  740. u32 buf_size:11;
  741. #else
  742. u32 buf_size:11;
  743. u32 reserved3:14;
  744. u32 end_of_ring:1;
  745. u32 reserved4:6;
  746. #endif
  747. };
  748. struct ksz_desc_tx_buf {
  749. #ifdef __BIG_ENDIAN_BITFIELD
  750. u32 intr:1;
  751. u32 first_seg:1;
  752. u32 last_seg:1;
  753. u32 csum_gen_ip:1;
  754. u32 csum_gen_tcp:1;
  755. u32 csum_gen_udp:1;
  756. u32 end_of_ring:1;
  757. u32 reserved4:1;
  758. u32 dest_port:4;
  759. u32 reserved3:9;
  760. u32 buf_size:11;
  761. #else
  762. u32 buf_size:11;
  763. u32 reserved3:9;
  764. u32 dest_port:4;
  765. u32 reserved4:1;
  766. u32 end_of_ring:1;
  767. u32 csum_gen_udp:1;
  768. u32 csum_gen_tcp:1;
  769. u32 csum_gen_ip:1;
  770. u32 last_seg:1;
  771. u32 first_seg:1;
  772. u32 intr:1;
  773. #endif
  774. };
  775. union desc_stat {
  776. struct ksz_desc_rx_stat rx;
  777. struct ksz_desc_tx_stat tx;
  778. u32 data;
  779. };
/**
 * union desc_buf - Descriptor buffer field overlay
 * @rx: Receive buffer size and control bits.
 * @tx: Transmit buffer size and control bits.
 * @data: Raw 32-bit access to the whole buffer field.
 */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};

/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Physical address of next hardware descriptor.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};

/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};

/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual len used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};

/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 * manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 * memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};

/* Convenience accessor for the DMA buffer inside a descriptor. */
#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))

/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};
/*
 * KSZ8842 switch definitions
 */

/* Indirect access table selectors (written into the IACR register). */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};

#define LEARNED_MAC_TABLE_ENTRIES 1024
#define STATIC_MAC_TABLE_ENTRIES 8

/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr: MAC address to filter.
 * @vid: VID value.
 * @fid: FID value.
 * @ports: Port membership.
 * @override: Override setting.
 * @use_fid: FID use setting.
 * @valid: Valid setting indicating the entry is being used.
 */
struct ksz_mac_table {
	u8 mac_addr[ETH_ALEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};

#define VLAN_TABLE_ENTRIES 16

/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid: VID value.
 * @fid: FID value.
 * @member: Port membership.
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};

#define DIFFSERV_ENTRIES 64
#define PRIO_802_1P_ENTRIES 8
#define PRIO_QUEUES 4

/* Two external switch ports plus the host (CPU) port. */
#define SWITCH_PORT_NUM 2
#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
#define HOST_MASK (1 << SWITCH_PORT_NUM)
#define PORT_MASK 7

#define MAIN_PORT 0
#define OTHER_PORT 1
#define HOST_PORT SWITCH_PORT_NUM

#define PORT_COUNTER_NUM 0x20
#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)

/* MIB counter indexes as laid out in the hardware MIB table. */
#define MIB_COUNTER_RX_LO_PRIORITY 0x00
#define MIB_COUNTER_RX_HI_PRIORITY 0x01
#define MIB_COUNTER_RX_UNDERSIZE 0x02
#define MIB_COUNTER_RX_FRAGMENT 0x03
#define MIB_COUNTER_RX_OVERSIZE 0x04
#define MIB_COUNTER_RX_JABBER 0x05
#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
#define MIB_COUNTER_RX_CRC_ERR 0x07
#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
#define MIB_COUNTER_RX_CTRL_8808 0x09
#define MIB_COUNTER_RX_PAUSE 0x0A
#define MIB_COUNTER_RX_BROADCAST 0x0B
#define MIB_COUNTER_RX_MULTICAST 0x0C
#define MIB_COUNTER_RX_UNICAST 0x0D
#define MIB_COUNTER_RX_OCTET_64 0x0E
#define MIB_COUNTER_RX_OCTET_65_127 0x0F
#define MIB_COUNTER_RX_OCTET_128_255 0x10
#define MIB_COUNTER_RX_OCTET_256_511 0x11
#define MIB_COUNTER_RX_OCTET_512_1023 0x12
#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
#define MIB_COUNTER_TX_LO_PRIORITY 0x14
#define MIB_COUNTER_TX_HI_PRIORITY 0x15
#define MIB_COUNTER_TX_LATE_COLLISION 0x16
#define MIB_COUNTER_TX_PAUSE 0x17
#define MIB_COUNTER_TX_BROADCAST 0x18
#define MIB_COUNTER_TX_MULTICAST 0x19
#define MIB_COUNTER_TX_UNICAST 0x1A
#define MIB_COUNTER_TX_DEFERRED 0x1B
#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F

/* Dropped packet counts live past the regular MIB counter range. */
#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters needs to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;
	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];
};

/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate.
 * @tx_rate: Transmit priority rate.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};

/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 * (bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 * Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];
	u8 br_addr[ETH_ALEN];
	u8 other_addr[ETH_ALEN];
	u8 broad_per;
	u8 member;
};

#define TX_RATE_UNIT 10000

/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};

#define MAX_TX_HELD_SIZE 52000

/* Hardware features and bug fixes. */
#define LINK_INT_WORKING (1 << 0)
#define SMALL_PACKET_TX_BUG (1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
#define RX_HUGE_FRAME (1 << 4)
#define STP_SUPPORT (1 << 8)

/* Software overrides. */
#define PAUSE_FLOW_CTRL (1 << 0)
#define FAST_AGING (1 << 1)
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 * The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;
	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;
	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;
	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;
	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;
	u8 perm_addr[ETH_ALEN];
	u8 override_addr[ETH_ALEN];
	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;
	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];
	uint features;
	uint overrides;
	void *parent;
};

/* PHY flow control settings used by ksz_port.flow_ctrl. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;
	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];
	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};

/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};

/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used.
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};

/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to prevent hardware from accessing.
 * @lock: Mutex lock to prevent device from accessing.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;
	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;
	spinlock_t hwlock;
	struct mutex lock;
	int (*dev_rcv)(struct dev_info *);
	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;
	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];
	int mtu;
	int opened;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};

/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;
	struct semaphore proc_sem;
	int id;
	struct mii_if_info mii_if;
	u32 advertising;
	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};

#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"

static char version[] =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/* Fallback MAC address when the EEPROM/register address is unusable. */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
/*
 * Interrupt processing primary routines
 */

/* Acknowledge (clear) the given interrupt bits in the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
/*
 * Disable all hardware interrupts.  The current mask is remembered in
 * @intr_blocked so hw_ena_intr() can restore it; the register is read back
 * to refresh the cached @intr_set value.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Write the interrupt enable register and cache the value in @intr_set. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Re-enable interrupts previously blocked by hw_dis_intr(). */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
/* Remove @bit from the software interrupt mask (no hardware access). */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
/*
 * Turn off the given interrupts both in the hardware enable register and in
 * the software interrupt mask.
 */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.  The hardware register is only updated
 * when interrupts are not currently blocked by hw_dis_intr().
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;

	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
/*
 * Turn on the given interrupts in the hardware enable register, updating the
 * cached @intr_set.  Unlike hw_turn_on_intr() this does not touch the
 * software interrupt mask.
 */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Read interrupt status, masked by the currently enabled interrupts. */
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
	*status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
	*status = *status & hw->intr_set;
}
/*
 * Restore interrupts after hw_block_intr().  @interrupt is the mask returned
 * by hw_block_intr(); zero means interrupts were already blocked and are left
 * untouched.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask; zero when interrupts were
 * already blocked.
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
/*
 * Hardware descriptor routines
 */

/* Give the descriptor back to software by clearing the hardware-owned bit. */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
/*
 * Release the descriptor to the hardware.  The buffer field is written first
 * (only when it changed); the control word with the hw_owned bit is written
 * last so ownership transfers after the descriptor is fully set up.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
/*
 * Get the next receive descriptor from the ring, advancing the last index
 * with wrap-around and clearing the receive control bits of the descriptor.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}
/* Set the physical buffer address in the receive hardware descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}

/* Set the receive buffer size in the cached software descriptor. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}

/*
 * Get the next transmit descriptor from the ring, advancing the next index
 * with wrap-around and clearing the transmit control bits of the descriptor.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}

/* Set the physical buffer address in the transmit hardware descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}

/* Set the transmit buffer size in the cached software descriptor. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
/* Switch functions */

#define TABLE_READ 0x10
#define TABLE_SEL_SHIFT 2

/* Dummy read used to wait for an indirect table access to complete. */
#define HW_DELAY(hw, reg) \
	do { \
		u16 dummy; \
		dummy = readw(hw->io + reg); \
	} while (0)
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;

	interrupt = hw_block_intr(hw);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Data registers are loaded before the access register triggers
	 * the write.
	 */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_sta_mac_table - write to the static MAC table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @mac_addr: The MAC address.
 * @ports: The port members.
 * @override: The flag to override the port receive/transmit settings.
 * @valid: The flag to indicate entry is valid.
 * @use_fid: The flag to indicate the FID is valid.
 * @fid: The FID value.
 *
 * This routine writes an entry of the static MAC table of the switch. It
 * calls sw_w_table_64() to write the data.
 */
static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
	u8 ports, int override, int valid, int use_fid, u8 fid)
{
	u32 data_hi;
	u32 data_lo;

	/* The 6-byte MAC address occupies the low 48 bits of the entry. */
	data_lo = ((u32) mac_addr[2] << 24) |
		((u32) mac_addr[3] << 16) |
		((u32) mac_addr[4] << 8) | mac_addr[5];
	data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;

	if (override)
		data_hi |= STATIC_MAC_TABLE_OVERRIDE;
	if (use_fid) {
		data_hi |= STATIC_MAC_TABLE_USE_FID;
		data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
	}
	if (valid)
		data_hi |= STATIC_MAC_TABLE_VALID;

	sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
}
/**
 * sw_r_vlan_table - read from the VLAN table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
 * @member: Buffer to store the port membership.
 *
 * This function reads an entry of the VLAN table of the switch. It calls
 * sw_r_table() to get the data.
 *
 * Return 0 if the entry is valid; otherwise -1.
 */
static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
	u8 *member)
{
	u32 data;

	sw_r_table(hw, TABLE_VLAN, addr, &data);
	if (data & VLAN_TABLE_VALID) {
		*vid = (u16)(data & VLAN_TABLE_VID);
		*fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
		*member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
			VLAN_TABLE_MEMBERSHIP_SHIFT);
		return 0;
	}
	return -1;
}
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer to store the counter.  The value read from hardware is
 * accumulated into it, not assigned.
 *
 * This routine reads a MIB counter of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/* Poll until the counter value is flagged valid; give up after
	 * 100 reads.
	 */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Buffer holding the raw values from the previous read, used to
 * compute the increments (rx value first, then tx).
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;

	/* First pass reads the rx dropped count, second pass the tx count. */
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);

		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		hw_restore_intr(hw, interrupt);

		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;

			/* Hardware counter wrapped since the last read. */
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		++last;
		++cnt;
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow. The hardware should be acquired first before calling this
 * routine.
 *
 * Return 0 when all the counters have been read (this implementation always
 * reads all counters before returning).
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
/**
 * port_init_cnt - initialize MIB counter values
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to initialize all counters to zero if the hardware
 * cannot do it after reset.  Reading the counters clears them in hardware;
 * the accumulated software values are then discarded.
 */
static void port_init_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	mib->cnt_ptr = 0;
	if (mib->mib_start < PORT_COUNTER_NUM)
		do {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		} while (mib->cnt_ptr < PORT_COUNTER_NUM);
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	mib->cnt_ptr = 0;
}
/*
 * Port functions
 */

/**
 * port_chk - check port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to check.
 *
 * This function checks whether the specified bits of the port register are set
 * or not.
 *
 * Return 0 if the bits are not set.
 */
static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
{
	u32 addr;
	u16 data;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	data = readw(hw->io + addr);
	return (data & bits) == bits;
}
/**
 * port_cfg - set port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This routine sets or resets the specified bits of the port register.
 */
static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
	int set)
{
	u32 addr;
	u16 data;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	data = readw(hw->io + addr);
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
  1696. /**
  1697. * port_chk_shift - check port bit
  1698. * @hw: The hardware instance.
  1699. * @port: The port index.
  1700. * @offset: The offset of the register.
  1701. * @shift: Number of bits to shift.
  1702. *
  1703. * This function checks whether the specified port is set in the register or
  1704. * not.
  1705. *
  1706. * Return 0 if the port is not set.
  1707. */
  1708. static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
  1709. {
  1710. u16 data;
  1711. u16 bit = 1 << port;
  1712. data = readw(hw->io + addr);
  1713. data >>= shift;
  1714. return (data & bit) == bit;
  1715. }
  1716. /**
  1717. * port_cfg_shift - set port bit
  1718. * @hw: The hardware instance.
  1719. * @port: The port index.
  1720. * @offset: The offset of the register.
  1721. * @shift: Number of bits to shift.
  1722. * @set: The flag indicating whether the port is to be set or not.
  1723. *
  1724. * This routine sets or resets the specified port in the register.
  1725. */
  1726. static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
  1727. int set)
  1728. {
  1729. u16 data;
  1730. u16 bits = 1 << port;
  1731. data = readw(hw->io + addr);
  1732. bits <<= shift;
  1733. if (set)
  1734. data |= bits;
  1735. else
  1736. data &= ~bits;
  1737. writew(data, hw->io + addr);
  1738. }
  1739. /**
  1740. * port_r8 - read byte from port register
  1741. * @hw: The hardware instance.
  1742. * @port: The port index.
  1743. * @offset: The offset of the port register.
  1744. * @data: Buffer to store the data.
  1745. *
  1746. * This routine reads a byte from the port register.
  1747. */
  1748. static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
  1749. {
  1750. u32 addr;
  1751. PORT_CTRL_ADDR(port, addr);
  1752. addr += offset;
  1753. *data = readb(hw->io + addr);
  1754. }
  1755. /**
  1756. * port_r16 - read word from port register.
  1757. * @hw: The hardware instance.
  1758. * @port: The port index.
  1759. * @offset: The offset of the port register.
  1760. * @data: Buffer to store the data.
  1761. *
  1762. * This routine reads a word from the port register.
  1763. */
  1764. static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
  1765. {
  1766. u32 addr;
  1767. PORT_CTRL_ADDR(port, addr);
  1768. addr += offset;
  1769. *data = readw(hw->io + addr);
  1770. }
  1771. /**
  1772. * port_w16 - write word to port register.
  1773. * @hw: The hardware instance.
  1774. * @port: The port index.
  1775. * @offset: The offset of the port register.
  1776. * @data: Data to write.
  1777. *
  1778. * This routine writes a word to the port register.
  1779. */
  1780. static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
  1781. {
  1782. u32 addr;
  1783. PORT_CTRL_ADDR(port, addr);
  1784. addr += offset;
  1785. writew(data, hw->io + addr);
  1786. }
  1787. /**
  1788. * sw_chk - check switch register bits
  1789. * @hw: The hardware instance.
  1790. * @addr: The address of the switch register.
  1791. * @bits: The data bits to check.
  1792. *
  1793. * This function checks whether the specified bits of the switch register are
  1794. * set or not.
  1795. *
  1796. * Return 0 if the bits are not set.
  1797. */
  1798. static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
  1799. {
  1800. u16 data;
  1801. data = readw(hw->io + addr);
  1802. return (data & bits) == bits;
  1803. }
  1804. /**
  1805. * sw_cfg - set switch register bits
  1806. * @hw: The hardware instance.
  1807. * @addr: The address of the switch register.
  1808. * @bits: The data bits to set.
  1809. * @set: The flag indicating whether the bits are to be set or not.
  1810. *
  1811. * This function sets or resets the specified bits of the switch register.
  1812. */
  1813. static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
  1814. {
  1815. u16 data;
  1816. data = readw(hw->io + addr);
  1817. if (set)
  1818. data |= bits;
  1819. else
  1820. data &= ~bits;
  1821. writew(data, hw->io + addr);
  1822. }
  1823. /* Bandwidth */
  1824. static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
  1825. {
  1826. port_cfg(hw, p,
  1827. KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
  1828. }
  1829. static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
  1830. {
  1831. return port_chk(hw, p,
  1832. KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
  1833. }
  1834. /* Driver set switch broadcast storm protection at 10% rate. */
  1835. #define BROADCAST_STORM_PROTECTION_RATE 10
  1836. /* 148,800 frames * 67 ms / 100 */
  1837. #define BROADCAST_STORM_VALUE 9969
  1838. /**
  1839. * sw_cfg_broad_storm - configure broadcast storm threshold
  1840. * @hw: The hardware instance.
  1841. * @percent: Broadcast storm threshold in percent of transmit rate.
  1842. *
  1843. * This routine configures the broadcast storm threshold of the switch.
  1844. */
  1845. static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
  1846. {
  1847. u16 data;
  1848. u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
  1849. if (value > BROADCAST_STORM_RATE)
  1850. value = BROADCAST_STORM_RATE;
  1851. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1852. data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
  1853. data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
  1854. writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1855. }
  1856. /**
  1857. * sw_get_board_storm - get broadcast storm threshold
  1858. * @hw: The hardware instance.
  1859. * @percent: Buffer to store the broadcast storm threshold percentage.
  1860. *
  1861. * This routine retrieves the broadcast storm threshold of the switch.
  1862. */
  1863. static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
  1864. {
  1865. int num;
  1866. u16 data;
  1867. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1868. num = (data & BROADCAST_STORM_RATE_HI);
  1869. num <<= 8;
  1870. num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
  1871. num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE);
  1872. *percent = (u8) num;
  1873. }
  1874. /**
  1875. * sw_dis_broad_storm - disable broadstorm
  1876. * @hw: The hardware instance.
  1877. * @port: The port index.
  1878. *
  1879. * This routine disables the broadcast storm limit function of the switch.
  1880. */
  1881. static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
  1882. {
  1883. port_cfg_broad_storm(hw, port, 0);
  1884. }
  1885. /**
  1886. * sw_ena_broad_storm - enable broadcast storm
  1887. * @hw: The hardware instance.
  1888. * @port: The port index.
  1889. *
  1890. * This routine enables the broadcast storm limit function of the switch.
  1891. */
  1892. static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
  1893. {
  1894. sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
  1895. port_cfg_broad_storm(hw, port, 1);
  1896. }
  1897. /**
  1898. * sw_init_broad_storm - initialize broadcast storm
  1899. * @hw: The hardware instance.
  1900. *
  1901. * This routine initializes the broadcast storm limit function of the switch.
  1902. */
  1903. static void sw_init_broad_storm(struct ksz_hw *hw)
  1904. {
  1905. int port;
  1906. hw->ksz_switch->broad_per = 1;
  1907. sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
  1908. for (port = 0; port < TOTAL_PORT_NUM; port++)
  1909. sw_dis_broad_storm(hw, port);
  1910. sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
  1911. }
  1912. /**
  1913. * hw_cfg_broad_storm - configure broadcast storm
  1914. * @hw: The hardware instance.
  1915. * @percent: Broadcast storm threshold in percent of transmit rate.
  1916. *
  1917. * This routine configures the broadcast storm threshold of the switch.
  1918. * It is called by user functions. The hardware should be acquired first.
  1919. */
  1920. static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
  1921. {
  1922. if (percent > 100)
  1923. percent = 100;
  1924. sw_cfg_broad_storm(hw, percent);
  1925. sw_get_broad_storm(hw, &percent);
  1926. hw->ksz_switch->broad_per = percent;
  1927. }
  1928. /**
  1929. * sw_dis_prio_rate - disable switch priority rate
  1930. * @hw: The hardware instance.
  1931. * @port: The port index.
  1932. *
  1933. * This routine disables the priority rate function of the switch.
  1934. */
  1935. static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
  1936. {
  1937. u32 addr;
  1938. PORT_CTRL_ADDR(port, addr);
  1939. addr += KS8842_PORT_IN_RATE_OFFSET;
  1940. writel(0, hw->io + addr);
  1941. }
  1942. /**
  1943. * sw_init_prio_rate - initialize switch prioirty rate
  1944. * @hw: The hardware instance.
  1945. *
  1946. * This routine initializes the priority rate function of the switch.
  1947. */
  1948. static void sw_init_prio_rate(struct ksz_hw *hw)
  1949. {
  1950. int port;
  1951. int prio;
  1952. struct ksz_switch *sw = hw->ksz_switch;
  1953. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  1954. for (prio = 0; prio < PRIO_QUEUES; prio++) {
  1955. sw->port_cfg[port].rx_rate[prio] =
  1956. sw->port_cfg[port].tx_rate[prio] = 0;
  1957. }
  1958. sw_dis_prio_rate(hw, port);
  1959. }
  1960. }
  1961. /* Communication */
  1962. static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
  1963. {
  1964. port_cfg(hw, p,
  1965. KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
  1966. }
  1967. static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
  1968. {
  1969. port_cfg(hw, p,
  1970. KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
  1971. }
  1972. static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
  1973. {
  1974. return port_chk(hw, p,
  1975. KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
  1976. }
  1977. static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
  1978. {
  1979. return port_chk(hw, p,
  1980. KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
  1981. }
  1982. /* Spanning Tree */
  1983. static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
  1984. {
  1985. port_cfg(hw, p,
  1986. KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
  1987. }
  1988. static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
  1989. {
  1990. port_cfg(hw, p,
  1991. KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
  1992. }
  1993. static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
  1994. {
  1995. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
  1996. }
  1997. static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
  1998. {
  1999. if (!(hw->overrides & FAST_AGING)) {
  2000. sw_cfg_fast_aging(hw, 1);
  2001. mdelay(1);
  2002. sw_cfg_fast_aging(hw, 0);
  2003. }
  2004. }
  2005. /* VLAN */
  2006. static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
  2007. {
  2008. port_cfg(hw, p,
  2009. KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
  2010. }
  2011. static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
  2012. {
  2013. port_cfg(hw, p,
  2014. KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
  2015. }
  2016. static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
  2017. {
  2018. return port_chk(hw, p,
  2019. KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
  2020. }
  2021. static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
  2022. {
  2023. return port_chk(hw, p,
  2024. KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
  2025. }
  2026. static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
  2027. {
  2028. port_cfg(hw, p,
  2029. KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
  2030. }
  2031. static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
  2032. {
  2033. port_cfg(hw, p,
  2034. KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
  2035. }
  2036. static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
  2037. {
  2038. return port_chk(hw, p,
  2039. KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
  2040. }
  2041. static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
  2042. {
  2043. return port_chk(hw, p,
  2044. KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
  2045. }
  2046. /* Mirroring */
  2047. static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
  2048. {
  2049. port_cfg(hw, p,
  2050. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
  2051. }
  2052. static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
  2053. {
  2054. port_cfg(hw, p,
  2055. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
  2056. }
  2057. static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
  2058. {
  2059. port_cfg(hw, p,
  2060. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
  2061. }
  2062. static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
  2063. {
  2064. sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
  2065. }
  2066. static void sw_init_mirror(struct ksz_hw *hw)
  2067. {
  2068. int port;
  2069. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2070. port_cfg_mirror_sniffer(hw, port, 0);
  2071. port_cfg_mirror_rx(hw, port, 0);
  2072. port_cfg_mirror_tx(hw, port, 0);
  2073. }
  2074. sw_cfg_mirror_rx_tx(hw, 0);
  2075. }
  2076. static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
  2077. {
  2078. sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
  2079. SWITCH_UNK_DEF_PORT_ENABLE, set);
  2080. }
  2081. static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
  2082. {
  2083. return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
  2084. SWITCH_UNK_DEF_PORT_ENABLE);
  2085. }
  2086. static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
  2087. {
  2088. port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
  2089. }
  2090. static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
  2091. {
  2092. return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
  2093. }
  2094. /* Priority */
  2095. static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
  2096. {
  2097. port_cfg(hw, p,
  2098. KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
  2099. }
  2100. static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
  2101. {
  2102. port_cfg(hw, p,
  2103. KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
  2104. }
  2105. static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
  2106. {
  2107. port_cfg(hw, p,
  2108. KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
  2109. }
  2110. static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
  2111. {
  2112. port_cfg(hw, p,
  2113. KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
  2114. }
  2115. static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
  2116. {
  2117. return port_chk(hw, p,
  2118. KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
  2119. }
  2120. static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
  2121. {
  2122. return port_chk(hw, p,
  2123. KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
  2124. }
  2125. static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
  2126. {
  2127. return port_chk(hw, p,
  2128. KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
  2129. }
  2130. static inline int port_chk_prio(struct ksz_hw *hw, int p)
  2131. {
  2132. return port_chk(hw, p,
  2133. KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
  2134. }
  2135. /**
  2136. * sw_dis_diffserv - disable switch DiffServ priority
  2137. * @hw: The hardware instance.
  2138. * @port: The port index.
  2139. *
  2140. * This routine disables the DiffServ priority function of the switch.
  2141. */
  2142. static void sw_dis_diffserv(struct ksz_hw *hw, int port)
  2143. {
  2144. port_cfg_diffserv(hw, port, 0);
  2145. }
  2146. /**
  2147. * sw_dis_802_1p - disable switch 802.1p priority
  2148. * @hw: The hardware instance.
  2149. * @port: The port index.
  2150. *
  2151. * This routine disables the 802.1p priority function of the switch.
  2152. */
  2153. static void sw_dis_802_1p(struct ksz_hw *hw, int port)
  2154. {
  2155. port_cfg_802_1p(hw, port, 0);
  2156. }
  2157. /**
  2158. * sw_cfg_replace_null_vid -
  2159. * @hw: The hardware instance.
  2160. * @set: The flag to disable or enable.
  2161. *
  2162. */
  2163. static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
  2164. {
  2165. sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
  2166. }
  2167. /**
  2168. * sw_cfg_replace_vid - enable switch 802.10 priority re-mapping
  2169. * @hw: The hardware instance.
  2170. * @port: The port index.
  2171. * @set: The flag to disable or enable.
  2172. *
  2173. * This routine enables the 802.1p priority re-mapping function of the switch.
  2174. * That allows 802.1p priority field to be replaced with the port's default
  2175. * tag's priority value if the ingress packet's 802.1p priority has a higher
  2176. * priority than port's default tag's priority.
  2177. */
  2178. static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
  2179. {
  2180. port_cfg_replace_vid(hw, port, set);
  2181. }
  2182. /**
  2183. * sw_cfg_port_based - configure switch port based priority
  2184. * @hw: The hardware instance.
  2185. * @port: The port index.
  2186. * @prio: The priority to set.
  2187. *
  2188. * This routine configures the port based priority of the switch.
  2189. */
  2190. static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
  2191. {
  2192. u16 data;
  2193. if (prio > PORT_BASED_PRIORITY_BASE)
  2194. prio = PORT_BASED_PRIORITY_BASE;
  2195. hw->ksz_switch->port_cfg[port].port_prio = prio;
  2196. port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
  2197. data &= ~PORT_BASED_PRIORITY_MASK;
  2198. data |= prio << PORT_BASED_PRIORITY_SHIFT;
  2199. port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
  2200. }
  2201. /**
  2202. * sw_dis_multi_queue - disable transmit multiple queues
  2203. * @hw: The hardware instance.
  2204. * @port: The port index.
  2205. *
  2206. * This routine disables the transmit multiple queues selection of the switch
  2207. * port. Only single transmit queue on the port.
  2208. */
  2209. static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
  2210. {
  2211. port_cfg_prio(hw, port, 0);
  2212. }
  2213. /**
  2214. * sw_init_prio - initialize switch priority
  2215. * @hw: The hardware instance.
  2216. *
  2217. * This routine initializes the switch QoS priority functions.
  2218. */
  2219. static void sw_init_prio(struct ksz_hw *hw)
  2220. {
  2221. int port;
  2222. int tos;
  2223. struct ksz_switch *sw = hw->ksz_switch;
  2224. /*
  2225. * Init all the 802.1p tag priority value to be assigned to different
  2226. * priority queue.
  2227. */
  2228. sw->p_802_1p[0] = 0;
  2229. sw->p_802_1p[1] = 0;
  2230. sw->p_802_1p[2] = 1;
  2231. sw->p_802_1p[3] = 1;
  2232. sw->p_802_1p[4] = 2;
  2233. sw->p_802_1p[5] = 2;
  2234. sw->p_802_1p[6] = 3;
  2235. sw->p_802_1p[7] = 3;
  2236. /*
  2237. * Init all the DiffServ priority value to be assigned to priority
  2238. * queue 0.
  2239. */
  2240. for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
  2241. sw->diffserv[tos] = 0;
  2242. /* All QoS functions disabled. */
  2243. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2244. sw_dis_multi_queue(hw, port);
  2245. sw_dis_diffserv(hw, port);
  2246. sw_dis_802_1p(hw, port);
  2247. sw_cfg_replace_vid(hw, port, 0);
  2248. sw->port_cfg[port].port_prio = 0;
  2249. sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
  2250. }
  2251. sw_cfg_replace_null_vid(hw, 0);
  2252. }
  2253. /**
  2254. * port_get_def_vid - get port default VID.
  2255. * @hw: The hardware instance.
  2256. * @port: The port index.
  2257. * @vid: Buffer to store the VID.
  2258. *
  2259. * This routine retrieves the default VID of the port.
  2260. */
  2261. static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
  2262. {
  2263. u32 addr;
  2264. PORT_CTRL_ADDR(port, addr);
  2265. addr += KS8842_PORT_CTRL_VID_OFFSET;
  2266. *vid = readw(hw->io + addr);
  2267. }
  2268. /**
  2269. * sw_init_vlan - initialize switch VLAN
  2270. * @hw: The hardware instance.
  2271. *
  2272. * This routine initializes the VLAN function of the switch.
  2273. */
  2274. static void sw_init_vlan(struct ksz_hw *hw)
  2275. {
  2276. int port;
  2277. int entry;
  2278. struct ksz_switch *sw = hw->ksz_switch;
  2279. /* Read 16 VLAN entries from device's VLAN table. */
  2280. for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
  2281. sw_r_vlan_table(hw, entry,
  2282. &sw->vlan_table[entry].vid,
  2283. &sw->vlan_table[entry].fid,
  2284. &sw->vlan_table[entry].member);
  2285. }
  2286. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2287. port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
  2288. sw->port_cfg[port].member = PORT_MASK;
  2289. }
  2290. }
  2291. /**
  2292. * sw_cfg_port_base_vlan - configure port-based VLAN membership
  2293. * @hw: The hardware instance.
  2294. * @port: The port index.
  2295. * @member: The port-based VLAN membership.
  2296. *
  2297. * This routine configures the port-based VLAN membership of the port.
  2298. */
  2299. static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
  2300. {
  2301. u32 addr;
  2302. u8 data;
  2303. PORT_CTRL_ADDR(port, addr);
  2304. addr += KS8842_PORT_CTRL_2_OFFSET;
  2305. data = readb(hw->io + addr);
  2306. data &= ~PORT_VLAN_MEMBERSHIP;
  2307. data |= (member & PORT_MASK);
  2308. writeb(data, hw->io + addr);
  2309. hw->ksz_switch->port_cfg[port].member = member;
  2310. }
  2311. /**
  2312. * sw_get_addr - get the switch MAC address.
  2313. * @hw: The hardware instance.
  2314. * @mac_addr: Buffer to store the MAC address.
  2315. *
  2316. * This function retrieves the MAC address of the switch.
  2317. */
  2318. static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
  2319. {
  2320. int i;
  2321. for (i = 0; i < 6; i += 2) {
  2322. mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
  2323. mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
  2324. }
  2325. }
  2326. /**
  2327. * sw_set_addr - configure switch MAC address
  2328. * @hw: The hardware instance.
  2329. * @mac_addr: The MAC address.
  2330. *
  2331. * This function configures the MAC address of the switch.
  2332. */
  2333. static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
  2334. {
  2335. int i;
  2336. for (i = 0; i < 6; i += 2) {
  2337. writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
  2338. writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
  2339. }
  2340. }
  2341. /**
  2342. * sw_set_global_ctrl - set switch global control
  2343. * @hw: The hardware instance.
  2344. *
  2345. * This routine sets the global control of the switch function.
  2346. */
  2347. static void sw_set_global_ctrl(struct ksz_hw *hw)
  2348. {
  2349. u16 data;
  2350. /* Enable switch MII flow control. */
  2351. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  2352. data |= SWITCH_FLOW_CTRL;
  2353. writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  2354. data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
  2355. /* Enable aggressive back off algorithm in half duplex mode. */
  2356. data |= SWITCH_AGGR_BACKOFF;
  2357. /* Enable automatic fast aging when link changed detected. */
  2358. data |= SWITCH_AGING_ENABLE;
  2359. data |= SWITCH_LINK_AUTO_AGING;
  2360. if (hw->overrides & FAST_AGING)
  2361. data |= SWITCH_FAST_AGING;
  2362. else
  2363. data &= ~SWITCH_FAST_AGING;
  2364. writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
  2365. data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  2366. /* Enable no excessive collision drop. */
  2367. data |= NO_EXC_COLLISION_DROP;
  2368. writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  2369. }
/*
 * Spanning tree port states passed to port_set_stp_state.  STP_STATE_SIMPLE
 * enables both transmit and receive but keeps address learning disabled.
 */
enum {
	STP_STATE_DISABLED = 0,
	STP_STATE_LISTENING,
	STP_STATE_LEARNING,
	STP_STATE_FORWARDING,
	STP_STATE_BLOCKED,
	STP_STATE_SIMPLE
};
/**
 * port_set_stp_state - configure port spanning tree state
 * @hw: The hardware instance.
 * @port: The port index.
 * @state: The spanning tree state.
 *
 * This routine configures the spanning tree state of the port by adjusting
 * the transmit enable, receive enable and learn disable bits of the port
 * control 2 register, and mirrors the new state in the software port
 * configuration.
 */
static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
{
	u16 data;

	port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
	switch (state) {
	case STP_STATE_DISABLED:
		/* No traffic in either direction and no learning. */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LISTENING:
/*
 * No need to turn on transmit because of port direct mode.
 * Turning on receive is required if static MAC table is not setup.
 */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LEARNING:
		/* Receive and learn, but do not forward yet. */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_FORWARDING:
		/* Fully operational: traffic flows and learning is on. */
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_BLOCKED:
/*
 * Need to setup static MAC table with override to keep receiving BPDU
 * messages.  See sw_init_stp routine.
 */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_SIMPLE:
		/* Traffic flows both ways but learning stays disabled. */
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	}
	port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
	hw->ksz_switch->port_cfg[port].stp_state = state;
}
  2429. #define STP_ENTRY 0
  2430. #define BROADCAST_ENTRY 1
  2431. #define BRIDGE_ADDR_ENTRY 2
  2432. #define IPV6_ADDR_ENTRY 3
  2433. /**
  2434. * sw_clr_sta_mac_table - clear static MAC table
  2435. * @hw: The hardware instance.
  2436. *
  2437. * This routine clears the static MAC table.
  2438. */
  2439. static void sw_clr_sta_mac_table(struct ksz_hw *hw)
  2440. {
  2441. struct ksz_mac_table *entry;
  2442. int i;
  2443. for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
  2444. entry = &hw->ksz_switch->mac_table[i];
  2445. sw_w_sta_mac_table(hw, i,
  2446. entry->mac_addr, entry->ports,
  2447. entry->override, 0,
  2448. entry->use_fid, entry->fid);
  2449. }
  2450. }
  2451. /**
  2452. * sw_init_stp - initialize switch spanning tree support
  2453. * @hw: The hardware instance.
  2454. *
  2455. * This routine initializes the spanning tree support of the switch.
  2456. */
  2457. static void sw_init_stp(struct ksz_hw *hw)
  2458. {
  2459. struct ksz_mac_table *entry;
  2460. entry = &hw->ksz_switch->mac_table[STP_ENTRY];
  2461. entry->mac_addr[0] = 0x01;
  2462. entry->mac_addr[1] = 0x80;
  2463. entry->mac_addr[2] = 0xC2;
  2464. entry->mac_addr[3] = 0x00;
  2465. entry->mac_addr[4] = 0x00;
  2466. entry->mac_addr[5] = 0x00;
  2467. entry->ports = HOST_MASK;
  2468. entry->override = 1;
  2469. entry->valid = 1;
  2470. sw_w_sta_mac_table(hw, STP_ENTRY,
  2471. entry->mac_addr, entry->ports,
  2472. entry->override, entry->valid,
  2473. entry->use_fid, entry->fid);
  2474. }
  2475. /**
  2476. * sw_block_addr - block certain packets from the host port
  2477. * @hw: The hardware instance.
  2478. *
  2479. * This routine blocks certain packets from reaching to the host port.
  2480. */
  2481. static void sw_block_addr(struct ksz_hw *hw)
  2482. {
  2483. struct ksz_mac_table *entry;
  2484. int i;
  2485. for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
  2486. entry = &hw->ksz_switch->mac_table[i];
  2487. entry->valid = 0;
  2488. sw_w_sta_mac_table(hw, i,
  2489. entry->mac_addr, entry->ports,
  2490. entry->override, entry->valid,
  2491. entry->use_fid, entry->fid);
  2492. }
  2493. }
  2494. #define PHY_LINK_SUPPORT \
  2495. (PHY_AUTO_NEG_ASYM_PAUSE | \
  2496. PHY_AUTO_NEG_SYM_PAUSE | \
  2497. PHY_AUTO_NEG_100BT4 | \
  2498. PHY_AUTO_NEG_100BTX_FD | \
  2499. PHY_AUTO_NEG_100BTX | \
  2500. PHY_AUTO_NEG_10BT_FD | \
  2501. PHY_AUTO_NEG_10BT)
  2502. static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
  2503. {
  2504. *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2505. }
  2506. static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
  2507. {
  2508. writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2509. }
  2510. static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
  2511. {
  2512. *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
  2513. }
  2514. static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
  2515. {
  2516. *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
  2517. }
  2518. static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
  2519. {
  2520. writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
  2521. }
  2522. static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
  2523. {
  2524. *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
  2525. }
  2526. static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
  2527. {
  2528. *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2529. }
  2530. static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
  2531. {
  2532. writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2533. }
  2534. static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
  2535. {
  2536. *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
  2537. }
  2538. static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
  2539. {
  2540. writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
  2541. }
  2542. static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
  2543. {
  2544. *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
  2545. }
  2546. static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
  2547. {
  2548. writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
  2549. }
  2550. /**
  2551. * hw_r_phy - read data from PHY register
  2552. * @hw: The hardware instance.
  2553. * @port: Port to read.
  2554. * @reg: PHY register to read.
  2555. * @val: Buffer to store the read data.
  2556. *
  2557. * This routine reads data from the PHY register.
  2558. */
  2559. static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
  2560. {
  2561. int phy;
  2562. phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
  2563. *val = readw(hw->io + phy);
  2564. }
  2565. /**
  2566. * port_w_phy - write data to PHY register
  2567. * @hw: The hardware instance.
  2568. * @port: Port to write.
  2569. * @reg: PHY register to write.
  2570. * @val: Word data to write.
  2571. *
  2572. * This routine writes data to the PHY register.
  2573. */
  2574. static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
  2575. {
  2576. int phy;
  2577. phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
  2578. writew(val, hw->io + phy);
  2579. }
  2580. /*
  2581. * EEPROM access functions
  2582. */
  2583. #define AT93C_CODE 0
  2584. #define AT93C_WR_OFF 0x00
  2585. #define AT93C_WR_ALL 0x10
  2586. #define AT93C_ER_ALL 0x20
  2587. #define AT93C_WR_ON 0x30
  2588. #define AT93C_WRITE 1
  2589. #define AT93C_READ 2
  2590. #define AT93C_ERASE 3
  2591. #define EEPROM_DELAY 4
/* Drive the given EEPROM control GPIO line(s) low (read-modify-write). */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
/* Drive the given EEPROM control GPIO line(s) high (read-modify-write). */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
/* Sample the given EEPROM GPIO line(s); non-zero means the line is high. */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}
/* Generate one bit-bang clock pulse on the EEPROM serial clock line. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
/*
 * Clock in a 16-bit word from the EEPROM, MSB first.  The data line is
 * sampled while the clock is high, matching the three-wire protocol.
 */
static u16 spi_r(struct ksz_hw *hw)
{
	int i;
	u16 temp = 0;

	for (i = 15; i >= 0; i--) {
		raise_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
		temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
		drop_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
	}
	return temp;
}
  2632. static void spi_w(struct ksz_hw *hw, u16 data)
  2633. {
  2634. int i;
  2635. for (i = 15; i >= 0; i--) {
  2636. (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2637. drop_gpio(hw, EEPROM_DATA_OUT);
  2638. eeprom_clk(hw);
  2639. }
  2640. }
/*
 * Send an AT93C46 command frame: start bit, 2-bit opcode @data, then the
 * 6-bit address @reg, all clocked out MSB first.
 */
static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
{
	int i;

	/* Initial start bit */
	raise_gpio(hw, EEPROM_DATA_OUT);
	eeprom_clk(hw);

	/* AT93C operation */
	for (i = 1; i >= 0; i--) {
		(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}

	/* Address location */
	for (i = 5; i >= 0; i--) {
		(reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
  2660. #define EEPROM_DATA_RESERVED 0
  2661. #define EEPROM_DATA_MAC_ADDR_0 1
  2662. #define EEPROM_DATA_MAC_ADDR_1 2
  2663. #define EEPROM_DATA_MAC_ADDR_2 3
  2664. #define EEPROM_DATA_SUBSYS_ID 4
  2665. #define EEPROM_DATA_SUBSYS_VEN_ID 5
  2666. #define EEPROM_DATA_PM_CAP 6
  2667. /* User defined EEPROM data */
  2668. #define EEPROM_DATA_OTHER_MAC_ADDR 9
/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
	u16 data;

	/* Select the chip, issue the READ opcode, clock the word in. */
	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_READ, reg);
	data = spi_r(hw);
	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
	return data;
}
/**
 * eeprom_write - write to AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 * @data: The data value.
 *
 * This procedure writes a word to the AT93C46 EEPROM.
 *
 * Sequence: enable write (EWEN), erase the word, write the new word, then
 * disable write (EWDS).  After erase and write the chip signals completion
 * by driving its data-out line high; each wait is bounded by an 8 ms
 * timeout so a missing/broken EEPROM cannot hang the driver.
 */
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
	int timeout;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	/* Enable write. */
	spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Erase the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_ERASE, reg);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Write the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_WRITE, reg);
	spi_w(hw, data);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Disable write. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}
  2737. /*
  2738. * Link detection routines
  2739. */
  2740. static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
  2741. {
  2742. ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
  2743. switch (port->flow_ctrl) {
  2744. case PHY_FLOW_CTRL:
  2745. ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
  2746. break;
  2747. /* Not supported. */
  2748. case PHY_TX_ONLY:
  2749. case PHY_RX_ONLY:
  2750. default:
  2751. break;
  2752. }
  2753. return ctrl;
  2754. }
/*
 * Update the cached RX/TX DMA flow-control enable bits and, if the DMA
 * engine is currently enabled, push only the changed configuration words
 * to the hardware.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
	u32 rx_cfg;
	u32 tx_cfg;

	/* Remember old values so unchanged registers are not rewritten. */
	rx_cfg = hw->rx_cfg;
	tx_cfg = hw->tx_cfg;
	if (rx)
		hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
	else
		hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
	if (tx)
		hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
	else
		hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
	if (hw->enabled) {
		if (rx_cfg != hw->rx_cfg)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
		if (tx_cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/*
 * Resolve RX/TX pause usage from the local and remote auto-negotiation
 * advertisements (standard 802.3 pause resolution), unless flow control
 * is overridden by the user.  The result is applied to the DMA engine
 * only when the built-in switch is not used.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	/* User forced a fixed flow-control setting; leave it alone. */
	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
		if (local & PHY_AUTO_NEG_SYM_PAUSE) {
			/* Both sides advertise symmetric pause. */
			rx = tx = 1;
		} else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
				(local & PHY_AUTO_NEG_PAUSE) ==
				PHY_AUTO_NEG_ASYM_PAUSE) {
			/* Link partner receives pause; we may send it. */
			tx = 1;
		}
	} else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
		if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
			/* We advertise both; partner only sends pause. */
			rx = 1;
	}
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
/*
 * Apply a hardware-bug workaround after a link change: on chips with
 * HALF_DUPLEX_SIGNAL_BUG, transmit flow control must be turned off while
 * running half duplex (unless the user overrode flow control).
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
			!(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of the
 * switch ports.
 *
 * Interrupts are blocked for the duration of the poll.  For each port the
 * advertised/partner words are compared with the cached values and only
 * ports whose status changed are processed further: speed, duplex and flow
 * control are re-derived on a new link, and a link-down event is recorded
 * in the port MIB.  The first port found with a good link becomes the
 * "linked" port if the previous one went down.
 */
static void port_get_link_speed(struct ksz_port *port)
{
	uint interrupt;
	struct ksz_port_info *info;
	struct ksz_port_info *linked = NULL;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 status;
	u8 local;
	u8 remote;
	int i;
	int p;
	int change = 0;

	interrupt = hw_block_intr(hw);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/*
		 * Link status is changing all the time even when there is no
		 * cable connection!
		 */
		remote = status & (PORT_AUTO_NEG_COMPLETE |
			PORT_STATUS_LINK_GOOD);
		local = (u8) data;

		/* No change to status. */
		if (local == info->advertised && remote == info->partner)
			continue;

		info->advertised = local;
		info->partner = remote;
		if (status & PORT_STATUS_LINK_GOOD) {

			/* Remember the first linked port. */
			if (!linked)
				linked = info;

			/* Default 10 Mbit half duplex; upgrade from status. */
			info->tx_rate = 10 * TX_RATE_UNIT;
			if (status & PORT_STATUS_SPEED_100MBIT)
				info->tx_rate = 100 * TX_RATE_UNIT;

			info->duplex = 1;
			if (status & PORT_STATUS_FULL_DUPLEX)
				info->duplex = 2;

			if (media_connected != info->state) {
				/* Re-resolve flow control on a fresh link. */
				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
					&data);
				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
					&status);
				determine_flow_ctrl(hw, port, data, status);
				if (hw->ksz_switch) {
					port_cfg_back_pressure(hw, p,
						(1 == info->duplex));
				}
				change |= 1 << i;
				port_cfg_change(hw, port, info, status);
			}
			info->state = media_connected;
		} else {
			if (media_disconnected != info->state) {
				change |= 1 << i;

				/* Indicate the link just goes down. */
				hw->port_mib[p].link_down = 1;
			}
			info->state = media_disconnected;
		}
		hw->port_mib[p].state = (u8) info->state;
	}

	/* Switch to a newly linked port if the current one lost link. */
	if (linked && media_disconnected == port->linked->state)
		port->linked = linked;

	hw_restore_intr(hw, interrupt);
}
  2889. #define PHY_RESET_TIMEOUT 10
/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports.
 *
 * The full 10/100, half/full advertisement is built, then trimmed down if
 * the user requested a specific speed or duplex.  Auto-negotiation is
 * restarted only when the new advertisement differs from the current one
 * on a port with a good link, avoiding needless link flaps.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/* cfg keeps the old word only if the link is up. */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;

		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);

		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;

		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}
/**
 * port_force_link_speed - force port speed
 * @port: The port instance.
 *
 * This routine forces the link speed of the switch ports.
 *
 * Auto-negotiation is disabled and the speed/duplex bits are set directly
 * from port->speed (10/100) and port->duplex (1 = half, 2 = full); values
 * of 0 leave the corresponding bit unchanged.
 */
static void port_force_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	int i;
	int phy;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
		hw_r_phy_ctrl(hw, phy, &data);

		data &= ~PHY_AUTO_NEG_ENABLE;

		if (10 == port->speed)
			data &= ~PHY_SPEED_100MBIT;
		else if (100 == port->speed)
			data |= PHY_SPEED_100MBIT;
		if (1 == port->duplex)
			data &= ~PHY_FULL_DUPLEX;
		else if (2 == port->duplex)
			data |= PHY_FULL_DUPLEX;
		hw_w_phy_ctrl(hw, phy, data);
	}
}
/**
 * port_set_power_saving - set/clear PHY power-down on all ports
 * @port: The port instance.
 * @enable: Non-zero to power the PHYs down, zero to power them up.
 */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
  2972. /*
  2973. * KSZ8841 power management functions
  2974. */
  2975. /**
  2976. * hw_chk_wol_pme_status - check PMEN pin
  2977. * @hw: The hardware instance.
  2978. *
  2979. * This function is used to check PMEN pin is asserted.
  2980. *
  2981. * Return 1 if PMEN pin is asserted; otherwise, 0.
  2982. */
  2983. static int hw_chk_wol_pme_status(struct ksz_hw *hw)
  2984. {
  2985. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  2986. struct pci_dev *pdev = hw_priv->pdev;
  2987. u16 data;
  2988. if (!pdev->pm_cap)
  2989. return 0;
  2990. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  2991. return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
  2992. }
/**
 * hw_clr_wol_pme_status - clear PMEN pin
 * @hw: The hardware instance.
 *
 * This routine is used to clear PME_Status to deassert PMEN pin.
 */
static void hw_clr_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;

	/* Clear PME_Status to deassert PMEN pin (write-1-to-clear). */
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data |= PCI_PM_CTRL_PME_STATUS;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
 * @hw: The hardware instance.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable Wake-on-LAN.
 *
 * When enabling, PME generation is turned on and the device is put into
 * the D3hot power state; when disabling, PME generation is turned off
 * (the state field was already cleared to D0).
 */
static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data &= ~PCI_PM_CTRL_STATE_MASK;
	if (set)
		data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
	else
		data &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol - configure Wake-on-LAN features
 * @hw: The hardware instance.
 * @frame: The pattern frame bit.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable certain Wake-on-LAN features.
 */
static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
{
	u16 data;

	/* Read-modify-write the single WoL control bit. */
	data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
	if (set)
		data |= frame;
	else
		data &= ~frame;
	writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
}
/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index.
 * @mask_size: The size of the mask.
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine is used to program Wake-on-LAN pattern.
 *
 * The mask bytes are written to the hardware as-is; the pattern bytes that
 * the mask selects are gathered into @data and only their CRC is written,
 * since the hardware matches patterns by mask + CRC rather than raw bytes.
 * The pattern is limited to 64 bytes (the hardware mask covers 8 mask
 * bytes * 8 bits).
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
	const u8 *mask, uint frame_size, const u8 *pattern)
{
	int bits;
	int from;
	int len;
	int to;
	u32 crc;
	u8 data[64];
	u8 val = 0;

	/* Clamp the frame to what the mask (and hardware) can describe. */
	if (frame_size > mask_size * 8)
		frame_size = mask_size * 8;
	if (frame_size > 64)
		frame_size = 64;

	/* Each pattern entry occupies 0x10 bytes of register space. */
	i *= 0x10;
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);

	bits = len = from = to = 0;
	do {
		if (bits) {
			/* Consume one mask bit; keep byte if bit is set. */
			if ((val & 1))
				data[to++] = pattern[from];
			val >>= 1;
			++from;
			--bits;
		} else {
			/* Load the next mask byte and program it. */
			val = mask[len];
			writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
				+ len);
			++len;
			if (val)
				bits = 8;
			else
				/* All-zero mask byte: skip 8 pattern bytes. */
				from += 8;
		}
	} while (from < (int) frame_size);
	if (val) {
		/* Trim unused trailing mask bits from the last mask byte. */
		bits = mask[len - 1];
		val <<= (from % 8);
		bits &= ~val;
		writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
			1);
	}

	/* The hardware matches on the CRC of the selected bytes. */
	crc = ether_crc(to, data);
	writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}
/**
 * hw_add_wol_arp - add ARP pattern
 * @hw: The hardware instance.
 * @ip_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to add ARP pattern for waking up the host.
 *
 * The pattern is a broadcast ARP request; the mask selects the Ethernet
 * broadcast destination, the ARP header fields and the target IP address,
 * which is patched in at offset 38.
 */
static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
	static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
	u8 pattern[42] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x08, 0x06,
		0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00 };

	memcpy(&pattern[38], ip_addr, 4);
	hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
}
/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add broadcast pattern for waking up the host.
 * It matches the 6-byte all-ones Ethernet destination address.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add multicast pattern for waking up the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by IPv6 ping command.  Note that multicast packets are filtered through the
 * multicast hash table, so not all multicast packets can wake up the host.
 *
 * The solicited-node address 33:33:FF:xx:xx:xx is built from the last three
 * bytes of the device's MAC address.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };

	memcpy(&pattern[3], &hw->override_addr[3], 3);
	hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}
/**
 * hw_add_wol_ucast - add unicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add unicast pattern to wakeup the host.
 *
 * It is assumed the unicast packet is directed to the device, as the hardware
 * can only receive them in normal case.  The pattern is simply the device's
 * own MAC address as the Ethernet destination.
 */
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };

	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings (WAKE_* flags from ethtool).
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to enable Wake-on-LAN depending on driver settings.
 * Each pattern slot (0 = unicast, 1 = multicast, 2 = broadcast, 3 = ARP)
 * is programmed unconditionally; the per-frame enable bit controls whether
 * it actually wakes the host.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}
/**
 * hw_init - check driver is correct for the hardware
 * @hw: The hardware instance.
 *
 * This function checks the hardware is correct for this driver and sets the
 * hardware up for proper initialization.
 *
 * Return number of ports (1 for KSZ8841, 2 for KSZ8842) or 0 if the chip
 * ID is not recognized.
 */
static int hw_init(struct ksz_hw *hw)
{
	int rc = 0;
	u16 data;
	u16 revision;

	/* Set bus speed to 125MHz. */
	writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);

	/* Check KSZ884x chip ID. */
	data = readw(hw->io + KS884X_CHIP_ID_OFFSET);

	revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
	data &= KS884X_CHIP_ID_MASK_41;
	if (REG_CHIP_ID_41 == data)
		rc = 1;
	else if (REG_CHIP_ID_42 == data)
		rc = 2;
	else
		return 0;

	/* Setup hardware features or bug workarounds. */
	if (revision <= 1) {
		hw->features |= SMALL_PACKET_TX_BUG;
		if (1 == rc)
			hw->features |= HALF_DUPLEX_SIGNAL_BUG;
	}
	return rc;
}
/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine resets the hardware.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine setup the hardware for proper operation.  Only the cached
 * tx_cfg/rx_cfg words are built here; they are written to the DMA engine
 * later when the hardware is enabled.
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine setup the interrupt mask for proper operation.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
  3280. static void ksz_check_desc_num(struct ksz_desc_info *info)
  3281. {
  3282. #define MIN_DESC_SHIFT 2
  3283. int alloc = info->alloc;
  3284. int shift;
  3285. shift = 0;
  3286. while (!(alloc & 1)) {
  3287. shift++;
  3288. alloc >>= 1;
  3289. }
  3290. if (alloc != 1 || shift < MIN_DESC_SHIFT) {
  3291. pr_alert("Hardware descriptor numbers not right!\n");
  3292. while (alloc) {
  3293. shift++;
  3294. alloc >>= 1;
  3295. }
  3296. if (shift < MIN_DESC_SHIFT)
  3297. shift = MIN_DESC_SHIFT;
  3298. alloc = 1 << shift;
  3299. info->alloc = alloc;
  3300. }
  3301. info->mask = info->alloc - 1;
  3302. }
/*
 * Link the software descriptors to the hardware descriptor ring and chain
 * the hardware descriptors into a circular list via their DMA addresses.
 * The last descriptor wraps back to the start and, for receive rings, is
 * flagged end_of_ring.  The transmit flag @transmit is currently unused.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		/* Point each hardware descriptor at the next one's DMA addr. */
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;
	desc_info->cur = desc_info->ring;
}
/**
 * hw_set_desc_base - set descriptor base addresses
 * @hw: The hardware instance.
 * @tx_addr: The transmit descriptor base.
 * @rx_addr: The receive descriptor base.
 *
 * This routine programs the descriptor base addresses after reset.
 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors. */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}
  3337. static void hw_reset_pkts(struct ksz_desc_info *info)
  3338. {
  3339. info->cur = info->ring;
  3340. info->avail = info->alloc;
  3341. info->last = info->next = 0;
  3342. }
/* Kick the receive DMA engine to resume fetching descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
/**
 * hw_start_rx - start receiving
 * @hw: The hardware instance.
 *
 * This routine starts the receive function of the hardware.
 */
static void hw_start_rx(struct ksz_hw *hw)
{
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);

	/* rx_stop counts intentional starts; 0 means "stopped on purpose". */
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}
/**
 * hw_stop_rx - stop receiving
 * @hw: The hardware instance.
 *
 * This routine stops the receive function of the hardware.
 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	hw->rx_stop = 0;
	/* Stop is deliberate; do not report the RX-stopped interrupt. */
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}
/**
 * hw_start_tx - start transmitting
 * @hw: The hardware instance.
 *
 * This routine starts the transmit function of the hardware.
 */
static void hw_start_tx(struct ksz_hw *hw)
{
	writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_stop_tx - stop transmitting
 * @hw: The hardware instance.
 *
 * This routine stops the transmit function of the hardware.
 */
static void hw_stop_tx(struct ksz_hw *hw)
{
	writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_disable - disable hardware
 * @hw: The hardware instance.
 *
 * This routine disables the hardware.
 */
static void hw_disable(struct ksz_hw *hw)
{
	hw_stop_rx(hw);
	hw_stop_tx(hw);
	hw->enabled = 0;
}
/**
 * hw_enable - enable hardware
 * @hw: The hardware instance.
 *
 * This routine enables the hardware.
 */
static void hw_enable(struct ksz_hw *hw)
{
	hw_start_tx(hw);
	hw_start_rx(hw);
	hw->enabled = 1;
}
/**
 * hw_alloc_pkt - allocate enough descriptors for transmission
 * @hw: The hardware instance.
 * @length: The length of the packet.
 * @physical: Number of descriptors required.
 *
 * This function allocates descriptors for transmission.
 *
 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
 */
static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
{
	/* Always leave one descriptor free. */
	if (hw->tx_desc_info.avail <= 1)
		return 0;

	/* Allocate a descriptor for transmission and mark it current. */
	get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
	hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;

	/* Keep track of number of transmit descriptors used so far. */
	++hw->tx_int_cnt;
	hw->tx_size += length;

	/* Cannot hold on too much data. */
	if (hw->tx_size >= MAX_TX_HELD_SIZE)
		/* Force the next hw_send_pkt() to request a TX interrupt. */
		hw->tx_int_cnt = hw->tx_int_mask + 1;

	/* Not enough descriptors for all fragments: caller must linearize. */
	if (physical > hw->tx_desc_info.avail)
		return 1;

	return hw->tx_desc_info.avail;
}
/**
 * hw_send_pkt - mark packet for transmission
 * @hw: The hardware instance.
 *
 * This routine marks the packet for transmission in PCI version.
 */
static void hw_send_pkt(struct ksz_hw *hw)
{
	struct ksz_desc *cur = hw->tx_desc_info.cur;

	cur->sw.buf.tx.last_seg = 1;

	/* Interrupt only after specified number of descriptors used. */
	if (hw->tx_int_cnt > hw->tx_int_mask) {
		cur->sw.buf.tx.intr = 1;
		hw->tx_int_cnt = 0;
		hw->tx_size = 0;
	}

	/* KSZ8842 supports port directed transmission. */
	cur->sw.buf.tx.dest_port = hw->dst_ports;

	/* Hand the descriptor to hardware, then kick the TX DMA engine. */
	release_desc(cur);

	writel(0, hw->io + KS_DMA_TX_START);
}
  3470. static int empty_addr(u8 *addr)
  3471. {
  3472. u32 *addr1 = (u32 *) addr;
  3473. u16 *addr2 = (u16 *) &addr[4];
  3474. return 0 == *addr1 && 0 == *addr2;
  3475. }
/**
 * hw_set_addr - set MAC address
 * @hw: The hardware instance.
 *
 * This routine programs the MAC address of the hardware when the address is
 * overridden.
 */
static void hw_set_addr(struct ksz_hw *hw)
{
	int i;

	/* MAC_ADDR_ORDER maps to the byte order the registers expect. */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
			hw->io + KS884X_ADDR_0_OFFSET + i);

	/* Keep the switch's own MAC address in sync. */
	sw_set_addr(hw, hw->override_addr);
}
/**
 * hw_read_addr - read MAC address
 * @hw: The hardware instance.
 *
 * This routine retrieves the MAC address of the hardware.
 *
 * If no address was overridden by the user and the hardware holds an
 * all-zero address, a default address (made unique per device via the
 * device id) is substituted and programmed back into the hardware.
 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
		if (empty_addr(hw->override_addr)) {
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
				ETH_ALEN);
			/* Make the default address unique per device. */
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
/*
 * Program an additional MAC address into hardware filter slot @index.
 * The first two bytes go into the high register together with the enable
 * bit, the remaining four bytes into the low register.
 */
static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
{
	int i;
	u32 mac_addr_lo;
	u32 mac_addr_hi;

	mac_addr_hi = 0;
	for (i = 0; i < 2; i++) {
		mac_addr_hi <<= 8;
		mac_addr_hi |= mac_addr[i];
	}
	mac_addr_hi |= ADD_ADDR_ENABLE;
	mac_addr_lo = 0;
	for (i = 2; i < 6; i++) {
		mac_addr_lo <<= 8;
		mac_addr_lo |= mac_addr[i];
	}
	index *= ADD_ADDR_INCR;

	/* Write low word first; the high word's enable bit activates it. */
	writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
	writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
}
  3534. static void hw_set_add_addr(struct ksz_hw *hw)
  3535. {
  3536. int i;
  3537. for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
  3538. if (empty_addr(hw->address[i]))
  3539. writel(0, hw->io + ADD_ADDR_INCR * i +
  3540. KS_ADD_ADDR_0_HI);
  3541. else
  3542. hw_ena_add_addr(hw, i, hw->address[i]);
  3543. }
  3544. }
  3545. static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
  3546. {
  3547. int i;
  3548. int j = ADDITIONAL_ENTRIES;
  3549. if (ether_addr_equal(hw->override_addr, mac_addr))
  3550. return 0;
  3551. for (i = 0; i < hw->addr_list_size; i++) {
  3552. if (ether_addr_equal(hw->address[i], mac_addr))
  3553. return 0;
  3554. if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
  3555. j = i;
  3556. }
  3557. if (j < ADDITIONAL_ENTRIES) {
  3558. memcpy(hw->address[j], mac_addr, ETH_ALEN);
  3559. hw_ena_add_addr(hw, j, hw->address[j]);
  3560. return 0;
  3561. }
  3562. return -1;
  3563. }
  3564. static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
  3565. {
  3566. int i;
  3567. for (i = 0; i < hw->addr_list_size; i++) {
  3568. if (ether_addr_equal(hw->address[i], mac_addr)) {
  3569. eth_zero_addr(hw->address[i]);
  3570. writel(0, hw->io + ADD_ADDR_INCR * i +
  3571. KS_ADD_ADDR_0_HI);
  3572. return 0;
  3573. }
  3574. }
  3575. return -1;
  3576. }
  3577. /**
  3578. * hw_clr_multicast - clear multicast addresses
  3579. * @hw: The hardware instance.
  3580. *
  3581. * This routine removes all multicast addresses set in the hardware.
  3582. */
  3583. static void hw_clr_multicast(struct ksz_hw *hw)
  3584. {
  3585. int i;
  3586. for (i = 0; i < HW_MULTICAST_SIZE; i++) {
  3587. hw->multi_bits[i] = 0;
  3588. writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
  3589. }
  3590. }
  3591. /**
  3592. * hw_set_grp_addr - set multicast addresses
  3593. * @hw: The hardware instance.
  3594. *
  3595. * This routine programs multicast addresses for the hardware to accept those
  3596. * addresses.
  3597. */
  3598. static void hw_set_grp_addr(struct ksz_hw *hw)
  3599. {
  3600. int i;
  3601. int index;
  3602. int position;
  3603. int value;
  3604. memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
  3605. for (i = 0; i < hw->multi_list_size; i++) {
  3606. position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
  3607. index = position >> 3;
  3608. value = 1 << (position & 7);
  3609. hw->multi_bits[index] |= (u8) value;
  3610. }
  3611. for (i = 0; i < HW_MULTICAST_SIZE; i++)
  3612. writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
  3613. i);
  3614. }
  3615. /**
  3616. * hw_set_multicast - enable or disable all multicast receiving
  3617. * @hw: The hardware instance.
  3618. * @multicast: To turn on or off the all multicast feature.
  3619. *
  3620. * This routine enables/disables the hardware to accept all multicast packets.
  3621. */
  3622. static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
  3623. {
  3624. /* Stop receiving for reconfiguration. */
  3625. hw_stop_rx(hw);
  3626. if (multicast)
  3627. hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
  3628. else
  3629. hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
  3630. if (hw->enabled)
  3631. hw_start_rx(hw);
  3632. }
  3633. /**
  3634. * hw_set_promiscuous - enable or disable promiscuous receiving
  3635. * @hw: The hardware instance.
  3636. * @prom: To turn on or off the promiscuous feature.
  3637. *
  3638. * This routine enables/disables the hardware to accept all packets.
  3639. */
  3640. static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
  3641. {
  3642. /* Stop receiving for reconfiguration. */
  3643. hw_stop_rx(hw);
  3644. if (prom)
  3645. hw->rx_cfg |= DMA_RX_PROMISCUOUS;
  3646. else
  3647. hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
  3648. if (hw->enabled)
  3649. hw_start_rx(hw);
  3650. }
  3651. /**
  3652. * sw_enable - enable the switch
  3653. * @hw: The hardware instance.
  3654. * @enable: The flag to enable or disable the switch
  3655. *
  3656. * This routine is used to enable/disable the switch in KSZ8842.
  3657. */
  3658. static void sw_enable(struct ksz_hw *hw, int enable)
  3659. {
  3660. int port;
  3661. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  3662. if (hw->dev_count > 1) {
  3663. /* Set port-base vlan membership with host port. */
  3664. sw_cfg_port_base_vlan(hw, port,
  3665. HOST_MASK | (1 << port));
  3666. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  3667. } else {
  3668. sw_cfg_port_base_vlan(hw, port, PORT_MASK);
  3669. port_set_stp_state(hw, port, STP_STATE_FORWARDING);
  3670. }
  3671. }
  3672. if (hw->dev_count > 1)
  3673. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  3674. else
  3675. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
  3676. if (enable)
  3677. enable = KS8842_START;
  3678. writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
  3679. }
/**
 * sw_setup - setup the switch
 * @hw: The hardware instance.
 *
 * This routine sets up the hardware switch engine for default operation:
 * global control, broadcast storm protection, priorities, mirroring,
 * rate limiting, VLAN, optional spanning tree, flow-control detection,
 * and finally starts the switch.  The order mirrors hardware bring-up.
 */
static void sw_setup(struct ksz_hw *hw)
{
	int port;

	sw_set_global_ctrl(hw);

	/* Enable switch broadcast storm protection at 10% percent rate. */
	sw_init_broad_storm(hw);
	hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
	for (port = 0; port < SWITCH_PORT_NUM; port++)
		sw_ena_broad_storm(hw, port);

	sw_init_prio(hw);
	sw_init_mirror(hw);
	sw_init_prio_rate(hw);
	sw_init_vlan(hw);

	/* Spanning tree support is optional. */
	if (hw->features & STP_SUPPORT)
		sw_init_stp(hw);
	/* NOTE(review): presumably sw_chk() tests whether these flow control
	 * bits are set; when clear, pause flow control is recorded - confirm.
	 */
	if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
			SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
		hw->overrides |= PAUSE_FLOW_CTRL;
	sw_enable(hw, 1);
}
/**
 * ksz_start_timer - start kernel timer
 * @info: Kernel timer information.
 * @time: The time tick (delay before first expiration, in jiffies).
 *
 * This routine starts the kernel timer after the specified time tick and
 * marks it as free running (info->max = -1 means re-arm forever; see
 * ksz_update_timer()).
 */
static void ksz_start_timer(struct ksz_timer_info *info, int time)
{
	info->cnt = 0;
	info->timer.expires = jiffies + time;
	add_timer(&info->timer);

	/* infinity */
	info->max = -1;
}
  3721. /**
  3722. * ksz_stop_timer - stop kernel timer
  3723. * @info: Kernel timer information.
  3724. *
  3725. * This routine stops the kernel timer.
  3726. */
  3727. static void ksz_stop_timer(struct ksz_timer_info *info)
  3728. {
  3729. if (info->max) {
  3730. info->max = 0;
  3731. del_timer_sync(&info->timer);
  3732. }
  3733. }
/*
 * Prepare a kernel timer with period @period and callback @function
 * without arming it (info->max = 0 means inactive).
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(struct timer_list *))
{
	info->max = 0;
	info->period = period;
	timer_setup(&info->timer, function, 0);
}
  3741. static void ksz_update_timer(struct ksz_timer_info *info)
  3742. {
  3743. ++info->cnt;
  3744. if (info->max > 0) {
  3745. if (info->cnt < info->max) {
  3746. info->timer.expires = jiffies + info->period;
  3747. add_timer(&info->timer);
  3748. } else
  3749. info->max = 0;
  3750. } else if (info->max < 0) {
  3751. info->timer.expires = jiffies + info->period;
  3752. add_timer(&info->timer);
  3753. }
  3754. }
  3755. /**
  3756. * ksz_alloc_soft_desc - allocate software descriptors
  3757. * @desc_info: Descriptor information structure.
  3758. * @transmit: Indication that descriptors are for transmit.
  3759. *
  3760. * This local function allocates software descriptors for manipulation in
  3761. * memory.
  3762. *
  3763. * Return 0 if successful.
  3764. */
  3765. static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
  3766. {
  3767. desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
  3768. GFP_KERNEL);
  3769. if (!desc_info->ring)
  3770. return 1;
  3771. hw_init_desc(desc_info, transmit);
  3772. return 0;
  3773. }
/**
 * ksz_alloc_desc - allocate hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local function allocates hardware descriptors for receiving and
 * transmitting.  A single coherent DMA pool holds both rings; it is
 * over-allocated by DESC_ALIGNMENT so the rings can be aligned.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/* Allocate memory for RX & TX descriptors. */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		pci_zalloc_consistent(adapter->pdev,
				      adapter->desc_pool.alloc_size,
				      &adapter->desc_pool.dma_addr);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}

	/* Align to the next cache line boundary. */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/* Allocate receive/transmit descriptors. */
	/* RX ring sits at the start of the pool, TX ring right after it. */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	/* NOTE(review): on soft-descriptor allocation failure the DMA pool
	 * is not released here; presumably the caller cleans up through
	 * ksz_free_desc()/ksz_free_mem() - confirm.
	 */
	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
/**
 * free_dma_buf - release DMA buffer resources
 * @adapter: Adapter information structure.
 * @dma_buf: The DMA buffer to release.
 * @direction: PCI DMA direction the buffer was mapped with.
 *
 * This routine is just a helper function to release the DMA buffer
 * resources: unmap the buffer and free the attached socket buffer.
 */
static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
	int direction)
{
	pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
	dev_kfree_skb(dma_buf->skb);
	dma_buf->skb = NULL;
	dma_buf->dma = 0;
}
/**
 * ksz_init_rx_buffers - initialize receive descriptors
 * @adapter: Adapter information structure.
 *
 * This routine initializes DMA buffers for receiving: each receive
 * descriptor gets an MTU-sized socket buffer mapped for DMA.  Buffers
 * from a previous run are reused when their size still matches.
 */
static void ksz_init_rx_buffers(struct dev_info *adapter)
{
	int i;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct ksz_hw *hw = &adapter->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;

	for (i = 0; i < hw->rx_desc_info.alloc; i++) {
		get_rx_pkt(info, &desc);

		dma_buf = DMA_BUFFER(desc);
		/* Re-allocate when the existing buffer no longer matches MTU. */
		if (dma_buf->skb && dma_buf->len != adapter->mtu)
			free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
		dma_buf->len = adapter->mtu;
		if (!dma_buf->skb)
			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
		if (dma_buf->skb && !dma_buf->dma)
			dma_buf->dma = pci_map_single(
				adapter->pdev,
				skb_tail_pointer(dma_buf->skb),
				dma_buf->len,
				PCI_DMA_FROMDEVICE);

		/* Set descriptor. */
		/* NOTE(review): if alloc_skb() failed above, the descriptor
		 * is still released with a stale/zero DMA address - confirm
		 * the hardware tolerates this.
		 */
		set_rx_buf(desc, dma_buf->dma);
		set_rx_len(desc, dma_buf->len);
		release_desc(desc);
	}
}
/**
 * ksz_alloc_mem - allocate memory for hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This function allocates memory for use by hardware descriptors for
 * receiving and transmitting.  It also derives the transmit interrupt
 * mask used to coalesce transmit-done interrupts.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/* Determine how many descriptors to skip transmit interrupt. */
	hw->tx_int_cnt = 0;
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	/* Count the bits of the capped value... */
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	/* ...then turn it into a power-of-two minus one mask. */
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size (rounded up to DESC_ALIGNMENT). */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		pr_alert("Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
/**
 * ksz_free_desc - free software and hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees the software and hardware descriptors
 * allocated by ksz_alloc_desc(): the shared coherent DMA pool holding
 * both rings, then the software descriptor arrays.
 */
static void ksz_free_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Reset descriptor. */
	hw->rx_desc_info.ring_virt = NULL;
	hw->tx_desc_info.ring_virt = NULL;
	hw->rx_desc_info.ring_phys = 0;
	hw->tx_desc_info.ring_phys = 0;

	/* Free memory. */
	if (adapter->desc_pool.alloc_virt)
		pci_free_consistent(
			adapter->pdev,
			adapter->desc_pool.alloc_size,
			adapter->desc_pool.alloc_virt,
			adapter->desc_pool.dma_addr);

	/* Reset resource pool. */
	adapter->desc_pool.alloc_size = 0;
	adapter->desc_pool.alloc_virt = NULL;

	kfree(hw->rx_desc_info.ring);
	hw->rx_desc_info.ring = NULL;
	kfree(hw->tx_desc_info.ring);
	hw->tx_desc_info.ring = NULL;
}
  3941. /**
  3942. * ksz_free_buffers - free buffers used in the descriptors
  3943. * @adapter: Adapter information structure.
  3944. * @desc_info: Descriptor information structure.
  3945. *
  3946. * This local routine frees buffers used in the DMA buffers.
  3947. */
  3948. static void ksz_free_buffers(struct dev_info *adapter,
  3949. struct ksz_desc_info *desc_info, int direction)
  3950. {
  3951. int i;
  3952. struct ksz_dma_buf *dma_buf;
  3953. struct ksz_desc *desc = desc_info->ring;
  3954. for (i = 0; i < desc_info->alloc; i++) {
  3955. dma_buf = DMA_BUFFER(desc);
  3956. if (dma_buf->skb)
  3957. free_dma_buf(adapter, dma_buf, direction);
  3958. desc++;
  3959. }
  3960. }
  3961. /**
  3962. * ksz_free_mem - free all resources used by descriptors
  3963. * @adapter: Adapter information structure.
  3964. *
  3965. * This local routine frees all the resources allocated by ksz_alloc_mem().
  3966. */
  3967. static void ksz_free_mem(struct dev_info *adapter)
  3968. {
  3969. /* Free transmit buffers. */
  3970. ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
  3971. PCI_DMA_TODEVICE);
  3972. /* Free receive buffers. */
  3973. ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
  3974. PCI_DMA_FROMDEVICE);
  3975. /* Free descriptors. */
  3976. ksz_free_desc(adapter);
  3977. }
  3978. static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
  3979. u64 *counter)
  3980. {
  3981. int i;
  3982. int mib;
  3983. int port;
  3984. struct ksz_port_mib *port_mib;
  3985. memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  3986. for (i = 0, port = first; i < cnt; i++, port++) {
  3987. port_mib = &hw->port_mib[port];
  3988. for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
  3989. counter[mib] += port_mib->counter[mib];
  3990. }
  3991. }
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine is used to send a packet out to the network.  The socket
 * buffer (including any page fragments) is mapped for DMA, attached to
 * transmit descriptors, and handed to the hardware.  The caller must
 * have reserved enough descriptors beforehand (see netdev_tx()).
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		int frag;
		skb_frag_t *this_frag;

		/* Map the linear head into the first descriptor. */
		dma_buf->len = skb_headlen(skb);

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		/* One additional descriptor per page fragment. */
		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = skb_frag_size(this_frag);

			dma_buf->dma = pci_map_single(
				hw_priv->pdev,
				skb_frag_address(this_frag),
				dma_buf->len,
				PCI_DMA_TODEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/* Release the first descriptor. */
		release_desc(first);
	} else {
		/* Single-descriptor packet: map the whole buffer. */
		dma_buf->len = len;

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	/* Request hardware checksum generation on the last descriptor. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device information.
 * @normal: Non-zero to reclaim only completed descriptors; zero to force
 *	reclaim of descriptors still owned by hardware (used on reset).
 *
 * This routine is called to clean up the transmitted buffers: unmap each
 * finished buffer, free its socket buffer, and return the descriptor to
 * the available pool.
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	spin_lock_irq(&hw_priv->hwlock);
	last = info->last;

	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;
			else
				/* Forced cleanup: take it back from hardware. */
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		pci_unmap_single(
			hw_priv->pdev, dma_buf->dma, dma_buf->len,
			PCI_DMA_TODEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock_irq(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		netif_trans_update(dev);
}
/**
 * tx_done - transmit done processing
 * @hw_priv: Network device information.
 *
 * This routine is called when the transmit interrupt is triggered,
 * indicating either a packet is sent successfully or there are transmit
 * errors.  It reclaims finished descriptors and wakes any queues that
 * were stopped for lack of them.
 */
static void tx_done(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	transmit_cleanup(hw_priv, 1);

	for (port = 0; port < hw->dev_count; port++) {
		struct net_device *dev = hw->port_info[port].pdev;

		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}
/* Transfer metadata from the replaced skb @old to its copy @skb, then
 * free @old.  Used when a packet has to be re-allocated before sending.
 */
static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
{
	skb->dev = old->dev;
	skb->protocol = old->protocol;
	skb->ip_summed = old->ip_summed;
	skb->csum = old->csum;
	skb_set_network_header(skb, ETH_HLEN);

	dev_consume_skb_any(old);
}
/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 * Small frames are padded to work around a hardware bug; descriptor
 * shortages and IPv6 checksum offload (which the hardware cannot do) are
 * handled by linearizing the packet with the checksum done in software.
 *
 * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY if out of resources.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				/* Enough tailroom: zero-pad in place. */
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				/* No tailroom: copy into a padded skb. */
				skb = netdev_alloc_skb(dev, 50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
					50 - org_skb->len);
				skb->len = 50;
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/* Linearize when short on descriptors or for IPv6 checksum
		 * offload, computing the checksum in software.
		 */
		if (left < num ||
		    (CHECKSUM_PARTIAL == skb->ip_summed &&
		     skb->protocol == htons(ETH_P_IPV6))) {
			struct sk_buff *org_skb = skb;

			skb = netdev_alloc_skb(dev, org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}
/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 * @txqueue: Index of the timed-out transmit queue (unused).
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
			hw_priv = NULL;
	}

	last_reset = jiffies;

	if (hw_priv) {
		/* Full recovery: quiesce, reset rings, reprogram, restart. */
		hw_dis_intr(hw);
		hw_disable(hw);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			/* Restore per-port state for running interfaces. */
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	netif_trans_update(dev);
	netif_wake_queue(dev);
}
/* Mark the checksum as hardware-verified for TCP over IPv4 packets. */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* NOTE(review): iph->tot_len here lines up with the VLAN
		 * header's encapsulated-protocol field, so this reads the
		 * inner protocol before skipping the tag - confirm this
		 * overlay is intentional.
		 */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		/* Hardware verifies TCP checksums only. */
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
  4296. static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
  4297. struct ksz_desc *desc, union desc_stat status)
  4298. {
  4299. int packet_len;
  4300. struct dev_priv *priv = netdev_priv(dev);
  4301. struct dev_info *hw_priv = priv->adapter;
  4302. struct ksz_dma_buf *dma_buf;
  4303. struct sk_buff *skb;
  4304. int rx_status;
  4305. /* Received length includes 4-byte CRC. */
  4306. packet_len = status.rx.frame_len - 4;
  4307. dma_buf = DMA_BUFFER(desc);
  4308. pci_dma_sync_single_for_cpu(
  4309. hw_priv->pdev, dma_buf->dma, packet_len + 4,
  4310. PCI_DMA_FROMDEVICE);
  4311. do {
  4312. /* skb->data != skb->head */
  4313. skb = netdev_alloc_skb(dev, packet_len + 2);
  4314. if (!skb) {
  4315. dev->stats.rx_dropped++;
  4316. return -ENOMEM;
  4317. }
  4318. /*
  4319. * Align socket buffer in 4-byte boundary for better
  4320. * performance.
  4321. */
  4322. skb_reserve(skb, 2);
  4323. skb_put_data(skb, dma_buf->skb->data, packet_len);
  4324. } while (0);
  4325. skb->protocol = eth_type_trans(skb, dev);
  4326. if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
  4327. csum_verified(skb);
  4328. /* Update receive statistics. */
  4329. dev->stats.rx_packets++;
  4330. dev->stats.rx_bytes += packet_len;
  4331. /* Notify upper layer for received packet. */
  4332. rx_status = netif_rx(skb);
  4333. return 0;
  4334. }
  4335. static int dev_rcv_packets(struct dev_info *hw_priv)
  4336. {
  4337. int next;
  4338. union desc_stat status;
  4339. struct ksz_hw *hw = &hw_priv->hw;
  4340. struct net_device *dev = hw->port_info[0].pdev;
  4341. struct ksz_desc_info *info = &hw->rx_desc_info;
  4342. int left = info->alloc;
  4343. struct ksz_desc *desc;
  4344. int received = 0;
  4345. next = info->next;
  4346. while (left--) {
  4347. /* Get next descriptor which is not hardware owned. */
  4348. desc = &info->ring[next];
  4349. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4350. if (status.rx.hw_owned)
  4351. break;
  4352. /* Status valid only when last descriptor bit is set. */
  4353. if (status.rx.last_desc && status.rx.first_desc) {
  4354. if (rx_proc(dev, hw, desc, status))
  4355. goto release_packet;
  4356. received++;
  4357. }
  4358. release_packet:
  4359. release_desc(desc);
  4360. next++;
  4361. next &= info->mask;
  4362. }
  4363. info->next = next;
  4364. return received;
  4365. }
/* Process received packets when each switch port is its own netdev;
 * packets for interfaces that are down are dropped.  Return the number
 * of packets handed to the stack.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
/* Process received packets in the mode where receive errors are NOT
 * dropped by hardware: over-long frames are still accepted, other error
 * frames are only counted.  Return the number of packets handed to the
 * stack.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
/**
 * rx_proc_task - receive processing tasklet
 * @data:	Pointer to struct dev_info cast to unsigned long.
 *
 * Drain the receive ring through the installed dev_rcv handler.  When
 * nothing was received, resume the receiver (it may have been suspended by
 * an overrun) and re-enable the receive interrupt that netdev_intr masked;
 * otherwise acknowledge the interrupt and reschedule itself to keep
 * draining.
 */
static void rx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	if (!hw->enabled)
		return;
	if (unlikely(!hw_priv->dev_rcv(hw_priv))) {

		/* In case receive process is suspended because of overrun. */
		hw_resume_rx(hw);

		/* tasklets are interruptible. */
		spin_lock_irq(&hw_priv->hwlock);
		hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
		spin_unlock_irq(&hw_priv->hwlock);
	} else {
		hw_ack_intr(hw, KS884X_INT_RX);
		tasklet_schedule(&hw_priv->rx_tasklet);
	}
}
/**
 * tx_proc_task - transmit completion tasklet
 * @data:	Pointer to struct dev_info cast to unsigned long.
 *
 * Acknowledge the transmit interrupts, run tx_done() to process completed
 * transmissions, then re-enable the transmit interrupt that netdev_intr
 * masked before scheduling this tasklet.
 */
static void tx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
/**
 * handle_rx_stop - process the receive-stopped interrupt
 * @hw:	The hardware instance.
 *
 * hw->rx_stop tracks the receiver's stop/start cycle:
 * 0 means a stop was not expected, so the interrupt is masked off;
 * 1 means the receiver was just (re)started, so this event is expected
 * and only counted; greater than 1 means the receiver stopped while it
 * should be running, so it is restarted if still enabled, otherwise the
 * interrupt is masked and the cycle reset.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			hw_start_rx(hw);
		} else {
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
/**
 * netdev_intr - interrupt handling
 * @irq: Interrupt number.
 * @dev_id: Network device.
 *
 * This function is called by upper network layer to signal interrupt.
 *
 * Return IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	spin_lock(&hw_priv->hwlock);

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt! */
	if (!int_enable) {
		spin_unlock(&hw_priv->hwlock);
		return IRQ_NONE;
	}

	/*
	 * do/while (0) lets the "stopped" cases below break out early after
	 * acknowledging, without extra nesting.
	 */
	do {
		hw_ack_intr(hw, int_enable);
		int_enable &= hw->intr_mask;

		/* Heavy tx/rx work is deferred to tasklets with the source masked. */
		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	spin_unlock(&hw_priv->hwlock);

	return IRQ_HANDLED;
}
/*
 * Linux network device functions
 */

/*
 * Next MIB-counter read deadline in jiffies; advanced in netdev_open to
 * stagger the per-port counter read times.
 */
static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * netdev_netpoll - netpoll entry point
 * @dev: Network device.
 *
 * Used by netpoll (e.g. netconsole) to poll the device: disable the chip's
 * interrupts and invoke the interrupt handler directly.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
  4574. static void bridge_change(struct ksz_hw *hw)
  4575. {
  4576. int port;
  4577. u8 member;
  4578. struct ksz_switch *sw = hw->ksz_switch;
  4579. /* No ports in forwarding state. */
  4580. if (!sw->member) {
  4581. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  4582. sw_block_addr(hw);
  4583. }
  4584. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  4585. if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
  4586. member = HOST_MASK | sw->member;
  4587. else
  4588. member = HOST_MASK | (1 << port);
  4589. if (member != sw->port_cfg[port].member)
  4590. sw_cfg_port_base_vlan(hw, port, member);
  4591. }
  4592. }
/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function processes the close operation of network device. This is
 * caused by the user command "ifconfig ethX down."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed.  Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	/* Drop this device's contribution to the shared rx-mode counters. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	hw_priv->opened--;
	/* Last closer tears down the shared hardware state. */
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_kill(&hw_priv->rx_tasklet);
		tasklet_kill(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}
  4652. static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
  4653. {
  4654. if (hw->ksz_switch) {
  4655. u32 data;
  4656. data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4657. if (hw->features & RX_HUGE_FRAME)
  4658. data |= SWITCH_HUGE_PACKET;
  4659. else
  4660. data &= ~SWITCH_HUGE_PACKET;
  4661. writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4662. }
  4663. if (hw->features & RX_HUGE_FRAME) {
  4664. hw->rx_cfg |= DMA_RX_ERROR;
  4665. hw_priv->dev_rcv = dev_rcv_special;
  4666. } else {
  4667. hw->rx_cfg &= ~DMA_RX_ERROR;
  4668. if (hw->dev_count > 1)
  4669. hw_priv->dev_rcv = port_rcv_packets;
  4670. else
  4671. hw_priv->dev_rcv = dev_rcv_packets;
  4672. }
  4673. }
/*
 * prepare_hardware - request the IRQ and reset/initialize the chip
 * @dev: Network device that requests the interrupt.
 *
 * Called by the first opened device of the adapter: request the (shared)
 * interrupt line, set up the rx/tx tasklets, reset the chip, and program
 * the descriptor ring bases, MAC address, frame-size mode and receive
 * buffers.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
		(unsigned long) hw_priv);
	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
		(unsigned long) hw_priv);

	/* Reset the shared rx-mode reference counts. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
  4700. static void set_media_state(struct net_device *dev, int media_state)
  4701. {
  4702. struct dev_priv *priv = netdev_priv(dev);
  4703. if (media_state == priv->media_state)
  4704. netif_carrier_on(dev);
  4705. else
  4706. netif_carrier_off(dev);
  4707. netif_info(priv, link, dev, "link %s\n",
  4708. media_state == priv->media_state ? "on" : "off");
  4709. }
/**
 * netdev_open - open network device
 * @dev: Network device.
 *
 * This function processes the open operation of network device. This is
 * caused by the user command "ifconfig ethX up."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int i;
	int p;
	int rc = 0;

	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	/* First opener sets up the shared hardware. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		/* Stagger the MIB counter read times of the ports. */
		for (i = 0; i < hw->mib_port_cnt; i++) {
			if (next_jiffies < jiffies)
				next_jiffies = jiffies + HZ * 2;
			else
				next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			/* No switch: prepare Wake-on-LAN defaults. */
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	/* First opener enables the hardware, interrupts and MIB timer. */
	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	set_media_state(dev, media_connected);
	netif_start_queue(dev);

	return 0;
}
  4791. /* RX errors = rx_errors */
  4792. /* RX dropped = rx_dropped */
  4793. /* RX overruns = rx_fifo_errors */
  4794. /* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
  4795. /* TX errors = tx_errors */
  4796. /* TX dropped = tx_dropped */
  4797. /* TX overruns = tx_fifo_errors */
  4798. /* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
  4799. /* collisions = collisions */
  4800. /**
  4801. * netdev_query_statistics - query network device statistics
  4802. * @dev: Network device.
  4803. *
  4804. * This function returns the statistics of the network device. The device
  4805. * needs not be opened.
  4806. *
  4807. * Return network device statistics.
  4808. */
  4809. static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
  4810. {
  4811. struct dev_priv *priv = netdev_priv(dev);
  4812. struct ksz_port *port = &priv->port;
  4813. struct ksz_hw *hw = &priv->adapter->hw;
  4814. struct ksz_port_mib *mib;
  4815. int i;
  4816. int p;
  4817. dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
  4818. dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
  4819. /* Reset to zero to add count later. */
  4820. dev->stats.multicast = 0;
  4821. dev->stats.collisions = 0;
  4822. dev->stats.rx_length_errors = 0;
  4823. dev->stats.rx_crc_errors = 0;
  4824. dev->stats.rx_frame_errors = 0;
  4825. dev->stats.tx_window_errors = 0;
  4826. for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
  4827. mib = &hw->port_mib[p];
  4828. dev->stats.multicast += (unsigned long)
  4829. mib->counter[MIB_COUNTER_RX_MULTICAST];
  4830. dev->stats.collisions += (unsigned long)
  4831. mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
  4832. dev->stats.rx_length_errors += (unsigned long)(
  4833. mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
  4834. mib->counter[MIB_COUNTER_RX_FRAGMENT] +
  4835. mib->counter[MIB_COUNTER_RX_OVERSIZE] +
  4836. mib->counter[MIB_COUNTER_RX_JABBER]);
  4837. dev->stats.rx_crc_errors += (unsigned long)
  4838. mib->counter[MIB_COUNTER_RX_CRC_ERR];
  4839. dev->stats.rx_frame_errors += (unsigned long)(
  4840. mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
  4841. mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
  4842. dev->stats.tx_window_errors += (unsigned long)
  4843. mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
  4844. }
  4845. return &dev->stats;
  4846. }
/**
 * netdev_set_mac_address - set network device MAC address
 * @dev: Network device.
 * @addr: Buffer of MAC address.
 *
 * This function is used to set the MAC address of the network device.
 *
 * Return 0 to indicate success.
 */
static int netdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct sockaddr *mac = addr;
	uint interrupt;

	/*
	 * Secondary ports use an additional MAC address table entry; the
	 * first port owns the device's main address register.
	 */
	if (priv->port.first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	else {
		hw->mac_override = 1;
		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
	}

	memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);

	/* Reprogram the hardware with interrupts blocked. */
	interrupt = hw_block_intr(hw);

	if (priv->port.first_port > 0)
		hw_add_addr(hw, dev->dev_addr);
	else
		hw_set_addr(hw);
	hw_restore_intr(hw, interrupt);

	return 0;
}
/*
 * dev_set_promiscuous - update the shared promiscuous-mode state
 * @dev: Network device.
 * @priv: The device's private data.
 * @hw: The hardware instance.
 * @promiscuous: New promiscuous state requested for this device.
 *
 * hw->promiscuous counts how many devices of the adapter want promiscuous
 * mode; the hardware setting is only changed on transitions where both old
 * and new counts are 0 or 1.  When a port leaves promiscuous mode while
 * STP is supported and the port belongs to a bridge, it is also removed
 * from the software bridge membership.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
		    netif_is_bridge_port(dev)) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
  4908. static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
  4909. int multicast)
  4910. {
  4911. if (multicast != priv->multicast) {
  4912. u8 all_multi = hw->all_multi;
  4913. if (multicast)
  4914. ++hw->all_multi;
  4915. else
  4916. --hw->all_multi;
  4917. priv->multicast = multicast;
  4918. /* Turn on/off all multicast mode. */
  4919. if (hw->all_multi <= 1 && all_multi <= 1)
  4920. hw_set_multicast(hw, hw->all_multi);
  4921. }
  4922. }
/**
 * netdev_set_rx_mode
 * @dev: Network device.
 *
 * This routine is used to set multicast addresses or put the network device
 * into promiscuous mode.
 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	/*
	 * With multiple device interfaces the hash filter cannot be shared,
	 * so any multicast interest enables all-multicast mode instead.
	 */
	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		/* Copy the (bounded) multicast list into the hardware shadow. */
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leaving "list too big" mode drops the all-multi reference. */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
  4971. static int netdev_change_mtu(struct net_device *dev, int new_mtu)
  4972. {
  4973. struct dev_priv *priv = netdev_priv(dev);
  4974. struct dev_info *hw_priv = priv->adapter;
  4975. struct ksz_hw *hw = &hw_priv->hw;
  4976. int hw_mtu;
  4977. if (netif_running(dev))
  4978. return -EBUSY;
  4979. /* Cannot use different MTU in multiple device interfaces mode. */
  4980. if (hw->dev_count > 1)
  4981. if (dev != hw_priv->dev)
  4982. return 0;
  4983. hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
  4984. if (hw_mtu > REGULAR_RX_BUF_SIZE) {
  4985. hw->features |= RX_HUGE_FRAME;
  4986. hw_mtu = MAX_RX_BUF_SIZE;
  4987. } else {
  4988. hw->features &= ~RX_HUGE_FRAME;
  4989. hw_mtu = REGULAR_RX_BUF_SIZE;
  4990. }
  4991. hw_mtu = (hw_mtu + 3) & ~3;
  4992. hw_priv->mtu = hw_mtu;
  4993. dev->mtu = new_mtu;
  4994. return 0;
  4995. }
/**
 * netdev_ioctl - I/O control processing
 * @dev: Network device.
 * @ifr: Interface request structure.
 * @cmd: I/O control code.
 *
 * This function is used to process I/O control calls.
 *
 * Return 0 to indicate success.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);

	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;

		/* Fallthrough... */

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		/* Only registers 0-5 of the linked port are accessible. */
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
}
  5045. /*
  5046. * MII support
  5047. */
/**
 * mdio_read - read PHY register
 * @dev: Network device.
 * @phy_id: The PHY id.  (Unused: the register is always read from the
 *	port the device is currently linked on.)
 * @reg_num: The register number.
 *
 * This function returns the PHY register value.
 *
 * Return the register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	u16 val_out;

	/*
	 * reg_num << 1 presumably converts the MII register index to
	 * hw_r_phy's offset convention — verify against hw_r_phy.
	 */
	hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
	return val_out;
}
/**
 * mdio_write - set PHY register
 * @dev: Network device.
 * @phy_id: The PHY id.  (Unused: the value is written to every port of
 *	the device instead.)
 * @reg_num: The register number.
 * @val: The register value.
 *
 * This procedure sets the PHY register value.
 */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	int i;
	int pi;

	/* Write to all ports of this device so their PHYs stay in sync. */
	for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
		hw_w_phy(hw, pi, reg_num << 1, val);
}
/*
 * ethtool support
 */

/* Size of the EEPROM in 16-bit words. */
#define EEPROM_SIZE			0x40

/* Shadow copy of EEPROM contents, refreshed by netdev_get_eeprom(). */
static u16 eeprom_data[EEPROM_SIZE] = { 0 };

/* All 10/100 half/full-duplex advertisement bits handled by this driver. */
#define ADVERTISED_ALL			\
	(ADVERTISED_10baseT_Half |	\
	ADVERTISED_10baseT_Full |	\
	ADVERTISED_100baseT_Half |	\
	ADVERTISED_100baseT_Full)
  5096. /* These functions use the MII functions in mii.c. */
/**
 * netdev_get_link_ksettings - get network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function queries the PHY and returns its state in the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_link_ksettings(struct net_device *dev,
	struct ethtool_link_ksettings *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
	/* Only twisted-pair media is available on this hardware. */
	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for workaround in next function. */
	ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
		cmd->link_modes.advertising);
	return 0;
}
/**
 * netdev_set_link_ksettings - set network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function sets the PHY according to the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_link_ksettings(struct net_device *dev,
	const struct ethtool_link_ksettings *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ethtool_link_ksettings copy_cmd;
	u32 speed = cmd->base.speed;
	u32 advertising;
	int rc;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
		cmd->link_modes.advertising);

	/*
	 * ethtool utility does not change advertised setting if auto
	 * negotiation is not specified explicitly.  When the advertisement
	 * is unchanged from what netdev_get_link_ksettings() reported,
	 * re-derive it from the requested speed and duplex instead.
	 */
	if (cmd->base.autoneg && priv->advertising == advertising) {
		advertising |= ADVERTISED_ALL;
		if (10 == speed)
			advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half);
		else if (100 == speed)
			advertising &=
				~(ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half);
		if (0 == cmd->base.duplex)
			advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Full);
		else if (1 == cmd->base.duplex)
			advertising &=
				~(ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Half);
	}
	mutex_lock(&hw_priv->lock);
	if (cmd->base.autoneg &&
	    (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
		/* Full autonegotiation: let the PHY pick speed and duplex. */
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		port->duplex = cmd->base.duplex + 1;
		if (1000 != speed)
			port->speed = speed;
		if (cmd->base.autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}

	/* Hand the (possibly fixed-up) advertisement mask to the MII layer. */
	memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
	ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
		advertising);
	rc = mii_ethtool_set_link_ksettings(
		&priv->mii_if,
		(const struct ethtool_link_ksettings *)&copy_cmd);
	mutex_unlock(&hw_priv->lock);
	return rc;
}
  5188. /**
  5189. * netdev_nway_reset - restart auto-negotiation
  5190. * @dev: Network device.
  5191. *
  5192. * This function restarts the PHY for auto-negotiation.
  5193. *
  5194. * Return 0 if successful; otherwise an error code.
  5195. */
  5196. static int netdev_nway_reset(struct net_device *dev)
  5197. {
  5198. struct dev_priv *priv = netdev_priv(dev);
  5199. struct dev_info *hw_priv = priv->adapter;
  5200. int rc;
  5201. mutex_lock(&hw_priv->lock);
  5202. rc = mii_nway_restart(&priv->mii_if);
  5203. mutex_unlock(&hw_priv->lock);
  5204. return rc;
  5205. }
  5206. /**
  5207. * netdev_get_link - get network device link status
  5208. * @dev: Network device.
  5209. *
  5210. * This function gets the link status from the PHY.
  5211. *
  5212. * Return true if PHY is linked and false otherwise.
  5213. */
  5214. static u32 netdev_get_link(struct net_device *dev)
  5215. {
  5216. struct dev_priv *priv = netdev_priv(dev);
  5217. int rc;
  5218. rc = mii_link_ok(&priv->mii_if);
  5219. return rc;
  5220. }
/**
 * netdev_get_drvinfo - get network driver information
 * @dev: Network device.
 * @info: Ethtool driver info data structure.
 *
 * This procedure returns the driver name, version, and the PCI bus
 * location of the adapter.
 */
static void netdev_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(hw_priv->pdev),
		sizeof(info->bus_info));
}
/**
 * netdev_get_regs_len - get length of register dump
 * @dev: Network device.
 *
 * This function returns the length of the register dump.
 *
 * Return length of the register dump.
 */

/*
 * Register ranges included in the ethtool register dump; the list is
 * terminated by a { 0, 0 } entry (end <= start).
 */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL,	KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO,	KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET,	KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P,	KS8842_SGCR7_P },
	{ KS8842_MACAR1_P,	KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P,	KS8842_P3ERCR_P },
	{ 0, 0 }
};
  5258. static int netdev_get_regs_len(struct net_device *dev)
  5259. {
  5260. struct hw_regs *range = hw_regs_range;
  5261. int regs_len = 0x10 * sizeof(u32);
  5262. while (range->end > range->start) {
  5263. regs_len += (range->end - range->start + 3) / 4 * 4;
  5264. range++;
  5265. }
  5266. return regs_len;
  5267. }
/**
 * netdev_get_regs - get register dump
 * @dev: Network device.
 * @regs: Ethtool registers data structure.
 * @ptr: Buffer to store the register values.
 *
 * This procedure dumps the register values in the provided buffer.
 */
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;

	/* First 0x40 bytes of the dump are the PCI configuration space. */
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}

	/* Then each memory-mapped register range listed in hw_regs_range. */
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}
/* Wake-on-LAN event types this hardware can wake on. */
#define WOL_SUPPORT			\
	(WAKE_PHY | WAKE_MAGIC |	\
	WAKE_UCAST | WAKE_MCAST |	\
	WAKE_BCAST | WAKE_ARP)

/**
 * netdev_get_wol - get Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This procedure returns Wake-on-LAN support.
 */
static void netdev_get_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	wol->supported = hw_priv->wol_support;
	wol->wolopts = hw_priv->wol_enable;
	/* No SecureOn password support. */
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/**
 * netdev_set_wol - set Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This function sets Wake-on-LAN support.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/*
	 * Need to find a way to retrieve the device IP address.  The
	 * hard-coded address below is presumably only used for the ARP
	 * wakeup pattern — verify in hw_enable_wol().
	 */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}
  5345. /**
  5346. * netdev_get_msglevel - get debug message level
  5347. * @dev: Network device.
  5348. *
  5349. * This function returns current debug message level.
  5350. *
  5351. * Return current debug message flags.
  5352. */
  5353. static u32 netdev_get_msglevel(struct net_device *dev)
  5354. {
  5355. struct dev_priv *priv = netdev_priv(dev);
  5356. return priv->msg_enable;
  5357. }
  5358. /**
  5359. * netdev_set_msglevel - set debug message level
  5360. * @dev: Network device.
  5361. * @value: Debug message flags.
  5362. *
  5363. * This procedure sets debug message level.
  5364. */
  5365. static void netdev_set_msglevel(struct net_device *dev, u32 value)
  5366. {
  5367. struct dev_priv *priv = netdev_priv(dev);
  5368. priv->msg_enable = value;
  5369. }
  5370. /**
  5371. * netdev_get_eeprom_len - get EEPROM length
  5372. * @dev: Network device.
  5373. *
  5374. * This function returns the length of the EEPROM.
  5375. *
  5376. * Return length of the EEPROM.
  5377. */
  5378. static int netdev_get_eeprom_len(struct net_device *dev)
  5379. {
  5380. return EEPROM_SIZE * 2;
  5381. }
  5382. /**
  5383. * netdev_get_eeprom - get EEPROM data
  5384. * @dev: Network device.
  5385. * @eeprom: Ethtool EEPROM data structure.
  5386. * @data: Buffer to store the EEPROM data.
  5387. *
  5388. * This function dumps the EEPROM data in the provided buffer.
  5389. *
  5390. * Return 0 if successful; otherwise an error code.
  5391. */
  5392. #define EEPROM_MAGIC 0x10A18842
  5393. static int netdev_get_eeprom(struct net_device *dev,
  5394. struct ethtool_eeprom *eeprom, u8 *data)
  5395. {
  5396. struct dev_priv *priv = netdev_priv(dev);
  5397. struct dev_info *hw_priv = priv->adapter;
  5398. u8 *eeprom_byte = (u8 *) eeprom_data;
  5399. int i;
  5400. int len;
  5401. len = (eeprom->offset + eeprom->len + 1) / 2;
  5402. for (i = eeprom->offset / 2; i < len; i++)
  5403. eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
  5404. eeprom->magic = EEPROM_MAGIC;
  5405. memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
  5406. return 0;
  5407. }
  5408. /**
  5409. * netdev_set_eeprom - write EEPROM data
  5410. * @dev: Network device.
  5411. * @eeprom: Ethtool EEPROM data structure.
  5412. * @data: Data buffer.
  5413. *
  5414. * This function modifies the EEPROM data one byte at a time.
  5415. *
  5416. * Return 0 if successful; otherwise an error code.
  5417. */
  5418. static int netdev_set_eeprom(struct net_device *dev,
  5419. struct ethtool_eeprom *eeprom, u8 *data)
  5420. {
  5421. struct dev_priv *priv = netdev_priv(dev);
  5422. struct dev_info *hw_priv = priv->adapter;
  5423. u16 eeprom_word[EEPROM_SIZE];
  5424. u8 *eeprom_byte = (u8 *) eeprom_word;
  5425. int i;
  5426. int len;
  5427. if (eeprom->magic != EEPROM_MAGIC)
  5428. return -EINVAL;
  5429. len = (eeprom->offset + eeprom->len + 1) / 2;
  5430. for (i = eeprom->offset / 2; i < len; i++)
  5431. eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
  5432. memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
  5433. memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
  5434. for (i = 0; i < EEPROM_SIZE; i++)
  5435. if (eeprom_word[i] != eeprom_data[i]) {
  5436. eeprom_data[i] = eeprom_word[i];
  5437. eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
  5438. }
  5439. return 0;
  5440. }
  5441. /**
  5442. * netdev_get_pauseparam - get flow control parameters
  5443. * @dev: Network device.
  5444. * @pause: Ethtool PAUSE settings data structure.
  5445. *
  5446. * This procedure returns the PAUSE control flow settings.
  5447. */
  5448. static void netdev_get_pauseparam(struct net_device *dev,
  5449. struct ethtool_pauseparam *pause)
  5450. {
  5451. struct dev_priv *priv = netdev_priv(dev);
  5452. struct dev_info *hw_priv = priv->adapter;
  5453. struct ksz_hw *hw = &hw_priv->hw;
  5454. pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
  5455. if (!hw->ksz_switch) {
  5456. pause->rx_pause =
  5457. (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
  5458. pause->tx_pause =
  5459. (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
  5460. } else {
  5461. pause->rx_pause =
  5462. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5463. SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
  5464. pause->tx_pause =
  5465. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5466. SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
  5467. }
  5468. }
  5469. /**
  5470. * netdev_set_pauseparam - set flow control parameters
  5471. * @dev: Network device.
  5472. * @pause: Ethtool PAUSE settings data structure.
  5473. *
  5474. * This function sets the PAUSE control flow settings.
  5475. * Not implemented yet.
  5476. *
  5477. * Return 0 if successful; otherwise an error code.
  5478. */
  5479. static int netdev_set_pauseparam(struct net_device *dev,
  5480. struct ethtool_pauseparam *pause)
  5481. {
  5482. struct dev_priv *priv = netdev_priv(dev);
  5483. struct dev_info *hw_priv = priv->adapter;
  5484. struct ksz_hw *hw = &hw_priv->hw;
  5485. struct ksz_port *port = &priv->port;
  5486. mutex_lock(&hw_priv->lock);
  5487. if (pause->autoneg) {
  5488. if (!pause->rx_pause && !pause->tx_pause)
  5489. port->flow_ctrl = PHY_NO_FLOW_CTRL;
  5490. else
  5491. port->flow_ctrl = PHY_FLOW_CTRL;
  5492. hw->overrides &= ~PAUSE_FLOW_CTRL;
  5493. port->force_link = 0;
  5494. if (hw->ksz_switch) {
  5495. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5496. SWITCH_RX_FLOW_CTRL, 1);
  5497. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5498. SWITCH_TX_FLOW_CTRL, 1);
  5499. }
  5500. port_set_link_speed(port);
  5501. } else {
  5502. hw->overrides |= PAUSE_FLOW_CTRL;
  5503. if (hw->ksz_switch) {
  5504. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5505. SWITCH_RX_FLOW_CTRL, pause->rx_pause);
  5506. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5507. SWITCH_TX_FLOW_CTRL, pause->tx_pause);
  5508. } else
  5509. set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
  5510. }
  5511. mutex_unlock(&hw_priv->lock);
  5512. return 0;
  5513. }
/**
 * netdev_get_ringparam - get tx/rx ring parameters
 * @dev: Network device.
 * @ring: Ethtool RING settings data structure.
 *
 * This procedure returns the TX/RX ring settings.
 */
static void netdev_get_ringparam(struct net_device *dev,
	struct ethtool_ringparam *ring)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* 512 descriptors per ring is the maximum this driver uses. */
	ring->tx_max_pending = (1 << 9);
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = (1 << 9);
	ring->rx_pending = hw->rx_desc_info.alloc;
}
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)

/* Names reported to ethtool -S, in the exact order the hardware MIB
 * counters are read by netdev_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "rx_discards" },
	{ "tx_discards" },
};
  5571. /**
  5572. * netdev_get_strings - get statistics identity strings
  5573. * @dev: Network device.
  5574. * @stringset: String set identifier.
  5575. * @buf: Buffer to store the strings.
  5576. *
  5577. * This procedure returns the strings used to identify the statistics.
  5578. */
  5579. static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  5580. {
  5581. struct dev_priv *priv = netdev_priv(dev);
  5582. struct dev_info *hw_priv = priv->adapter;
  5583. struct ksz_hw *hw = &hw_priv->hw;
  5584. if (ETH_SS_STATS == stringset)
  5585. memcpy(buf, &ethtool_stats_keys,
  5586. ETH_GSTRING_LEN * hw->mib_cnt);
  5587. }
  5588. /**
  5589. * netdev_get_sset_count - get statistics size
  5590. * @dev: Network device.
  5591. * @sset: The statistics set number.
  5592. *
  5593. * This function returns the size of the statistics to be reported.
  5594. *
  5595. * Return size of the statistics to be reported.
  5596. */
  5597. static int netdev_get_sset_count(struct net_device *dev, int sset)
  5598. {
  5599. struct dev_priv *priv = netdev_priv(dev);
  5600. struct dev_info *hw_priv = priv->adapter;
  5601. struct ksz_hw *hw = &hw_priv->hw;
  5602. switch (sset) {
  5603. case ETH_SS_STATS:
  5604. return hw->mib_cnt;
  5605. default:
  5606. return -EOPNOTSUPP;
  5607. }
  5608. }
/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev: Network device.
 * @stats: Ethtool statistics data structure.
 * @data: Buffer to store the statistics.
 *
 * This procedure returns the statistics.  It requests a MIB read for every
 * connected port, kicks the mib_read work item, waits for the reads to
 * complete, and copies the accumulated counters to @data.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	int rc;	/* swallows wait_event_interruptible_timeout(); value unused */
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	/* Mark every connected port as needing a counter read.  The
	 * counter[].read flags are shared with mib_read_work(), hence the
	 * lock.  read == 1 requests a read; mib_read_work() sets it to 2
	 * when the read completes.
	 */
	mutex_lock(&hw_priv->lock);
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	/* n < SWITCH_PORT_NUM means at least one port requested a read. */
	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		/* Single port: wait up to 1 s for its read to finish. */
		p = n;
		rc = wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		/* Multiple ports: wait longer on the first, then only on
		 * ports whose read is still in progress (cnt_ptr != 0).
		 */
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);

	/* Copy no more entries than ethtool asked for. */
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
  5671. /**
  5672. * netdev_set_features - set receive checksum support
  5673. * @dev: Network device.
  5674. * @features: New device features (offloads).
  5675. *
  5676. * This function sets receive checksum support setting.
  5677. *
  5678. * Return 0 if successful; otherwise an error code.
  5679. */
  5680. static int netdev_set_features(struct net_device *dev,
  5681. netdev_features_t features)
  5682. {
  5683. struct dev_priv *priv = netdev_priv(dev);
  5684. struct dev_info *hw_priv = priv->adapter;
  5685. struct ksz_hw *hw = &hw_priv->hw;
  5686. mutex_lock(&hw_priv->lock);
  5687. /* see note in hw_setup() */
  5688. if (features & NETIF_F_RXCSUM)
  5689. hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
  5690. else
  5691. hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
  5692. if (hw->enabled)
  5693. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  5694. mutex_unlock(&hw_priv->lock);
  5695. return 0;
  5696. }
/* ethtool entry points; handlers are defined above in this file. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_drvinfo		= netdev_get_drvinfo,
	.get_regs_len		= netdev_get_regs_len,
	.get_regs		= netdev_get_regs,
	.get_wol		= netdev_get_wol,
	.set_wol		= netdev_set_wol,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_eeprom_len		= netdev_get_eeprom_len,
	.get_eeprom		= netdev_get_eeprom,
	.set_eeprom		= netdev_set_eeprom,
	.get_pauseparam		= netdev_get_pauseparam,
	.set_pauseparam		= netdev_set_pauseparam,
	.get_ringparam		= netdev_get_ringparam,
	.get_strings		= netdev_get_strings,
	.get_sset_count		= netdev_get_sset_count,
	.get_ethtool_stats	= netdev_get_ethtool_stats,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
  5719. /*
  5720. * Hardware monitoring
  5721. */
  5722. static void update_link(struct net_device *dev, struct dev_priv *priv,
  5723. struct ksz_port *port)
  5724. {
  5725. if (priv->media_state != port->linked->state) {
  5726. priv->media_state = port->linked->state;
  5727. if (netif_running(dev))
  5728. set_media_state(dev, media_connected);
  5729. }
  5730. }
/* Work item that reads hardware MIB counters for each port.  Also run
 * directly from mib_monitor().  The counter[].read flag protocol:
 * 1 = read requested, 2 = read complete (waiters are woken).
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;
	int i;

	/* next_jiffies is a file-scope scheduling cursor -- presumably only
	 * touched from this work/timer context; TODO confirm no concurrent
	 * user.
	 */
	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			/* Stagger the next periodic read per port. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
  5767. static void mib_monitor(struct timer_list *t)
  5768. {
  5769. struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
  5770. mib_read_work(&hw_priv->mib_read);
  5771. /* This is used to verify Wake-on-LAN is working. */
  5772. if (hw_priv->pme_wait) {
  5773. if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
  5774. hw_clr_wol_pme_status(&hw_priv->hw);
  5775. hw_priv->pme_wait = 0;
  5776. }
  5777. } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
  5778. /* PME is asserted. Wait 2 seconds to clear it. */
  5779. hw_priv->pme_wait = jiffies + HZ * 2;
  5780. }
  5781. ksz_update_timer(&hw_priv->mib_timer_info);
  5782. }
/**
 * dev_monitor - periodic monitoring
 * @t: Timer embedded in the device's monitor_timer_info.
 *
 * This routine is run in a kernel timer to monitor the network device.
 */
static void dev_monitor(struct timer_list *t)
{
	struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
	struct net_device *dev = priv->mii_if.dev;
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	/* Poll the link only when the link-change interrupt is not usable. */
	if (!(hw->features & LINK_INT_WORKING))
		port_get_link_speed(port);
	update_link(dev, priv, port);

	ksz_update_timer(&priv->monitor_timer_info);
}
  5801. /*
  5802. * Linux network device interface functions
  5803. */
/* Driver exported variables */
/* Debug message level; see module_param(message) at the bottom of file. */
static int msg_enable;

/* MAC address overrides; ":" means "not set", see get_mac_addr(). */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports.  Some users like to take control of the
 * ports for running Spanning Tree Protocol.  The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast packets
 * are forwarded inside the switch and not passed to the host.  Only packets
 * that need the host's attention are passed to it.  This prevents the host
 * wasting CPU time to examine each and every incoming packets and do the
 * forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch.  Not sure what situation
 * needs that.  However, fast aging is used to flush the dynamic MAC table when
 * STP support is enabled.
 */
static int fast_aging;
  5838. /**
  5839. * netdev_init - initialize network device.
  5840. * @dev: Network device.
  5841. *
  5842. * This function initializes the network device.
  5843. *
  5844. * Return 0 if successful; otherwise an error code indicating failure.
  5845. */
  5846. static int __init netdev_init(struct net_device *dev)
  5847. {
  5848. struct dev_priv *priv = netdev_priv(dev);
  5849. /* 500 ms timeout */
  5850. ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
  5851. dev_monitor);
  5852. /* 500 ms timeout */
  5853. dev->watchdog_timeo = HZ / 2;
  5854. dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
  5855. /*
  5856. * Hardware does not really support IPv6 checksum generation, but
  5857. * driver actually runs faster with this on.
  5858. */
  5859. dev->hw_features |= NETIF_F_IPV6_CSUM;
  5860. dev->features |= dev->hw_features;
  5861. sema_init(&priv->proc_sem, 1);
  5862. priv->mii_if.phy_id_mask = 0x1;
  5863. priv->mii_if.reg_num_mask = 0x7;
  5864. priv->mii_if.dev = dev;
  5865. priv->mii_if.mdio_read = mdio_read;
  5866. priv->mii_if.mdio_write = mdio_write;
  5867. priv->mii_if.phy_id = priv->port.first_port + 1;
  5868. priv->msg_enable = netif_msg_init(msg_enable,
  5869. (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
  5870. return 0;
  5871. }
/* net_device entry points; handlers are defined earlier in this file. */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_features	= netdev_set_features,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
/* Unregister (if needed) and free a net_device.  watchdog_timeo is only
 * set by netdev_init(), which runs during register_netdev(), so a non-zero
 * value doubles as a "was registered" flag here.
 */
static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}
/* Per-PCI-device bookkeeping: shared hardware state plus one net_device
 * per switch port (up to SWITCH_PORT_NUM for the KSZ8842).
 */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Number of net_devices created so far; also used as hw id in probe. */
static int net_device_present;
/* Parse a module-parameter MAC address string (hex digits optionally
 * separated by ':') into the override address for @port.  A byte is
 * committed when a ':' or end of string is seen (got_num == 2).
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;

	i = j = num = got_num = 0;
	while (j < ETH_ALEN) {
		if (macaddr[i]) {
			int digit;

			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;	/* byte complete */
			else
				break;		/* invalid character: abort */
		} else if (got_num)
			got_num = 2;		/* end of string completes byte */
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				/* NOTE(review): hw.id is added to byte 5 once
				 * per parsed byte (up to 6 times), not once --
				 * looks suspicious but matches set behavior;
				 * confirm before changing.
				 */
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	/* Only a full 6-byte address activates the override for MAIN_PORT. */
	if (ETH_ALEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
  5942. #define KS884X_DMA_MASK (~0x0UL)
  5943. static void read_other_addr(struct ksz_hw *hw)
  5944. {
  5945. int i;
  5946. u16 data[3];
  5947. struct ksz_switch *sw = hw->ksz_switch;
  5948. for (i = 0; i < 3; i++)
  5949. data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
  5950. if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
  5951. sw->other_addr[5] = (u8) data[0];
  5952. sw->other_addr[4] = (u8)(data[0] >> 8);
  5953. sw->other_addr[3] = (u8) data[1];
  5954. sw->other_addr[2] = (u8)(data[1] >> 8);
  5955. sw->other_addr[1] = (u8) data[2];
  5956. sw->other_addr[0] = (u8)(data[2] >> 8);
  5957. }
  5958. }
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS		0x16c6
#endif

/* PCI probe: map registers, detect chip (KSZ8841 or KSZ8842), allocate
 * descriptor memory, set up MAC addresses and register one or two
 * net_devices.  Uses goto-based cleanup; labels unwind in reverse order
 * of acquisition.
 *
 * NOTE(review): the early `return result` paths after pci_enable_device()
 * do not call pci_disable_device() or release resources -- possible
 * resource leak on those failure paths; confirm against PCI core
 * expectations.
 */
static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pci_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	/* This hardware only does 32-bit DMA. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return result;

	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	/* Memory-mapped BAR expected; reject I/O port resources. */
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	/* hw_init() returns 1 for KSZ8841, 2 for KSZ8842, 0 if unknown. */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ":" sentinel means no macaddr= parameter was supplied. */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Only the non-switch chip advertises Wake-on-LAN. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			memcpy(dev->dev_addr, hw_priv->hw.override_addr,
			       ETH_ALEN);
		else {
			memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
			/* Derive a distinct address if both ports share one. */
			if (ether_addr_equal(sw->other_addr, hw->override_addr))
				dev->dev_addr[5] += port->first_port;
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			       (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}
/* PCI remove: tear down in reverse order of pcidev_init(). */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	/* kfree(NULL) is a no-op, so this is safe for the KSZ8841 case. */
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
  6158. #ifdef CONFIG_PM
  6159. static int pcidev_resume(struct pci_dev *pdev)
  6160. {
  6161. int i;
  6162. struct platform_info *info = pci_get_drvdata(pdev);
  6163. struct dev_info *hw_priv = &info->dev_info;
  6164. struct ksz_hw *hw = &hw_priv->hw;
  6165. pci_set_power_state(pdev, PCI_D0);
  6166. pci_restore_state(pdev);
  6167. pci_enable_wake(pdev, PCI_D0, 0);
  6168. if (hw_priv->wol_enable)
  6169. hw_cfg_wol_pme(hw, 0);
  6170. for (i = 0; i < hw->dev_count; i++) {
  6171. if (info->netdev[i]) {
  6172. struct net_device *dev = info->netdev[i];
  6173. if (netif_running(dev)) {
  6174. netdev_open(dev);
  6175. netif_device_attach(dev);
  6176. }
  6177. }
  6178. }
  6179. return 0;
  6180. }
  6181. static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
  6182. {
  6183. int i;
  6184. struct platform_info *info = pci_get_drvdata(pdev);
  6185. struct dev_info *hw_priv = &info->dev_info;
  6186. struct ksz_hw *hw = &hw_priv->hw;
  6187. /* Need to find a way to retrieve the device IP address. */
  6188. static const u8 net_addr[] = { 192, 168, 1, 1 };
  6189. for (i = 0; i < hw->dev_count; i++) {
  6190. if (info->netdev[i]) {
  6191. struct net_device *dev = info->netdev[i];
  6192. if (netif_running(dev)) {
  6193. netif_device_detach(dev);
  6194. netdev_close(dev);
  6195. }
  6196. }
  6197. }
  6198. if (hw_priv->wol_enable) {
  6199. hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
  6200. hw_cfg_wol_pme(hw, 1);
  6201. }
  6202. pci_save_state(pdev);
  6203. pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
  6204. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  6205. return 0;
  6206. }
  6207. #endif
static char pcidev_name[] = "ksz884xp";

/* Supported devices: 0x8841 (single MAC), 0x8842 (two-port switch). */
static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
	.suspend	= pcidev_suspend,
	.resume		= pcidev_resume,
#endif
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

/* See the comments above each variable's definition for details. */
module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");