
/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

https://bitbucket.org/bradfa/linux

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
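
/*
 * The macros below record where each statistic lives: IXGBE_STAT() points
 * into struct ixgbe_adapter, IXGBE_NETDEV_STAT() into struct
 * rtnl_link_stats64.  The sizeof(((struct foo *)0)->m) idiom measures a
 * member without needing an instance, so ixgbe_get_ethtool_stats() can
 * later decide whether to copy out a u32 or a u64.
 */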
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
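
/*
 * The ethtool stats block is laid out as: the global stats above, a Tx
 * packet/byte pair per queue, an Rx packet/byte pair per queue, and then
 * per-packet-buffer XON/XOFF counters.  ixgbe_get_strings() and
 * ixgbe_get_ethtool_stats() must walk this layout in the same order.
 */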
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		ecmd->supported |= SUPPORTED_10000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= SUPPORTED_100baseT_Full;

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		/* default modes in case phy.autoneg_advertised isn't set */
		if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
		if (supported_link & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
	}

	if (autoneg) {
		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}
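
/*
 * The fields filled in above back the "Supported/Advertised link modes",
 * "Speed" and "Duplex" lines the ethtool utility prints for the device.
 */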
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;

		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);

		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
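
/*
 * Illustrative use (not from this file): on a copper or multispeed-fiber
 * port, "ethtool -s <dev> autoneg on advertise 0x1020" would reach the
 * handler above and be folded into an IXGBE_LINK_SPEED_* mask for
 * setup_link(); 0x1020 is ADVERTISED_10000baseT_Full |
 * ADVERTISED_1000baseT_Full.
 */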
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    (ixgbe_device_supports_autoneg_fc(hw) != 0))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1129
	return IXGBE_REGS_LEN * sizeof(u32);
}
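
/*
 * IXGBE_REGS_LEN is one more than the highest regs_buff[] index written by
 * ixgbe_get_regs() below (regs_buff[1128] is the last entry), so the dump
 * buffer is sized exactly.
 */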
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			regs_buff[35 + i] = IXGBE_READ_REG(hw,
							   IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw,
							   IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
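
/*
 * Both EEPROM accessors convert the byte-addressed ethtool request into
 * the device's 16-bit word addressing: e.g. offset 3, len 4 spans words
 * 1..3, and the (offset & 1) adjustment in the copy above drops the
 * unwanted leading byte.  ixgbe_set_eeprom() below additionally does a
 * read-modify-write of the partial words at either end of the range.
 */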
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}
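
		/*
		 * u64_stats_fetch_begin_bh/retry_bh give a consistent
		 * snapshot of the 64-bit packet/byte counters even on
		 * 32-bit hosts where the ring updates are not atomic.
		 */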
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
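
/*
 * reg_pattern_test() below writes each test pattern through the register's
 * writable mask, reads the value back, and restores the original contents;
 * on a mismatch it reports the failing register offset through *data.
 */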
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = readl(adapter->hw.hw_addr + reg);
		writel((test_pattern[pat] & write),
		       (adapter->hw.hw_addr + reg));
		val = readl(adapter->hw.hw_addr + reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got "
			      "0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			writel(before, adapter->hw.hw_addr + reg);
			return 1;
		}
		writel(before, adapter->hw.hw_addr + reg);
	}
	return 0;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	before = readl(adapter->hw.hw_addr + reg);
	writel((write & mask), (adapter->hw.hw_addr + reg));
	val = readl(adapter->hw.hw_addr + reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
		*data = reg;
		writel(before, (adapter->hw.hw_addr + reg));
		return 1;
	}
	writel(before, (adapter->hw.hw_addr + reg));
	return 0;
}

#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
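
/*
 * These are macros rather than functions so that the "return 1" bails out
 * of the *caller* (ixgbe_reg_test) as soon as a register fails.
 */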
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X "
		      "expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}

static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_RE

Large files are truncated; the full file is available at the repository linked above.