drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c — ethtool support for the Intel ixgbe 10GbE driver.

Excerpt of a 3493-line file; the listing is truncated and the full file is available upstream in the Linux kernel source tree.

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 1999 - 2018 Intel Corporation. */
  3. /* ethtool support for ixgbe */
  4. #include <linux/interrupt.h>
  5. #include <linux/types.h>
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/pci.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/ethtool.h>
  11. #include <linux/vmalloc.h>
  12. #include <linux/highmem.h>
  13. #include <linux/uaccess.h>
  14. #include "ixgbe.h"
  15. #include "ixgbe_phy.h"
  16. #define IXGBE_ALL_RAR_ENTRIES 16
  17. enum {NETDEV_STATS, IXGBE_STATS};
  18. struct ixgbe_stats {
  19. char stat_string[ETH_GSTRING_LEN];
  20. int type;
  21. int sizeof_stat;
  22. int stat_offset;
  23. };
  24. #define IXGBE_STAT(m) IXGBE_STATS, \
  25. sizeof(((struct ixgbe_adapter *)0)->m), \
  26. offsetof(struct ixgbe_adapter, m)
  27. #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
  28. sizeof(((struct rtnl_link_stats64 *)0)->m), \
  29. offsetof(struct rtnl_link_stats64, m)
  30. static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
  31. {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
  32. {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
  33. {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
  34. {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
  35. {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
  36. {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
  37. {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
  38. {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
  39. {"lsc_int", IXGBE_STAT(lsc_int)},
  40. {"tx_busy", IXGBE_STAT(tx_busy)},
  41. {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
  42. {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
  43. {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
  44. {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
  45. {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
  46. {"multicast", IXGBE_NETDEV_STAT(multicast)},
  47. {"broadcast", IXGBE_STAT(stats.bprc)},
  48. {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
  49. {"collisions", IXGBE_NETDEV_STAT(collisions)},
  50. {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
  51. {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
  52. {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
  53. {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
  54. {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
  55. {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
  56. {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
  57. {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
  58. {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
  59. {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
  60. {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
  61. {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
  62. {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
  63. {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
  64. {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
  65. {"tx_restart_queue", IXGBE_STAT(restart_queue)},
  66. {"rx_length_errors", IXGBE_STAT(stats.rlec)},
  67. {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
  68. {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
  69. {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
  70. {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
  71. {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
  72. {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
  73. {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
  74. {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
  75. {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
  76. {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
  77. {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
  78. {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
  79. {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
  80. {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
  81. {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
  82. {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
  83. {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
  84. {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
  85. {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
  86. {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
  87. #ifdef IXGBE_FCOE
  88. {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
  89. {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
  90. {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
  91. {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
  92. {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
  93. {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
  94. {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
  95. {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
  96. #endif /* IXGBE_FCOE */
  97. };
  98. /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
  99. * we set the num_rx_queues to evaluate to num_tx_queues. This is
  100. * used because we do not have a good way to get the max number of
  101. * rx queues with CONFIG_RPS disabled.
  102. */
  103. #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
  104. #define IXGBE_QUEUE_STATS_LEN ( \
  105. (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
  106. (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
  107. #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
  108. #define IXGBE_PB_STATS_LEN ( \
  109. (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
  110. sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
  111. sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
  112. sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
  113. / sizeof(u64))
  114. #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
  115. IXGBE_PB_STATS_LEN + \
  116. IXGBE_QUEUE_STATS_LEN)
  117. static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
  118. "Register test (offline)", "Eeprom test (offline)",
  119. "Interrupt test (offline)", "Loopback test (offline)",
  120. "Link test (on/offline)"
  121. };
  122. #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
  123. static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
  124. #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
  125. "legacy-rx",
  126. #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
  127. "vf-ipsec",
  128. };
  129. #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
  130. /* currently supported speeds for 10G */
  131. #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
  132. SUPPORTED_10000baseKX4_Full | \
  133. SUPPORTED_10000baseKR_Full)
  134. #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
  135. static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
  136. {
  137. if (!ixgbe_isbackplane(hw->phy.media_type))
  138. return SUPPORTED_10000baseT_Full;
  139. switch (hw->device_id) {
  140. case IXGBE_DEV_ID_82598:
  141. case IXGBE_DEV_ID_82599_KX4:
  142. case IXGBE_DEV_ID_82599_KX4_MEZZ:
  143. case IXGBE_DEV_ID_X550EM_X_KX4:
  144. return SUPPORTED_10000baseKX4_Full;
  145. case IXGBE_DEV_ID_82598_BX:
  146. case IXGBE_DEV_ID_82599_KR:
  147. case IXGBE_DEV_ID_X550EM_X_KR:
  148. case IXGBE_DEV_ID_X550EM_X_XFI:
  149. return SUPPORTED_10000baseKR_Full;
  150. default:
  151. return SUPPORTED_10000baseKX4_Full |
  152. SUPPORTED_10000baseKR_Full;
  153. }
  154. }
  155. static int ixgbe_get_link_ksettings(struct net_device *netdev,
  156. struct ethtool_link_ksettings *cmd)
  157. {
  158. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  159. struct ixgbe_hw *hw = &adapter->hw;
  160. ixgbe_link_speed supported_link;
  161. bool autoneg = false;
  162. u32 supported, advertising;
  163. ethtool_convert_link_mode_to_legacy_u32(&supported,
  164. cmd->link_modes.supported);
  165. hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
  166. /* set the supported link speeds */
  167. if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
  168. supported |= ixgbe_get_supported_10gtypes(hw);
  169. if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
  170. supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
  171. SUPPORTED_1000baseKX_Full :
  172. SUPPORTED_1000baseT_Full;
  173. if (supported_link & IXGBE_LINK_SPEED_100_FULL)
  174. supported |= SUPPORTED_100baseT_Full;
  175. if (supported_link & IXGBE_LINK_SPEED_10_FULL)
  176. supported |= SUPPORTED_10baseT_Full;
  177. /* default advertised speed if phy.autoneg_advertised isn't set */
  178. advertising = supported;
  179. /* set the advertised speeds */
  180. if (hw->phy.autoneg_advertised) {
  181. advertising = 0;
  182. if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
  183. advertising |= ADVERTISED_10baseT_Full;
  184. if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
  185. advertising |= ADVERTISED_100baseT_Full;
  186. if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
  187. advertising |= supported & ADVRTSD_MSK_10G;
  188. if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
  189. if (supported & SUPPORTED_1000baseKX_Full)
  190. advertising |= ADVERTISED_1000baseKX_Full;
  191. else
  192. advertising |= ADVERTISED_1000baseT_Full;
  193. }
  194. } else {
  195. if (hw->phy.multispeed_fiber && !autoneg) {
  196. if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
  197. advertising = ADVERTISED_10000baseT_Full;
  198. }
  199. }
  200. if (autoneg) {
  201. supported |= SUPPORTED_Autoneg;
  202. advertising |= ADVERTISED_Autoneg;
  203. cmd->base.autoneg = AUTONEG_ENABLE;
  204. } else
  205. cmd->base.autoneg = AUTONEG_DISABLE;
  206. /* Determine the remaining settings based on the PHY type. */
  207. switch (adapter->hw.phy.type) {
  208. case ixgbe_phy_tn:
  209. case ixgbe_phy_aq:
  210. case ixgbe_phy_x550em_ext_t:
  211. case ixgbe_phy_fw:
  212. case ixgbe_phy_cu_unknown:
  213. supported |= SUPPORTED_TP;
  214. advertising |= ADVERTISED_TP;
  215. cmd->base.port = PORT_TP;
  216. break;
  217. case ixgbe_phy_qt:
  218. supported |= SUPPORTED_FIBRE;
  219. advertising |= ADVERTISED_FIBRE;
  220. cmd->base.port = PORT_FIBRE;
  221. break;
  222. case ixgbe_phy_nl:
  223. case ixgbe_phy_sfp_passive_tyco:
  224. case ixgbe_phy_sfp_passive_unknown:
  225. case ixgbe_phy_sfp_ftl:
  226. case ixgbe_phy_sfp_avago:
  227. case ixgbe_phy_sfp_intel:
  228. case ixgbe_phy_sfp_unknown:
  229. case ixgbe_phy_qsfp_passive_unknown:
  230. case ixgbe_phy_qsfp_active_unknown:
  231. case ixgbe_phy_qsfp_intel:
  232. case ixgbe_phy_qsfp_unknown:
  233. /* SFP+ devices, further checking needed */
  234. switch (adapter->hw.phy.sfp_type) {
  235. case ixgbe_sfp_type_da_cu:
  236. case ixgbe_sfp_type_da_cu_core0:
  237. case ixgbe_sfp_type_da_cu_core1:
  238. supported |= SUPPORTED_FIBRE;
  239. advertising |= ADVERTISED_FIBRE;
  240. cmd->base.port = PORT_DA;
  241. break;
  242. case ixgbe_sfp_type_sr:
  243. case ixgbe_sfp_type_lr:
  244. case ixgbe_sfp_type_srlr_core0:
  245. case ixgbe_sfp_type_srlr_core1:
  246. case ixgbe_sfp_type_1g_sx_core0:
  247. case ixgbe_sfp_type_1g_sx_core1:
  248. case ixgbe_sfp_type_1g_lx_core0:
  249. case ixgbe_sfp_type_1g_lx_core1:
  250. supported |= SUPPORTED_FIBRE;
  251. advertising |= ADVERTISED_FIBRE;
  252. cmd->base.port = PORT_FIBRE;
  253. break;
  254. case ixgbe_sfp_type_not_present:
  255. supported |= SUPPORTED_FIBRE;
  256. advertising |= ADVERTISED_FIBRE;
  257. cmd->base.port = PORT_NONE;
  258. break;
  259. case ixgbe_sfp_type_1g_cu_core0:
  260. case ixgbe_sfp_type_1g_cu_core1:
  261. supported |= SUPPORTED_TP;
  262. advertising |= ADVERTISED_TP;
  263. cmd->base.port = PORT_TP;
  264. break;
  265. case ixgbe_sfp_type_unknown:
  266. default:
  267. supported |= SUPPORTED_FIBRE;
  268. advertising |= ADVERTISED_FIBRE;
  269. cmd->base.port = PORT_OTHER;
  270. break;
  271. }
  272. break;
  273. case ixgbe_phy_xaui:
  274. supported |= SUPPORTED_FIBRE;
  275. advertising |= ADVERTISED_FIBRE;
  276. cmd->base.port = PORT_NONE;
  277. break;
  278. case ixgbe_phy_unknown:
  279. case ixgbe_phy_generic:
  280. case ixgbe_phy_sfp_unsupported:
  281. default:
  282. supported |= SUPPORTED_FIBRE;
  283. advertising |= ADVERTISED_FIBRE;
  284. cmd->base.port = PORT_OTHER;
  285. break;
  286. }
  287. /* Indicate pause support */
  288. supported |= SUPPORTED_Pause;
  289. switch (hw->fc.requested_mode) {
  290. case ixgbe_fc_full:
  291. advertising |= ADVERTISED_Pause;
  292. break;
  293. case ixgbe_fc_rx_pause:
  294. advertising |= ADVERTISED_Pause |
  295. ADVERTISED_Asym_Pause;
  296. break;
  297. case ixgbe_fc_tx_pause:
  298. advertising |= ADVERTISED_Asym_Pause;
  299. break;
  300. default:
  301. advertising &= ~(ADVERTISED_Pause |
  302. ADVERTISED_Asym_Pause);
  303. }
  304. if (netif_carrier_ok(netdev)) {
  305. switch (adapter->link_speed) {
  306. case IXGBE_LINK_SPEED_10GB_FULL:
  307. cmd->base.speed = SPEED_10000;
  308. break;
  309. case IXGBE_LINK_SPEED_5GB_FULL:
  310. cmd->base.speed = SPEED_5000;
  311. break;
  312. case IXGBE_LINK_SPEED_2_5GB_FULL:
  313. cmd->base.speed = SPEED_2500;
  314. break;
  315. case IXGBE_LINK_SPEED_1GB_FULL:
  316. cmd->base.speed = SPEED_1000;
  317. break;
  318. case IXGBE_LINK_SPEED_100_FULL:
  319. cmd->base.speed = SPEED_100;
  320. break;
  321. case IXGBE_LINK_SPEED_10_FULL:
  322. cmd->base.speed = SPEED_10;
  323. break;
  324. default:
  325. break;
  326. }
  327. cmd->base.duplex = DUPLEX_FULL;
  328. } else {
  329. cmd->base.speed = SPEED_UNKNOWN;
  330. cmd->base.duplex = DUPLEX_UNKNOWN;
  331. }
  332. ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
  333. supported);
  334. ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
  335. advertising);
  336. return 0;
  337. }
  338. static int ixgbe_set_link_ksettings(struct net_device *netdev,
  339. const struct ethtool_link_ksettings *cmd)
  340. {
  341. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  342. struct ixgbe_hw *hw = &adapter->hw;
  343. u32 advertised, old;
  344. s32 err = 0;
  345. u32 supported, advertising;
  346. ethtool_convert_link_mode_to_legacy_u32(&supported,
  347. cmd->link_modes.supported);
  348. ethtool_convert_link_mode_to_legacy_u32(&advertising,
  349. cmd->link_modes.advertising);
  350. if ((hw->phy.media_type == ixgbe_media_type_copper) ||
  351. (hw->phy.multispeed_fiber)) {
  352. /*
  353. * this function does not support duplex forcing, but can
  354. * limit the advertising of the adapter to the specified speed
  355. */
  356. if (advertising & ~supported)
  357. return -EINVAL;
  358. /* only allow one speed at a time if no autoneg */
  359. if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
  360. if (advertising ==
  361. (ADVERTISED_10000baseT_Full |
  362. ADVERTISED_1000baseT_Full))
  363. return -EINVAL;
  364. }
  365. old = hw->phy.autoneg_advertised;
  366. advertised = 0;
  367. if (advertising & ADVERTISED_10000baseT_Full)
  368. advertised |= IXGBE_LINK_SPEED_10GB_FULL;
  369. if (advertising & ADVERTISED_1000baseT_Full)
  370. advertised |= IXGBE_LINK_SPEED_1GB_FULL;
  371. if (advertising & ADVERTISED_100baseT_Full)
  372. advertised |= IXGBE_LINK_SPEED_100_FULL;
  373. if (advertising & ADVERTISED_10baseT_Full)
  374. advertised |= IXGBE_LINK_SPEED_10_FULL;
  375. if (old == advertised)
  376. return err;
  377. /* this sets the link speed and restarts auto-neg */
  378. while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  379. usleep_range(1000, 2000);
  380. hw->mac.autotry_restart = true;
  381. err = hw->mac.ops.setup_link(hw, advertised, true);
  382. if (err) {
  383. e_info(probe, "setup link failed with code %d\n", err);
  384. hw->mac.ops.setup_link(hw, old, true);
  385. }
  386. clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
  387. } else {
  388. /* in this case we currently only support 10Gb/FULL */
  389. u32 speed = cmd->base.speed;
  390. if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
  391. (advertising != ADVERTISED_10000baseT_Full) ||
  392. (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
  393. return -EINVAL;
  394. }
  395. return err;
  396. }
  397. static void ixgbe_get_pauseparam(struct net_device *netdev,
  398. struct ethtool_pauseparam *pause)
  399. {
  400. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  401. struct ixgbe_hw *hw = &adapter->hw;
  402. if (ixgbe_device_supports_autoneg_fc(hw) &&
  403. !hw->fc.disable_fc_autoneg)
  404. pause->autoneg = 1;
  405. else
  406. pause->autoneg = 0;
  407. if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
  408. pause->rx_pause = 1;
  409. } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
  410. pause->tx_pause = 1;
  411. } else if (hw->fc.current_mode == ixgbe_fc_full) {
  412. pause->rx_pause = 1;
  413. pause->tx_pause = 1;
  414. }
  415. }
  416. static int ixgbe_set_pauseparam(struct net_device *netdev,
  417. struct ethtool_pauseparam *pause)
  418. {
  419. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  420. struct ixgbe_hw *hw = &adapter->hw;
  421. struct ixgbe_fc_info fc = hw->fc;
  422. /* 82598 does no support link flow control with DCB enabled */
  423. if ((hw->mac.type == ixgbe_mac_82598EB) &&
  424. (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
  425. return -EINVAL;
  426. /* some devices do not support autoneg of link flow control */
  427. if ((pause->autoneg == AUTONEG_ENABLE) &&
  428. !ixgbe_device_supports_autoneg_fc(hw))
  429. return -EINVAL;
  430. fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
  431. if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
  432. fc.requested_mode = ixgbe_fc_full;
  433. else if (pause->rx_pause && !pause->tx_pause)
  434. fc.requested_mode = ixgbe_fc_rx_pause;
  435. else if (!pause->rx_pause && pause->tx_pause)
  436. fc.requested_mode = ixgbe_fc_tx_pause;
  437. else
  438. fc.requested_mode = ixgbe_fc_none;
  439. /* if the thing changed then we'll update and use new autoneg */
  440. if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
  441. hw->fc = fc;
  442. if (netif_running(netdev))
  443. ixgbe_reinit_locked(adapter);
  444. else
  445. ixgbe_reset(adapter);
  446. }
  447. return 0;
  448. }
  449. static u32 ixgbe_get_msglevel(struct net_device *netdev)
  450. {
  451. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  452. return adapter->msg_enable;
  453. }
  454. static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
  455. {
  456. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  457. adapter->msg_enable = data;
  458. }
  459. static int ixgbe_get_regs_len(struct net_device *netdev)
  460. {
  461. #define IXGBE_REGS_LEN 1145
  462. return IXGBE_REGS_LEN * sizeof(u32);
  463. }
  464. #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
  465. static void ixgbe_get_regs(struct net_device *netdev,
  466. struct ethtool_regs *regs, void *p)
  467. {
  468. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  469. struct ixgbe_hw *hw = &adapter->hw;
  470. u32 *regs_buff = p;
  471. u8 i;
  472. memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
  473. regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
  474. hw->device_id;
  475. /* General Registers */
  476. regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
  477. regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
  478. regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  479. regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
  480. regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
  481. regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
  482. regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
  483. regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
  484. /* NVM Register */
  485. regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
  486. regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
  487. regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
  488. regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
  489. regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
  490. regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
  491. regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
  492. regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
  493. regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
  494. regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
  495. /* Interrupt */
  496. /* don't read EICR because it can clear interrupt causes, instead
  497. * read EICS which is a shadow but doesn't clear EICR */
  498. regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
  499. regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
  500. regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
  501. regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
  502. regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
  503. regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
  504. regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
  505. regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
  506. regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
  507. regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
  508. regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
  509. regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
  510. /* Flow Control */
  511. regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
  512. for (i = 0; i < 4; i++)
  513. regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
  514. for (i = 0; i < 8; i++) {
  515. switch (hw->mac.type) {
  516. case ixgbe_mac_82598EB:
  517. regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
  518. regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
  519. break;
  520. case ixgbe_mac_82599EB:
  521. case ixgbe_mac_X540:
  522. case ixgbe_mac_X550:
  523. case ixgbe_mac_X550EM_x:
  524. case ixgbe_mac_x550em_a:
  525. regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
  526. regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
  527. break;
  528. default:
  529. break;
  530. }
  531. }
  532. regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
  533. regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
  534. /* Receive DMA */
  535. for (i = 0; i < 64; i++)
  536. regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
  537. for (i = 0; i < 64; i++)
  538. regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
  539. for (i = 0; i < 64; i++)
  540. regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
  541. for (i = 0; i < 64; i++)
  542. regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
  543. for (i = 0; i < 64; i++)
  544. regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
  545. for (i = 0; i < 64; i++)
  546. regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
  547. for (i = 0; i < 16; i++)
  548. regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
  549. for (i = 0; i < 16; i++)
  550. regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
  551. regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  552. for (i = 0; i < 8; i++)
  553. regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
  554. regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  555. regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
  556. /* Receive */
  557. regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  558. regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
  559. for (i = 0; i < 16; i++)
  560. regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
  561. for (i = 0; i < 16; i++)
  562. regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
  563. regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
  564. regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  565. regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  566. regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
  567. regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
  568. regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
  569. for (i = 0; i < 8; i++)
  570. regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
  571. for (i = 0; i < 8; i++)
  572. regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
  573. regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
  574. /* Transmit */
  575. for (i = 0; i < 32; i++)
  576. regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
  577. for (i = 0; i < 32; i++)
  578. regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
  579. for (i = 0; i < 32; i++)
  580. regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
  581. for (i = 0; i < 32; i++)
  582. regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
  583. for (i = 0; i < 32; i++)
  584. regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
  585. for (i = 0; i < 32; i++)
  586. regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
  587. for (i = 0; i < 32; i++)
  588. regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
  589. for (i = 0; i < 32; i++)
  590. regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
  591. regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
  592. for (i = 0; i < 16; i++)
  593. regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
  594. regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
  595. for (i = 0; i < 8; i++)
  596. regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
  597. regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
  598. /* Wake Up */
  599. regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
  600. regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
  601. regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
  602. regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
  603. regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
  604. regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
  605. regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
  606. regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
  607. regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
  608. /* DCB */
  609. regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
  610. regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
  611. switch (hw->mac.type) {
  612. case ixgbe_mac_82598EB:
  613. regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
  614. regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
  615. for (i = 0; i < 8; i++)
  616. regs_buff[833 + i] =
  617. IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
  618. for (i = 0; i < 8; i++)
  619. regs_buff[841 + i] =
  620. IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
  621. for (i = 0; i < 8; i++)
  622. regs_buff[849 + i] =
  623. IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
  624. for (i = 0; i < 8; i++)
  625. regs_buff[857 + i] =
  626. IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
  627. break;
  628. case ixgbe_mac_82599EB:
  629. case ixgbe_mac_X540:
  630. case ixgbe_mac_X550:
  631. case ixgbe_mac_X550EM_x:
  632. case ixgbe_mac_x550em_a:
  633. regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
  634. regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
  635. for (i = 0; i < 8; i++)
  636. regs_buff[833 + i] =
  637. IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
  638. for (i = 0; i < 8; i++)
  639. regs_buff[841 + i] =
  640. IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
  641. for (i = 0; i < 8; i++)
  642. regs_buff[849 + i] =
  643. IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
  644. for (i = 0; i < 8; i++)
  645. regs_buff[857 + i] =
  646. IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
  647. break;
  648. default:
  649. break;
  650. }
  651. for (i = 0; i < 8; i++)
  652. regs_buff[865 + i] =
  653. IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
  654. for (i = 0; i < 8; i++)
  655. regs_buff[873 + i] =
  656. IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
  657. /* Statistics */
  658. regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
  659. regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
  660. regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
  661. regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
  662. for (i = 0; i < 8; i++)
  663. regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
  664. regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
  665. regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
  666. regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
  667. regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
  668. regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
  669. regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
  670. regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
  671. for (i = 0; i < 8; i++)
  672. regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
  673. for (i = 0; i < 8; i++)
  674. regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
  675. for (i = 0; i < 8; i++)
  676. regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
  677. for (i = 0; i < 8; i++)
  678. regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
  679. regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
  680. regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
  681. regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
  682. regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
  683. regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
  684. regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
  685. regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
  686. regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
  687. regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
  688. regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
  689. regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
  690. regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
  691. regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
  692. regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
  693. for (i = 0; i < 8; i++)
  694. regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
  695. regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
  696. regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
  697. regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
  698. regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
  699. regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
  700. regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
  701. regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
  702. regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
  703. regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
  704. regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
  705. regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
  706. regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
  707. regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
  708. regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
  709. regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
  710. regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
  711. regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
  712. regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
  713. regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
  714. regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
  715. for (i = 0; i < 16; i++)
  716. regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
  717. for (i = 0; i < 16; i++)
  718. regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
  719. for (i = 0; i < 16; i++)
  720. regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
  721. for (i = 0; i < 16; i++)
  722. regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
  723. /* MAC */
  724. regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
  725. regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
  726. regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
  727. regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
  728. regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
  729. regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
  730. regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
  731. regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
  732. regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
  733. regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  734. regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
  735. regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
  736. regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
  737. regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
  738. regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
  739. regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
  740. regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
  741. regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
  742. regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
  743. regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
  744. regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
  745. regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
  746. regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
  747. regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
  748. regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
  749. regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
  750. regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  751. regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
  752. regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
  753. regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
  754. regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
  755. regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
  756. regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
  757. /* Diagnostic */
  758. regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
  759. for (i = 0; i < 8; i++)
  760. regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
  761. regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
  762. for (i = 0; i < 4; i++)
  763. regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
  764. regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
  765. regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
  766. for (i = 0; i < 8; i++)
  767. regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
  768. regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
  769. for (i = 0; i < 4; i++)
  770. regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
  771. regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
  772. regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
  773. for (i = 0; i < 4; i++)
  774. regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
  775. regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
  776. for (i = 0; i < 4; i++)
  777. regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
  778. for (i = 0; i < 8; i++)
  779. regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
  780. regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
  781. regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
  782. regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
  783. regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
  784. regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
  785. regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
  786. regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
  787. regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
  788. regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
  789. /* 82599 X540 specific registers */
  790. regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
  791. /* 82599 X540 specific DCB registers */
  792. regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
  793. regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
  794. for (i = 0; i < 4; i++)
  795. regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
  796. regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
  797. /* same as RTTQCNRM */
  798. regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
  799. /* same as RTTQCNRR */
  800. /* X540 specific DCB registers */
  801. regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
  802. regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
  803. /* Security config registers */
  804. regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
  805. regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
  806. regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
  807. regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
  808. regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
  809. regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
  810. }
  811. static int ixgbe_get_eeprom_len(struct net_device *netdev)
  812. {
  813. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  814. return adapter->hw.eeprom.word_size * 2;
  815. }
  816. static int ixgbe_get_eeprom(struct net_device *netdev,
  817. struct ethtool_eeprom *eeprom, u8 *bytes)
  818. {
  819. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  820. struct ixgbe_hw *hw = &adapter->hw;
  821. u16 *eeprom_buff;
  822. int first_word, last_word, eeprom_len;
  823. int ret_val = 0;
  824. u16 i;
  825. if (eeprom->len == 0)
  826. return -EINVAL;
  827. eeprom->magic = hw->vendor_id | (hw->device_id << 16);
  828. first_word = eeprom->offset >> 1;
  829. last_word = (eeprom->offset + eeprom->len - 1) >> 1;
  830. eeprom_len = last_word - first_word + 1;
  831. eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
  832. if (!eeprom_buff)
  833. return -ENOMEM;
  834. ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
  835. eeprom_buff);
  836. /* Device's eeprom is always little-endian, word addressable */
  837. for (i = 0; i < eeprom_len; i++)
  838. le16_to_cpus(&eeprom_buff[i]);
  839. memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
  840. kfree(eeprom_buff);
  841. return ret_val;
  842. }
/**
 * ixgbe_set_eeprom - write a byte range of the EEPROM (ethtool -E)
 * @netdev: network interface device structure
 * @eeprom: ethtool request; magic must match this device's vendor/device id
 * @bytes: eeprom->len bytes of data to write starting at eeprom->offset
 *
 * The EEPROM is 16-bit word addressable, so a request starting or ending
 * on an odd byte needs a read/modify/write of the partial boundary word.
 * The affected word range is assembled in CPU order in a bounce buffer,
 * converted back to little-endian, written in one go, and the EEPROM
 * checksum is refreshed on success.
 */
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	/* reject requests not explicitly targeted at this exact device */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		/* user data lands starting at the word's second byte */
		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	/* convert the whole range back to device (little-endian) order */
	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
/* Fill in the driver identification strings reported by ethtool -i. */
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	/* firmware version string was pre-built from the EEPROM image id */
	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	/* lets ethtool size its buffer for the private-flags string set */
	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
  912. static void ixgbe_get_ringparam(struct net_device *netdev,
  913. struct ethtool_ringparam *ring)
  914. {
  915. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  916. struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
  917. struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
  918. ring->rx_max_pending = IXGBE_MAX_RXD;
  919. ring->tx_max_pending = IXGBE_MAX_TXD;
  920. ring->rx_pending = rx_ring->count;
  921. ring->tx_pending = tx_ring->count;
  922. }
/**
 * ixgbe_set_ringparam - change Tx/Rx descriptor ring sizes (ethtool -G)
 * @netdev: network interface device structure
 * @ring: requested ring sizes; mini/jumbo rings are unsupported
 *
 * New resources are allocated into a temporary ring array first and only
 * swapped in once every allocation has succeeded, so the device keeps
 * working resources even if an allocation fails part-way.  XDP Tx rings
 * always share the regular Tx ring size.
 *
 * Return: 0 on success, -EINVAL for unsupported parameters, -ENOMEM on
 * allocation failure.
 */
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp to hardware limits and round up to the required
	 * descriptor-count multiple
	 */
	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize against any concurrent reset/reconfiguration */
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* interface down: no resources allocated yet, just record
		 * the new counts for the next open
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the temp rings allocated so far */
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* XDP rings follow the regular Tx rings in temp_ring */
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* every allocation succeeded: swap the new resources in */
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	/* bring the interface back up even after a partial failure */
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
  1043. static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
  1044. {
  1045. switch (sset) {
  1046. case ETH_SS_TEST:
  1047. return IXGBE_TEST_LEN;
  1048. case ETH_SS_STATS:
  1049. return IXGBE_STATS_LEN;
  1050. case ETH_SS_PRIV_FLAGS:
  1051. return IXGBE_PRIV_FLAGS_STR_LEN;
  1052. default:
  1053. return -EOPNOTSUPP;
  1054. }
  1055. }
/**
 * ixgbe_get_ethtool_stats - collect the ETH_SS_STATS values (ethtool -S)
 * @netdev: network interface device structure
 * @stats: ethtool stats request header (unused here)
 * @data: output array; ordering must match ixgbe_get_strings() exactly
 *
 * Emits the global stats table, then per-queue Tx/Rx packet and byte
 * counts, then per-packet-buffer flow-control counters.
 */
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);

	/* global stats: source is either the netdev stats or the adapter
	 * struct, selected per-entry by the table's type field
	 */
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		/* stat fields are either u64 or u32; pick by recorded size */
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* per-queue Tx counters; unused slots report zero */
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		/* seqcount loop gives a consistent packets/bytes snapshot */
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* per-queue Rx counters */
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* per-packet-buffer priority flow control counters, Tx then Rx */
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
  1124. static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
  1125. u8 *data)
  1126. {
  1127. char *p = (char *)data;
  1128. unsigned int i;
  1129. switch (stringset) {
  1130. case ETH_SS_TEST:
  1131. for (i = 0; i < IXGBE_TEST_LEN; i++) {
  1132. memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
  1133. data += ETH_GSTRING_LEN;
  1134. }
  1135. break;
  1136. case ETH_SS_STATS:
  1137. for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
  1138. memcpy(p, ixgbe_gstrings_stats[i].stat_string,
  1139. ETH_GSTRING_LEN);
  1140. p += ETH_GSTRING_LEN;
  1141. }
  1142. for (i = 0; i < netdev->num_tx_queues; i++) {
  1143. sprintf(p, "tx_queue_%u_packets", i);
  1144. p += ETH_GSTRING_LEN;
  1145. sprintf(p, "tx_queue_%u_bytes", i);
  1146. p += ETH_GSTRING_LEN;
  1147. }
  1148. for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
  1149. sprintf(p, "rx_queue_%u_packets", i);
  1150. p += ETH_GSTRING_LEN;
  1151. sprintf(p, "rx_queue_%u_bytes", i);
  1152. p += ETH_GSTRING_LEN;
  1153. }
  1154. for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
  1155. sprintf(p, "tx_pb_%u_pxon", i);
  1156. p += ETH_GSTRING_LEN;
  1157. sprintf(p, "tx_pb_%u_pxoff", i);
  1158. p += ETH_GSTRING_LEN;
  1159. }
  1160. for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
  1161. sprintf(p, "rx_pb_%u_pxon", i);
  1162. p += ETH_GSTRING_LEN;
  1163. sprintf(p, "rx_pb_%u_pxoff", i);
  1164. p += ETH_GSTRING_LEN;
  1165. }
  1166. /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
  1167. break;
  1168. case ETH_SS_PRIV_FLAGS:
  1169. memcpy(data, ixgbe_priv_flags_strings,
  1170. IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
  1171. }
  1172. }
  1173. static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
  1174. {
  1175. struct ixgbe_hw *hw = &adapter->hw;
  1176. bool link_up;
  1177. u32 link_speed = 0;
  1178. if (ixgbe_removed(hw->hw_addr)) {
  1179. *data = 1;
  1180. return 1;
  1181. }
  1182. *data = 0;
  1183. hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
  1184. if (link_up)
  1185. return *data;
  1186. else
  1187. *data = 1;
  1188. return *data;
  1189. }
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;	/* offset of the first register under test */
	u8 array_len;	/* number of registers/table entries covered */
	u8 test_type;	/* one of the *_TEST kinds defined below */
	u32 mask;	/* bits expected to read back */
	u32 write;	/* bits the test is allowed to write */
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1	/* write test patterns, verify masked read-back */
#define SET_READ_TEST	2	/* single write, verify masked read-back */
#define WRITE_NO_TEST	3	/* setup write only, no verification */
#define TABLE32_TEST	4	/* pattern test over a contiguous 32-bit table */
#define TABLE64_TEST_LO	5	/* pattern test of the low dwords of a 64-bit table */
#define TABLE64_TEST_HI	6	/* pattern test of the high dwords of a 64-bit table */
/* default 82599 register test
 * (also used for X540/X550 family MACs — see ixgbe_reg_test());
 * terminated by the all-zero sentinel entry
 */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* enable Rx queues before exercising their tail registers */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
/* default 82598 register test; terminated by the all-zero sentinel entry */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
  1263. static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
  1264. u32 mask, u32 write)
  1265. {
  1266. u32 pat, val, before;
  1267. static const u32 test_pattern[] = {
  1268. 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
  1269. if (ixgbe_removed(adapter->hw.hw_addr)) {
  1270. *data = 1;
  1271. return true;
  1272. }
  1273. for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
  1274. before = ixgbe_read_reg(&adapter->hw, reg);
  1275. ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
  1276. val = ixgbe_read_reg(&adapter->hw, reg);
  1277. if (val != (test_pattern[pat] & write & mask)) {
  1278. e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
  1279. reg, val, (test_pattern[pat] & write & mask));
  1280. *data = reg;
  1281. ixgbe_write_reg(&adapter->hw, reg, before);
  1282. return true;
  1283. }
  1284. ixgbe_write_reg(&adapter->hw, reg, before);
  1285. }
  1286. return false;
  1287. }
  1288. static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
  1289. u32 mask, u32 write)
  1290. {
  1291. u32 val, before;
  1292. if (ixgbe_removed(adapter->hw.hw_addr)) {
  1293. *data = 1;
  1294. return true;
  1295. }
  1296. before = ixgbe_read_reg(&adapter->hw, reg);
  1297. ixgbe_write_reg(&adapter->hw, reg, write & mask);
  1298. val = ixgbe_read_reg(&adapter->hw, reg);
  1299. if ((write & mask) != (val & mask)) {
  1300. e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
  1301. reg, (val & mask), (write & mask));
  1302. *data = reg;
  1303. ixgbe_write_reg(&adapter->hw, reg, before);
  1304. return true;
  1305. }
  1306. ixgbe_write_reg(&adapter->hw, reg, before);
  1307. return false;
  1308. }
  1309. static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
  1310. {
  1311. const struct ixgbe_reg_test *test;
  1312. u32 value, before, after;
  1313. u32 i, toggle;
  1314. if (ixgbe_removed(adapter->hw.hw_addr)) {
  1315. e_err(drv, "Adapter removed - register test blocked\n");
  1316. *data = 1;
  1317. return 1;
  1318. }
  1319. switch (adapter->hw.mac.type) {
  1320. case ixgbe_mac_82598EB:
  1321. toggle = 0x7FFFF3FF;
  1322. test = reg_test_82598;
  1323. break;
  1324. case ixgbe_mac_82599EB:
  1325. case ixgbe_mac_X540:
  1326. case ixgbe_mac_X550:
  1327. case ixgbe_mac_X550EM_x:
  1328. case ixgbe_mac_x550em_a:
  1329. toggle = 0x7FFFF30F;
  1330. test = reg_test_82599;
  1331. break;
  1332. default:
  1333. *data = 1;
  1334. return 1;
  1335. }
  1336. /*
  1337. * Because the status register is such a special case,
  1338. * we handle it separately from the rest of the register
  1339. * tests. Some bits are read-only, some toggle, and some
  1340. * are writeable on newer MACs.
  1341. */
  1342. before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
  1343. value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
  1344. ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
  1345. after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
  1346. if (value != after) {
  1347. e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
  1348. after, value);
  1349. *data = 1;
  1350. return 1;
  1351. }
  1352. /* restore previous status */
  1353. ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
  1354. /*
  1355. * Perform the remainder of the register test, looping through
  1356. * the test table until we either fail or reach the null entry.
  1357. */
  1358. while (test->reg) {
  1359. for (i = 0; i < test->array_len; i++) {
  1360. bool b = false;
  1361. switch (test->test_type) {
  1362. case PATTERN_TEST:
  1363. b = reg_pattern_test(adapter, data,
  1364. test->reg + (i * 0x40),
  1365. test->mask,
  1366. test->write);
  1367. break;
  1368. case SET_READ_TEST:
  1369. b = reg_set_and_check(adapter, data,
  1370. test->reg + (i * 0x40),
  1371. test->mask,
  1372. test->write);
  1373. break;
  1374. case WRITE_NO_TEST:
  1375. ixgbe_write_reg(&adapter->hw,
  1376. test->reg + (i * 0x40),
  1377. test->wri