
/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

http://github.com/mirrors/linux

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
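
/* Each IXGBE_STAT()/IXGBE_NETDEV_STAT() use expands to the {type, size,
 * offset} triplet that completes a struct ixgbe_stats entry, letting
 * ixgbe_get_ethtool_stats() fetch every counter generically via its offset.
 */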
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
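
/* IXGBE_STATS_LEN is the full ETH_SS_STATS count: the global table above,
 * plus a packets/bytes pair per Tx and Rx queue, plus the per-packet-buffer
 * XON/XOFF counters; ixgbe_get_sset_count() reports it to ethtool.
 */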
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
	"vf-ipsec",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
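
/* Report link settings: build the legacy supported/advertising masks from
 * the MAC/PHY capabilities, then translate them into the ksettings bitmaps.
 */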
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
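
/* Changing link settings: copper and multispeed-fiber ports may narrow
 * hw->phy.autoneg_advertised and restart link setup; all other ports only
 * accept a forced 10Gb/s full-duplex request.
 */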
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;
		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
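
/* Pause (flow control) parameters are reported from hw->fc.current_mode;
 * changes go through hw->fc.requested_mode and take effect via a reinit
 * (or a reset when the interface is down).
 */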
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1145
	return IXGBE_REGS_LEN * sizeof(u32);
}
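
/* ixgbe_get_regs() fills a fixed IXGBE_REGS_LEN-word snapshot at hard-coded
 * buffer offsets: general, NVM, interrupt, flow control, Rx/Tx DMA, MAC,
 * diagnostic and security registers, with MAC-type-specific variants where
 * the register layout differs.
 */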
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
						/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
						/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
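
/* EEPROM access: ethtool passes byte offsets and lengths, but the EEPROM is
 * addressed as little-endian 16-bit words, so reads round the range out to
 * whole words and writes read-modify-write partial words at either end.
 */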
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
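
/* Requested descriptor counts are clamped to [IXGBE_MIN_*, IXGBE_MAX_*] and
 * aligned to the hardware's required multiple; if the interface is running,
 * new rings are allocated before the old ones are freed, so an allocation
 * failure leaves the device usable.
 */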
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
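
/* Values are emitted in the same order as the ETH_SS_STATS names from
 * ixgbe_get_strings(): the global table, per-queue Tx/Rx packets/bytes
 * pairs (read under the u64_stats seqcount for a consistent snapshot),
 * then per-packet-buffer XON/XOFF counters.
 */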
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
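
/* ixgbe_reg_test() walks each table entry: registers in arrays are stepped
 * by 0x40 bytes per element, TABLE32 entries by 4 bytes, and TABLE64
 * low/high words by 8 bytes (the high word at reg + 4).
 */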

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests. Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}
  1406. static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
  1407. {
  1408. struct ixgbe_hw *hw = &adapter->hw;
  1409. if (hw->eeprom.ops.validate_checksum(hw, NULL))
  1410. *data = 1;
  1411. else
  1412. *data = 0;
  1413. return *data;
  1414. }
  1415. static irqreturn_t ixgbe_test_intr(int irq, void *data)
  1416. {
  1417. struct net_device *netdev = (struct net_device *) data;
  1418. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  1419. adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
  1420. return IRQ_HANDLED;
  1421. }
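
/* ixgbe_intr_test - ethtool interrupt self-test for legacy/MSI vectors
 *
 * Installs ixgbe_test_intr() as a temporary handler (MSI-X is not tested
 * here), then for each of the low ten interrupt causes masks or unmasks
 * the bit in EIMC/EIMS and forces it through EICS, verifying via test_icr
 * that masked causes stay silent and unmasked ones fire.  *data reports
 * the failing step (1, 3, 4 or 5) or 0 on success.
 */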
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	/* Shut down the DMA engines now so they can be reinitialized later,
	 * since the test rings and normally used rings should overlap on
	 * queue 0 we can just use the standard disable Rx/Tx calls and they
	 * will take care of disabling the test rings for us.
	 */

	/* first Rx */
	ixgbe_disable_rx(adapter);

	/* now Tx */
	ixgbe_disable_tx(adapter);

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
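
/* ixgbe_setup_desc_rings - allocate the dedicated loopback test rings
 *
 * Builds one Tx and one Rx test ring on queue 0, reusing the register
 * indices of the regular queue 0 rings, enables the Tx DMA engine on the
 * MACs that require it, and sets the RXCTRL.DMBYPS bypass bit before
 * re-enabling Rx so the test frames are delivered during the diagnostic.
 */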
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
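
/* ixgbe_setup_loopback_test - put the MAC into internal loopback
 *
 * Sets HLREG0.LPBK together with promiscuous-style FCTRL bits so the
 * transmitted test frames are accepted back, forces link up (MACC.FLU on
 * X540/X550 parts, AUTOC.FLU otherwise), and on 82598 powers down the
 * Atlas Tx lanes, which the reset path re-enables afterwards.
 */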
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 need to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
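
/* The loopback frame is 0xFF across its first half; within the second
 * half, a run of 0xAA carries single 0xBE and 0xAF marker bytes at fixed
 * offsets.  ixgbe_check_lbtest_frame() below spot-checks one 0xFF byte
 * and both markers to decide whether a received buffer holds our frame.
 */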
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
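
/* ixgbe_clean_test_rings - reclaim test descriptors and count good frames
 *
 * Walks the Tx ring up to next_to_use, unmapping and freeing every
 * completed transmit (bailing out early if a descriptor lacks the DD
 * writeback bit), then walks the Rx ring counting buffers whose contents
 * pass ixgbe_check_lbtest_frame(), re-arming the ring as it goes.
 * Returns the number of frames received intact.
 */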
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
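
/* ixgbe_run_loopback_test - transmit/receive bursts over the loopback path
 *
 * Sends the same 1024-byte test skb in bursts of 64 (taking an extra
 * reference per transmit), waits 200 ms for the frames to loop back, and
 * requires all 64 back each cycle.  The loop count is sized so the larger
 * of the two rings wraps roughly twice.  Non-zero return values (11-13)
 * identify the failing stage for the ethtool self-test result.
 */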
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;

	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;

	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
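
/* ixgbe_diag_test - ethtool self-test entry point
 *
 * Result slots: data[0] registers, data[1] eeprom, data[2] interrupts,
 * data[3] loopback, data[4] link.  Offline testing is refused while VFs
 * are active, and the loopback stage is skipped in SR-IOV/VMDq mode; the
 * interface is closed around the offline sequence and reopened (or reset)
 * afterwards.
 */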
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic.
		 */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}

static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
			    WAKE_FILTER))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}
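
/* ixgbe_set_coalesce - program interrupt coalescing from ethtool
 *
 * ITR settings of 0 (off) and 1 (dynamic) are stored verbatim; real
 * microsecond values are kept shifted left by two bits, which is why
 * ixgbe_get_coalesce() shifts them back.  A reset is scheduled when the
 * change crosses a TXDCTL.WTHRESH boundary or toggles RSC.
 */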
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
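
/* ixgbe_get_ethtool_fdir_entry - report one Flow Director rule
 *
 * Looks up the rule at fsp->location in the sw_idx-sorted filter list and
 * translates the stored atr format back into an ethtool flow spec,
 * including the shared per-port mask and the drop/queue action.
 */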
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
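
/* ixgbe_update_ethtool_fdir_entry - insert, replace or delete a rule
 *
 * The filter list is kept sorted by sw_idx.  An existing rule at @sw_idx
 * is erased from hardware (unless the replacement shares its bucket hash)
 * and dropped from the list; when @input is NULL the call is a pure
 * delete and the return value reports whether a rule was actually found.
 */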
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}

static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
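
/* ixgbe_add_ethtool_fdir_entry - validate and program a new FDIR rule
 *
 * Resolves the ring cookie to an absolute queue (or the drop queue),
 * converts the ethtool flow spec into the atr filter/mask format, and
 * programs it under fdir_perfect_lock.  The hardware supports a single
 * input mask per port: the first rule establishes it and later rules
 * must match it exactly.
 */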
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is masked into a set of queues and ixgbe pools,
	 * or we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}

static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}

#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
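
/* ixgbe_set_rss_hash_opt - update the RSS hash fields via ETHTOOL_SRXFH
 *
 * Only src/dst IP hashing (mandatory) plus L4 port hashing (fixed on for
 * TCP, selectable for UDP) can be expressed; anything else is -EINVAL.
 * Changes are written to MRQC, or to the PF pool's PFVFMRQC on X550 and
 * later with SR-IOV, and enabling UDP RSS warns about possible fragment
 * reordering.
 */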
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}

static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}

static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}

static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}

static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
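
/* ixgbe_max_channels - upper bound on combined channels for ethtool -L
 *
 * One queue without MSI-X; the RSS mask limit under SR-IOV; per-TC queue
 * counts under DCB (4, 8 or 16 depending on MAC and TC count); up to
 * IXGBE_MAX_FDIR_INDICES when ATR sampling is on; otherwise the plain
 * RSS maximum.
 */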
  2674. static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
  2675. {
  2676. unsigned int max_combined;
  2677. u8 tcs = adapter->hw_tcs;
  2678. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
  2679. /* We only support one q_vector without MSI-X */
  2680. max_combined = 1;
  2681. } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  2682. /* Limit value based on the queue mask */
  2683. max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
  2684. } else if (tcs > 1) {
  2685. /* For DCB report channels per traffic class */
  2686. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  2687. /* 8 TC w/ 4 queues per TC */
  2688. max_combined = 4;
  2689. } else if (tcs > 4) {
  2690. /* 8 TC w/ 8 queues per TC */
  2691. max_combined = 8;
  2692. } else {
  2693. /* 4 TC w/ 16 queues per TC */
  2694. max_combined = 16;
  2695. }
  2696. } else if (adapter->atr_sample_rate) {
  2697. /* support up to 64 queues with ATR */
  2698. max_combined = IXGBE_MAX_FDIR_INDICES;
  2699. } else {
  2700. /* support up to 16 queues with RSS */
  2701. max_combined = ixgbe_max_rss_indices(adapter);
  2702. }
  2703. return max_combined;
  2704. }
  2705. static void ixgbe_get_channels(struct net_device *dev,
  2706. struct ethtool_channels *ch)
  2707. {
  2708. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2709. /* report maximum channels */
  2710. ch->max_combined = ixgbe_max_channels(adapter);
  2711. /* report info for other vector */
  2712. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  2713. ch->max_other = NON_Q_VECTORS;
  2714. ch->other_count = NON_Q_VECTORS;
  2715. }
  2716. /* record RSS queues */
  2717. ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
  2718. /* nothing else to report if RSS is disabled */
  2719. if (ch->combined_count == 1)
  2720. return;
  2721. /* we do not support ATR queueing if SR-IOV is enabled */
  2722. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  2723. return;
  2724. /* same thing goes for being DCB enabled */
  2725. if (adapter->hw_tcs > 1)
  2726. return;
  2727. /* if ATR is disabled we can exit */
  2728. if (!adapter->atr_sample_rate)
  2729. return;
  2730. /* report flow director queues as maximum channels */
  2731. ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
  2732. }
  2733. static int ixgbe_set_channels(struct net_device *dev,
  2734. struct ethtool_channels *ch)
  2735. {
  2736. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2737. unsigned int count = ch->combined_count;
  2738. u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
  2739. /* verify they are not requesting separate vectors */
  2740. if (!count || ch->rx_count || ch->tx_count)
  2741. return -EINVAL;
  2742. /* verify other_count has not changed */
  2743. if (ch->other_count != NON_Q_VECTORS)
  2744. return -EINVAL;
  2745. /* verify the number of channels does not exceed hardware limits */
  2746. if (count > ixgbe_max_channels(adapter))
  2747. return -EINVAL;
  2748. /* update feature limits from largest to smallest supported values */
  2749. adapter->ring_feature[RING_F_FDIR].limit = count;
  2750. /* cap RSS limit */
  2751. if (count > max_rss_indices)
  2752. count = max_rss_indices;
  2753. adapter->ring_feature[RING_F_RSS].limit = count;
  2754. #ifdef IXGBE_FCOE
  2755. /* cap FCoE limit at 8 */
  2756. if (count > IXGBE_FCRETA_SIZE)
  2757. count = IXGBE_FCRETA_SIZE;
  2758. adapter->ring_feature[RING_F_FCOE].limit = count;
  2759. #endif
  2760. /* use setup TC to update any traffic class queue mapping */
  2761. return ixgbe_setup_tc(dev, adapter->hw_tcs);
  2762. }
  2763. static int ixgbe_get_module_info(struct net_device *dev,
  2764. struct ethtool_modinfo *modinfo)
  2765. {
  2766. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2767. struct ixgbe_hw *hw = &adapter->hw;
  2768. s32 status;
  2769. u8 sff8472_rev, addr_mode;
  2770. bool page_swap = false;
  2771. if (hw->phy.type == ixgbe_phy_fw)
  2772. return -ENXIO;
  2773. /* Check whether we support SFF-8472 or not */
  2774. status = hw->phy.ops.read_i2c_eeprom(hw,
  2775. IXGBE_SFF_SFF_8472_COMP,
  2776. &sff8472_rev);
  2777. if (status)
  2778. return -EIO;
  2779. /* addressing mode is not supported */
  2780. status = hw->phy.ops.read_i2c_eeprom(hw,
  2781. IXGBE_SFF_SFF_8472_SWAP,
  2782. &addr_mode);
  2783. if (status)
  2784. return -EIO;
  2785. if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
  2786. e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
  2787. page_swap = true;
  2788. }
  2789. if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
  2790. !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
  2791. /* We have a SFP, but it does not support SFF-8472 */
  2792. modinfo->type = ETH_MODULE_SFF_8079;
  2793. modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
  2794. } else {
  2795. /* We have a SFP which supports a revision of SFF-8472. */
  2796. modinfo->type = ETH_MODULE_SFF_8472;
  2797. modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
  2798. }
  2799. return 0;
  2800. }
  2801. static int ixgbe_get_module_eeprom(struct net_device *dev,
  2802. struct ethtool_eeprom *ee,
  2803. u8 *data)
  2804. {
  2805. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2806. struct ixgbe_hw *hw = &adapter->hw;
  2807. s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
  2808. u8 databyte = 0xFF;
  2809. int i = 0;
  2810. if (ee->len == 0)
  2811. return -EINVAL;
  2812. if (hw->phy.type == ixgbe_phy_fw)
  2813. return -ENXIO;
  2814. for (i = ee->offset; i < ee->offset + ee->len; i++) {
  2815. /* I2C reads can take long time */
  2816. if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  2817. return -EBUSY;
  2818. if (i < ETH_MODULE_SFF_8079_LEN)
  2819. status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
  2820. else
  2821. status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
  2822. if (status)
  2823. return -EIO;
  2824. data[i - ee->offset] = databyte;
  2825. }
  2826. return 0;
  2827. }
  2828. static const struct {
  2829. ixgbe_link_speed mac_speed;
  2830. u32 supported;
  2831. } ixgbe_ls_map[] = {
  2832. { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
  2833. { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
  2834. { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
  2835. { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
  2836. { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
  2837. };
  2838. static const struct {
  2839. u32 lp_advertised;
  2840. u32 mac_speed;
  2841. } ixgbe_lp_map[] = {
  2842. { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
  2843. { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
  2844. { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
  2845. { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
  2846. { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
  2847. { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
  2848. };
  2849. static int
  2850. ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
  2851. {
  2852. u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
  2853. struct ixgbe_hw *hw = &adapter->hw;
  2854. s32 rc;
  2855. u16 i;
  2856. rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
  2857. if (rc)
  2858. return rc;
  2859. edata->lp_advertised = 0;
  2860. for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
  2861. if (info[0] & ixgbe_lp_map[i].lp_advertised)
  2862. edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
  2863. }
  2864. edata->supported = 0;
  2865. for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
  2866. if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
  2867. edata->supported |= ixgbe_ls_map[i].supported;
  2868. }
  2869. edata->advertised = 0;
  2870. for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
  2871. if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
  2872. edata->advertised |= ixgbe_ls_map[i].supported;
  2873. }
  2874. edata->eee_enabled = !!edata->advertised;
  2875. edata->tx_lpi_enabled = edata->eee_enabled;
  2876. if (edata->advertised & edata->lp_advertised)
  2877. edata->eee_active = true;
  2878. return 0;
  2879. }
  2880. static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
  2881. {
  2882. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  2883. struct ixgbe_hw *hw = &adapter->hw;
  2884. if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
  2885. return -EOPNOTSUPP;
  2886. if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
  2887. return ixgbe_get_eee_fw(adapter, edata);
  2888. return -EOPNOTSUPP;
  2889. }
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
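
/**
 * ixgbe_get_priv_flags - report driver-private ethtool flags
 * @netdev: network interface device structure
 *
 * Translates the relevant IXGBE_FLAG2_* bits in adapter->flags2 into
 * the IXGBE_PRIV_FLAGS_* bitmap exposed via ethtool's private flags.
 */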
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
		priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;

	return priv_flags;
}
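
/**
 * ixgbe_set_priv_flags - apply driver-private ethtool flags
 * @netdev: network interface device structure
 * @priv_flags: requested IXGBE_PRIV_FLAGS_* bitmap
 *
 * Applies the legacy-Rx and VF-IPsec-offload private flags and, when
 * something actually changed, reinitializes a running interface so the
 * queues are rebuilt with the new settings.  The user-visible flag
 * names live in the driver's private-flags string table elsewhere in
 * the driver; assuming the usual "legacy-rx" name, this is reached by
 * e.g. "ethtool --set-priv-flags <ifname> legacy-rx on".
 */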
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
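
/* ethtool callbacks implemented by ixgbe.  Only the usecs-based ITR
 * coalescing parameters are declared as supported, so the ethtool core
 * rejects attempts to change any other coalescing field before
 * ixgbe_set_coalesce() is ever called.
 */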
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo               = ixgbe_get_drvinfo,
	.get_regs_len              = ixgbe_get_regs_len,
	.get_regs                  = ixgbe_get_regs,
	.get_wol                   = ixgbe_get_wol,
	.set_wol                   = ixgbe_set_wol,
	.nway_reset                = ixgbe_nway_reset,
	.get_link                  = ethtool_op_get_link,
	.get_eeprom_len            = ixgbe_get_eeprom_len,
	.get_eeprom                = ixgbe_get_eeprom,
	.set_eeprom                = ixgbe_set_eeprom,
	.get_ringparam             = ixgbe_get_ringparam,
	.set_ringparam             = ixgbe_set_ringparam,
	.get_pauseparam            = ixgbe_get_pauseparam,
	.set_pauseparam            = ixgbe_set_pauseparam,
	.get_msglevel              = ixgbe_get_msglevel,
	.set_msglevel              = ixgbe_set_msglevel,
	.self_test                 = ixgbe_diag_test,
	.get_strings               = ixgbe_get_strings,
	.set_phys_id               = ixgbe_set_phys_id,
	.get_sset_count            = ixgbe_get_sset_count,
	.get_ethtool_stats         = ixgbe_get_ethtool_stats,
	.get_coalesce              = ixgbe_get_coalesce,
	.set_coalesce              = ixgbe_set_coalesce,
	.get_rxnfc                 = ixgbe_get_rxnfc,
	.set_rxnfc                 = ixgbe_set_rxnfc,
	.get_rxfh_indir_size       = ixgbe_rss_indir_size,
	.get_rxfh_key_size         = ixgbe_get_rxfh_key_size,
	.get_rxfh                  = ixgbe_get_rxfh,
	.set_rxfh                  = ixgbe_set_rxfh,
	.get_eee                   = ixgbe_get_eee,
	.set_eee                   = ixgbe_set_eee,
	.get_channels              = ixgbe_get_channels,
	.set_channels              = ixgbe_set_channels,
	.get_priv_flags            = ixgbe_get_priv_flags,
	.set_priv_flags            = ixgbe_set_priv_flags,
	.get_ts_info               = ixgbe_get_ts_info,
	.get_module_info           = ixgbe_get_module_info,
	.get_module_eeprom         = ixgbe_get_module_eeprom,
	.get_link_ksettings        = ixgbe_get_link_ksettings,
	.set_link_ksettings        = ixgbe_set_link_ksettings,
};
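
/**
 * ixgbe_set_ethtool_ops - attach the ethtool_ops table to a netdev
 * @netdev: network interface device structure
 *
 * Hooked up by the driver's setup path (outside this file) so the
 * ethtool interface is available once the device is registered.
 */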
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}