PageRenderTime 78ms CodeModel.GetById 29ms RepoModel.GetById 0ms app.codeStats 2ms

/drivers/net/ethernet/intel/i40e/i40e_main.c

http://github.com/mirrors/linux-2.6
C | 15811 lines | 10302 code | 1975 blank | 3534 comment | 1980 complexity | 0f76330d2621dae3914651ca6b0cd824 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0

Large files are truncated, but you can click here to view the full file

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include <linux/etherdevice.h>
  4. #include <linux/of_net.h>
  5. #include <linux/pci.h>
  6. #include <linux/bpf.h>
  7. /* Local includes */
  8. #include "i40e.h"
  9. #include "i40e_diag.h"
  10. #include "i40e_xsk.h"
  11. #include <net/udp_tunnel.h>
  12. #include <net/xdp_sock.h>
  13. /* All i40e tracepoints are defined by the include below, which
  14. * must be included exactly once across the whole kernel with
  15. * CREATE_TRACE_POINTS defined
  16. */
  17. #define CREATE_TRACE_POINTS
  18. #include "i40e_trace.h"
/* Driver identity strings exported for ethtool/logging */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
/* NOTE(review): 'debug' is declared int but registered as uint here —
 * looks intentional (negative default, unsigned mask input); confirm
 */
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

/* workqueue used by the service task; created at module init */
static struct workqueue_struct *i40e_wq;
  96. /**
  97. * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  98. * @hw: pointer to the HW structure
  99. * @mem: ptr to mem struct to fill out
  100. * @size: size of memory requested
  101. * @alignment: what to align the allocation to
  102. **/
  103. int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
  104. u64 size, u32 alignment)
  105. {
  106. struct i40e_pf *pf = (struct i40e_pf *)hw->back;
  107. mem->size = ALIGN(size, alignment);
  108. mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
  109. GFP_KERNEL);
  110. if (!mem->va)
  111. return -ENOMEM;
  112. return 0;
  113. }
  114. /**
  115. * i40e_free_dma_mem_d - OS specific memory free for shared code
  116. * @hw: pointer to the HW structure
  117. * @mem: ptr to mem struct to free
  118. **/
  119. int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
  120. {
  121. struct i40e_pf *pf = (struct i40e_pf *)hw->back;
  122. dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
  123. mem->va = NULL;
  124. mem->pa = 0;
  125. mem->size = 0;
  126. return 0;
  127. }
  128. /**
  129. * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
  130. * @hw: pointer to the HW structure
  131. * @mem: ptr to mem struct to fill out
  132. * @size: size of memory requested
  133. **/
  134. int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
  135. u32 size)
  136. {
  137. mem->size = size;
  138. mem->va = kzalloc(size, GFP_KERNEL);
  139. if (!mem->va)
  140. return -ENOMEM;
  141. return 0;
  142. }
  143. /**
  144. * i40e_free_virt_mem_d - OS specific memory free for shared code
  145. * @hw: pointer to the HW structure
  146. * @mem: ptr to mem struct to free
  147. **/
  148. int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
  149. {
  150. /* it's ok to kfree a NULL pointer */
  151. kfree(mem->va);
  152. mem->va = NULL;
  153. mem->size = 0;
  154. return 0;
  155. }
  156. /**
  157. * i40e_get_lump - find a lump of free generic resource
  158. * @pf: board private structure
  159. * @pile: the pile of resource to search
  160. * @needed: the number of items needed
  161. * @id: an owner id to stick on the items assigned
  162. *
  163. * Returns the base item index of the lump, or negative for error
  164. *
  165. * The search_hint trick and lack of advanced fit-finding only work
  166. * because we're highly likely to have all the same size lump requests.
  167. * Linear search time and any fragmentation should be minimal.
  168. **/
  169. static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
  170. u16 needed, u16 id)
  171. {
  172. int ret = -ENOMEM;
  173. int i, j;
  174. if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
  175. dev_info(&pf->pdev->dev,
  176. "param err: pile=%s needed=%d id=0x%04x\n",
  177. pile ? "<valid>" : "<null>", needed, id);
  178. return -EINVAL;
  179. }
  180. /* start the linear search with an imperfect hint */
  181. i = pile->search_hint;
  182. while (i < pile->num_entries) {
  183. /* skip already allocated entries */
  184. if (pile->list[i] & I40E_PILE_VALID_BIT) {
  185. i++;
  186. continue;
  187. }
  188. /* do we have enough in this lump? */
  189. for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
  190. if (pile->list[i+j] & I40E_PILE_VALID_BIT)
  191. break;
  192. }
  193. if (j == needed) {
  194. /* there was enough, so assign it to the requestor */
  195. for (j = 0; j < needed; j++)
  196. pile->list[i+j] = id | I40E_PILE_VALID_BIT;
  197. ret = i;
  198. pile->search_hint = i + j;
  199. break;
  200. }
  201. /* not enough, so skip over it and continue looking */
  202. i += j;
  203. }
  204. return ret;
  205. }
  206. /**
  207. * i40e_put_lump - return a lump of generic resource
  208. * @pile: the pile of resource to search
  209. * @index: the base item index
  210. * @id: the owner id of the items assigned
  211. *
  212. * Returns the count of items in the lump
  213. **/
  214. static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
  215. {
  216. int valid_id = (id | I40E_PILE_VALID_BIT);
  217. int count = 0;
  218. int i;
  219. if (!pile || index >= pile->num_entries)
  220. return -EINVAL;
  221. for (i = index;
  222. i < pile->num_entries && pile->list[i] == valid_id;
  223. i++) {
  224. pile->list[i] = 0;
  225. count++;
  226. }
  227. if (count && index < pile->search_hint)
  228. pile->search_hint = index;
  229. return count;
  230. }
  231. /**
  232. * i40e_find_vsi_from_id - searches for the vsi with the given id
  233. * @pf: the pf structure to search for the vsi
  234. * @id: id of the vsi it is searching for
  235. **/
  236. struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  237. {
  238. int i;
  239. for (i = 0; i < pf->num_alloc_vsi; i++)
  240. if (pf->vsi[i] && (pf->vsi[i]->id == id))
  241. return pf->vsi[i];
  242. return NULL;
  243. }
  244. /**
  245. * i40e_service_event_schedule - Schedule the service task to wake up
  246. * @pf: board private structure
  247. *
  248. * If not already scheduled, this puts the task into the work queue
  249. **/
  250. void i40e_service_event_schedule(struct i40e_pf *pf)
  251. {
  252. if ((!test_bit(__I40E_DOWN, pf->state) &&
  253. !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
  254. test_bit(__I40E_RECOVERY_MODE, pf->state))
  255. queue_work(i40e_wq, &pf->service_task);
  256. }
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hung Tx queue reported by the stack
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	/* the recovery level restarts at 1 after 20s of quiet; repeated
	 * timeouts within one watchdog period take no new action
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	/* dump ring and interrupt state to aid hang diagnosis */
	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	/* escalating recovery: PF reset -> CORE reset -> GLOBAL reset;
	 * beyond level 3 we only log (the level keeps incrementing below
	 * until the 20s quiet window resets it to 1)
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	/* the service task performs the reset requested above */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
  326. /**
  327. * i40e_get_vsi_stats_struct - Get System Network Statistics
  328. * @vsi: the VSI we care about
  329. *
  330. * Returns the address of the device statistics structure.
  331. * The statistics are actually updated from the service task.
  332. **/
  333. struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  334. {
  335. return &vsi->net_stats;
  336. }
  337. /**
  338. * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
  339. * @ring: Tx ring to get statistics from
  340. * @stats: statistics entry to be updated
  341. **/
  342. static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
  343. struct rtnl_link_stats64 *stats)
  344. {
  345. u64 bytes, packets;
  346. unsigned int start;
  347. do {
  348. start = u64_stats_fetch_begin_irq(&ring->syncp);
  349. packets = ring->stats.packets;
  350. bytes = ring->stats.bytes;
  351. } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  352. stats->tx_packets += packets;
  353. stats->tx_bytes += bytes;
  354. }
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Accumulates per-ring Tx/Rx packet and byte counters into @stats,
 * then copies the remaining fields from the VSI stats that the
 * service task maintains.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		/* the rings of one queue pair appear laid out contiguously
		 * as Tx[, XDP Tx], Rx, so ring++ steps from Tx to the next
		 * ring in the pair; NOTE(review): this assumes that
		 * allocation layout — verify against the ring alloc code
		 */
		if (i40e_enabled_xdp_vsi(vsi)) {
			ring++;
			/* XDP Tx traffic counts as Tx on the netdev */
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring++;
		/* Rx pair read under the u64_stats seqcount for a
		 * consistent snapshot on 32-bit kernels
		 */
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
  406. /**
  407. * i40e_vsi_reset_stats - Resets all stats of the given vsi
  408. * @vsi: the VSI to have its stats reset
  409. **/
  410. void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
  411. {
  412. struct rtnl_link_stats64 *ns;
  413. int i;
  414. if (!vsi)
  415. return;
  416. ns = i40e_get_vsi_stats_struct(vsi);
  417. memset(ns, 0, sizeof(*ns));
  418. memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
  419. memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
  420. memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
  421. if (vsi->rx_rings && vsi->rx_rings[0]) {
  422. for (i = 0; i < vsi->num_queue_pairs; i++) {
  423. memset(&vsi->rx_rings[i]->stats, 0,
  424. sizeof(vsi->rx_rings[i]->stats));
  425. memset(&vsi->rx_rings[i]->rx_stats, 0,
  426. sizeof(vsi->rx_rings[i]->rx_stats));
  427. memset(&vsi->tx_rings[i]->stats, 0,
  428. sizeof(vsi->tx_rings[i]->stats));
  429. memset(&vsi->tx_rings[i]->tx_stats, 0,
  430. sizeof(vsi->tx_rings[i]->tx_stats));
  431. }
  432. }
  433. vsi->stat_offsets_loaded = false;
  434. }
  435. /**
  436. * i40e_pf_reset_stats - Reset all of the stats for the given PF
  437. * @pf: the PF to be reset
  438. **/
  439. void i40e_pf_reset_stats(struct i40e_pf *pf)
  440. {
  441. int i;
  442. memset(&pf->stats, 0, sizeof(pf->stats));
  443. memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
  444. pf->stat_offsets_loaded = false;
  445. for (i = 0; i < I40E_MAX_VEB; i++) {
  446. if (pf->veb[i]) {
  447. memset(&pf->veb[i]->stats, 0,
  448. sizeof(pf->veb[i]->stats));
  449. memset(&pf->veb[i]->stats_offsets, 0,
  450. sizeof(pf->veb[i]->stats_offsets));
  451. memset(&pf->veb[i]->tc_stats, 0,
  452. sizeof(pf->veb[i]->tc_stats));
  453. memset(&pf->veb[i]->tc_stats_offsets, 0,
  454. sizeof(pf->veb[i]->tc_stats_offsets));
  455. pf->veb[i]->stat_offsets_loaded = false;
  456. }
  457. }
  458. pf->hw_csum_rx_error = 0;
  459. }
  460. /**
  461. * i40e_stat_update48 - read and update a 48 bit stat from the chip
  462. * @hw: ptr to the hardware info
  463. * @hireg: the high 32 bit reg to read
  464. * @loreg: the low 32 bit reg to read
  465. * @offset_loaded: has the initial offset been loaded yet
  466. * @offset: ptr to current offset value
  467. * @stat: ptr to the stat
  468. *
  469. * Since the device stats are not reset at PFReset, they likely will not
  470. * be zeroed when the driver starts. We'll save the first values read
  471. * and use them as offsets to be subtracted from the raw values in order
  472. * to report stats that count from zero. In the process, we also manage
  473. * the potential roll-over.
  474. **/
  475. static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
  476. bool offset_loaded, u64 *offset, u64 *stat)
  477. {
  478. u64 new_data;
  479. if (hw->device_id == I40E_DEV_ID_QEMU) {
  480. new_data = rd32(hw, loreg);
  481. new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
  482. } else {
  483. new_data = rd64(hw, loreg);
  484. }
  485. if (!offset_loaded)
  486. *offset = new_data;
  487. if (likely(new_data >= *offset))
  488. *stat = new_data - *offset;
  489. else
  490. *stat = (new_data + BIT_ULL(48)) - *offset;
  491. *stat &= 0xFFFFFFFFFFFFULL;
  492. }
  493. /**
  494. * i40e_stat_update32 - read and update a 32 bit stat from the chip
  495. * @hw: ptr to the hardware info
  496. * @reg: the hw reg to read
  497. * @offset_loaded: has the initial offset been loaded yet
  498. * @offset: ptr to current offset value
  499. * @stat: ptr to the stat
  500. **/
  501. static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
  502. bool offset_loaded, u64 *offset, u64 *stat)
  503. {
  504. u32 new_data;
  505. new_data = rd32(hw, reg);
  506. if (!offset_loaded)
  507. *offset = new_data;
  508. if (likely(new_data >= *offset))
  509. *stat = (u32)(new_data - *offset);
  510. else
  511. *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
  512. }
  513. /**
  514. * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
  515. * @hw: ptr to the hardware info
  516. * @reg: the hw reg to read and clear
  517. * @stat: ptr to the stat
  518. **/
  519. static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
  520. {
  521. u32 new_data = rd32(hw, reg);
  522. wr32(hw, reg, 1); /* must write a nonzero value to clear register */
  523. *stat += new_data;
  524. }
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 *
 * Reads the VSI's per-stat-index HW counter registers into
 * vsi->eth_stats, offset-corrected against vsi->eth_stats_offsets so
 * values count from zero (see i40e_stat_update32/48).
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	/* 48-bit octet and frame counters (Rx then Tx, split by cast) */
	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);
	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	/* the first pass through established the offsets */
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Reads the VEB's switch-level and per-traffic-class HW counters into
 * veb->stats / veb->tc_stats, offset-corrected as in the VSI path.
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP counter only exists on B0 and later silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);
	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	/* per-traffic-class packet/byte counters, Rx and Tx */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}

	/* the first pass through established the offsets */
	veb->stat_offsets_loaded = true;
}
  657. /**
  658. * i40e_update_vsi_stats - Update the vsi statistics counters.
  659. * @vsi: the VSI to be updated
  660. *
  661. * There are a few instances where we store the same stat in a
  662. * couple of different structs. This is partly because we have
  663. * the netdev stats that need to be filled out, which is slightly
  664. * different from the "eth_stats" defined by the chip and used in
  665. * VF communications. We sort it out here.
  666. **/
  667. static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
  668. {
  669. struct i40e_pf *pf = vsi->back;
  670. struct rtnl_link_stats64 *ons;
  671. struct rtnl_link_stats64 *ns; /* netdev stats */
  672. struct i40e_eth_stats *oes;
  673. struct i40e_eth_stats *es; /* device's eth stats */
  674. u32 tx_restart, tx_busy;
  675. struct i40e_ring *p;
  676. u32 rx_page, rx_buf;
  677. u64 bytes, packets;
  678. unsigned int start;
  679. u64 tx_linearize;
  680. u64 tx_force_wb;
  681. u64 rx_p, rx_b;
  682. u64 tx_p, tx_b;
  683. u16 q;
  684. if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
  685. test_bit(__I40E_CONFIG_BUSY, pf->state))
  686. return;
  687. ns = i40e_get_vsi_stats_struct(vsi);
  688. ons = &vsi->net_stats_offsets;
  689. es = &vsi->eth_stats;
  690. oes = &vsi->eth_stats_offsets;
  691. /* Gather up the netdev and vsi stats that the driver collects
  692. * on the fly during packet processing
  693. */
  694. rx_b = rx_p = 0;
  695. tx_b = tx_p = 0;
  696. tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
  697. rx_page = 0;
  698. rx_buf = 0;
  699. rcu_read_lock();
  700. for (q = 0; q < vsi->num_queue_pairs; q++) {
  701. /* locate Tx ring */
  702. p = READ_ONCE(vsi->tx_rings[q]);
  703. do {
  704. start = u64_stats_fetch_begin_irq(&p->syncp);
  705. packets = p->stats.packets;
  706. bytes = p->stats.bytes;
  707. } while (u64_stats_fetch_retry_irq(&p->syncp, start));
  708. tx_b += bytes;
  709. tx_p += packets;
  710. tx_restart += p->tx_stats.restart_queue;
  711. tx_busy += p->tx_stats.tx_busy;
  712. tx_linearize += p->tx_stats.tx_linearize;
  713. tx_force_wb += p->tx_stats.tx_force_wb;
  714. /* Rx queue is part of the same block as Tx queue */
  715. p = &p[1];
  716. do {
  717. start = u64_stats_fetch_begin_irq(&p->syncp);
  718. packets = p->stats.packets;
  719. bytes = p->stats.bytes;
  720. } while (u64_stats_fetch_retry_irq(&p->syncp, start));
  721. rx_b += bytes;
  722. rx_p += packets;
  723. rx_buf += p->rx_stats.alloc_buff_failed;
  724. rx_page += p->rx_stats.alloc_page_failed;
  725. }
  726. rcu_read_unlock();
  727. vsi->tx_restart = tx_restart;
  728. vsi->tx_busy = tx_busy;
  729. vsi->tx_linearize = tx_linearize;
  730. vsi->tx_force_wb = tx_force_wb;
  731. vsi->rx_page_failed = rx_page;
  732. vsi->rx_buf_failed = rx_buf;
  733. ns->rx_packets = rx_p;
  734. ns->rx_bytes = rx_b;
  735. ns->tx_packets = tx_p;
  736. ns->tx_bytes = tx_b;
  737. /* update netdev stats from eth stats */
  738. i40e_update_eth_stats(vsi);
  739. ons->tx_errors = oes->tx_errors;
  740. ns->tx_errors = es->tx_errors;
  741. ons->multicast = oes->rx_multicast;
  742. ns->multicast = es->rx_multicast;
  743. ons->rx_dropped = oes->rx_discards;
  744. ns->rx_dropped = es->rx_discards;
  745. ons->tx_dropped = oes->tx_discards;
  746. ns->tx_dropped = es->tx_discards;
  747. /* pull in a couple PF stats if this is the main vsi */
  748. if (vsi == pf->vsi[pf->lan_vsi]) {
  749. ns->rx_crc_errors = pf->stats.crc_errors;
  750. ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
  751. ns->rx_length_errors = pf->stats.rx_length_errors;
  752. }
  753. }
  754. /**
  755. * i40e_update_pf_stats - Update the PF statistics counters.
  756. * @pf: the PF to be updated
  757. **/
  758. static void i40e_update_pf_stats(struct i40e_pf *pf)
  759. {
  760. struct i40e_hw_port_stats *osd = &pf->stats_offsets;
  761. struct i40e_hw_port_stats *nsd = &pf->stats;
  762. struct i40e_hw *hw = &pf->hw;
  763. u32 val;
  764. int i;
  765. i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
  766. I40E_GLPRT_GORCL(hw->port),
  767. pf->stat_offsets_loaded,
  768. &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
  769. i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
  770. I40E_GLPRT_GOTCL(hw->port),
  771. pf->stat_offsets_loaded,
  772. &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
  773. i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
  774. pf->stat_offsets_loaded,
  775. &osd->eth.rx_discards,
  776. &nsd->eth.rx_discards);
  777. i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
  778. I40E_GLPRT_UPRCL(hw->port),
  779. pf->stat_offsets_loaded,
  780. &osd->eth.rx_unicast,
  781. &nsd->eth.rx_unicast);
  782. i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
  783. I40E_GLPRT_MPRCL(hw->port),
  784. pf->stat_offsets_loaded,
  785. &osd->eth.rx_multicast,
  786. &nsd->eth.rx_multicast);
  787. i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
  788. I40E_GLPRT_BPRCL(hw->port),
  789. pf->stat_offsets_loaded,
  790. &osd->eth.rx_broadcast,
  791. &nsd->eth.rx_broadcast);
  792. i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
  793. I40E_GLPRT_UPTCL(hw->port),
  794. pf->stat_offsets_loaded,
  795. &osd->eth.tx_unicast,
  796. &nsd->eth.tx_unicast);
  797. i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
  798. I40E_GLPRT_MPTCL(hw->port),
  799. pf->stat_offsets_loaded,
  800. &osd->eth.tx_multicast,
  801. &nsd->eth.tx_multicast);
  802. i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
  803. I40E_GLPRT_BPTCL(hw->port),
  804. pf->stat_offsets_loaded,
  805. &osd->eth.tx_broadcast,
  806. &nsd->eth.tx_broadcast);
  807. i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
  808. pf->stat_offsets_loaded,
  809. &osd->tx_dropped_link_down,
  810. &nsd->tx_dropped_link_down);
  811. i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
  812. pf->stat_offsets_loaded,
  813. &osd->crc_errors, &nsd->crc_errors);
  814. i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
  815. pf->stat_offsets_loaded,
  816. &osd->illegal_bytes, &nsd->illegal_bytes);
  817. i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
  818. pf->stat_offsets_loaded,
  819. &osd->mac_local_faults,
  820. &nsd->mac_local_faults);
  821. i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
  822. pf->stat_offsets_loaded,
  823. &osd->mac_remote_faults,
  824. &nsd->mac_remote_faults);
  825. i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
  826. pf->stat_offsets_loaded,
  827. &osd->rx_length_errors,
  828. &nsd->rx_length_errors);
  829. i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
  830. pf->stat_offsets_loaded,
  831. &osd->link_xon_rx, &nsd->link_xon_rx);
  832. i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
  833. pf->stat_offsets_loaded,
  834. &osd->link_xon_tx, &nsd->link_xon_tx);
  835. i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
  836. pf->stat_offsets_loaded,
  837. &osd->link_xoff_rx, &nsd->link_xoff_rx);
  838. i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
  839. pf->stat_offsets_loaded,
  840. &osd->link_xoff_tx, &nsd->link_xoff_tx);
  841. for (i = 0; i < 8; i++) {
  842. i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
  843. pf->stat_offsets_loaded,
  844. &osd->priority_xoff_rx[i],
  845. &nsd->priority_xoff_rx[i]);
  846. i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
  847. pf->stat_offsets_loaded,
  848. &osd->priority_xon_rx[i],
  849. &nsd->priority_xon_rx[i]);
  850. i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
  851. pf->stat_offsets_loaded,
  852. &osd->priority_xon_tx[i],
  853. &nsd->priority_xon_tx[i]);
  854. i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
  855. pf->stat_offsets_loaded,
  856. &osd->priority_xoff_tx[i],
  857. &nsd->priority_xoff_tx[i]);
  858. i40e_stat_update32(hw,
  859. I40E_GLPRT_RXON2OFFCNT(hw->port, i),
  860. pf->stat_offsets_loaded,
  861. &osd->priority_xon_2_xoff[i],
  862. &nsd->priority_xon_2_xoff[i]);
  863. }
  864. i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
  865. I40E_GLPRT_PRC64L(hw->port),
  866. pf->stat_offsets_loaded,
  867. &osd->rx_size_64, &nsd->rx_size_64);
  868. i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
  869. I40E_GLPRT_PRC127L(hw->port),
  870. pf->stat_offsets_loaded,
  871. &osd->rx_size_127, &nsd->rx_size_127);
  872. i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
  873. I40E_GLPRT_PRC255L(hw->port),
  874. pf->stat_offsets_loaded,
  875. &osd->rx_size_255, &nsd->rx_size_255);
  876. i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
  877. I40E_GLPRT_PRC511L(hw->port),
  878. pf->stat_offsets_loaded,
  879. &osd->rx_size_511, &nsd->rx_size_511);
  880. i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
  881. I40E_GLPRT_PRC1023L(hw->port),
  882. pf->stat_offsets_loaded,
  883. &osd->rx_size_1023, &nsd->rx_size_1023);
  884. i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
  885. I40E_GLPRT_PRC1522L(hw->port),
  886. pf->stat_offsets_loaded,
  887. &osd->rx_size_1522, &nsd->rx_size_1522);
  888. i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
  889. I40E_GLPRT_PRC9522L(hw->port),
  890. pf->stat_offsets_loaded,
  891. &osd->rx_size_big, &nsd->rx_size_big);
  892. i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
  893. I40E_GLPRT_PTC64L(hw->port),
  894. pf->stat_offsets_loaded,
  895. &osd->tx_size_64, &nsd->tx_size_64);
  896. i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
  897. I40E_GLPRT_PTC127L(hw->port),
  898. pf->stat_offsets_loaded,
  899. &osd->tx_size_127, &nsd->tx_size_127);
  900. i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
  901. I40E_GLPRT_PTC255L(hw->port),
  902. pf->stat_offsets_loaded,
  903. &osd->tx_size_255, &nsd->tx_size_255);
  904. i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
  905. I40E_GLPRT_PTC511L(hw->port),
  906. pf->stat_offsets_loaded,
  907. &osd->tx_size_511, &nsd->tx_size_511);
  908. i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
  909. I40E_GLPRT_PTC1023L(hw->port),
  910. pf->stat_offsets_loaded,
  911. &osd->tx_size_1023, &nsd->tx_size_1023);
  912. i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
  913. I40E_GLPRT_PTC1522L(hw->port),
  914. pf->stat_offsets_loaded,
  915. &osd->tx_size_1522, &nsd->tx_size_1522);
  916. i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
  917. I40E_GLPRT_PTC9522L(hw->port),
  918. pf->stat_offsets_loaded,
  919. &osd->tx_size_big, &nsd->tx_size_big);
  920. i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
  921. pf->stat_offsets_loaded,
  922. &osd->rx_undersize, &nsd->rx_undersize);
  923. i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
  924. pf->stat_offsets_loaded,
  925. &osd->rx_fragments, &nsd->rx_fragments);
  926. i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
  927. pf->stat_offsets_loaded,
  928. &osd->rx_oversize, &nsd->rx_oversize);
  929. i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
  930. pf->stat_offsets_loaded,
  931. &osd->rx_jabber, &nsd->rx_jabber);
  932. /* FDIR stats */
  933. i40e_stat_update_and_clear32(hw,
  934. I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
  935. &nsd->fd_atr_match);
  936. i40e_stat_update_and_clear32(hw,
  937. I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
  938. &nsd->fd_sb_match);
  939. i40e_stat_update_and_clear32(hw,
  940. I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
  941. &nsd->fd_atr_tunnel_match);
  942. val = rd32(hw, I40E_PRTPM_EEE_STAT);
  943. nsd->tx_lpi_status =
  944. (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
  945. I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
  946. nsd->rx_lpi_status =
  947. (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
  948. I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
  949. i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
  950. pf->stat_offsets_loaded,
  951. &osd->tx_lpi_count, &nsd->tx_lpi_count);
  952. i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
  953. pf->stat_offsets_loaded,
  954. &osd->rx_lpi_count, &nsd->rx_lpi_count);
  955. if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
  956. !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
  957. nsd->fd_sb_status = true;
  958. else
  959. nsd->fd_sb_status = false;
  960. if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
  961. !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
  962. nsd->fd_atr_status = true;
  963. else
  964. nsd->fd_atr_status = false;
  965. pf->stat_offsets_loaded = true;
  966. }
  967. /**
  968. * i40e_update_stats - Update the various statistics counters.
  969. * @vsi: the VSI to be updated
  970. *
  971. * Update the various stats for this VSI and its related entities.
  972. **/
  973. void i40e_update_stats(struct i40e_vsi *vsi)
  974. {
  975. struct i40e_pf *pf = vsi->back;
  976. if (vsi == pf->vsi[pf->lan_vsi])
  977. i40e_update_pf_stats(pf);
  978. i40e_update_vsi_stats(vsi);
  979. }
  980. /**
  981. * i40e_count_filters - counts VSI mac filters
  982. * @vsi: the VSI to be searched
  983. *
  984. * Returns count of mac filters
  985. **/
  986. int i40e_count_filters(struct i40e_vsi *vsi)
  987. {
  988. struct i40e_mac_filter *f;
  989. struct hlist_node *h;
  990. int bkt;
  991. int cnt = 0;
  992. hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
  993. ++cnt;
  994. return cnt;
  995. }
  996. /**
  997. * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
  998. * @vsi: the VSI to be searched
  999. * @macaddr: the MAC address
  1000. * @vlan: the vlan
  1001. *
  1002. * Returns ptr to the filter object or NULL
  1003. **/
  1004. static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
  1005. const u8 *macaddr, s16 vlan)
  1006. {
  1007. struct i40e_mac_filter *f;
  1008. u64 key;
  1009. if (!vsi || !macaddr)
  1010. return NULL;
  1011. key = i40e_addr_to_hkey(macaddr);
  1012. hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
  1013. if ((ether_addr_equal(macaddr, f->macaddr)) &&
  1014. (vlan == f->vlan))
  1015. return f;
  1016. }
  1017. return NULL;
  1018. }
  1019. /**
  1020. * i40e_find_mac - Find a mac addr in the macvlan filters list
  1021. * @vsi: the VSI to be searched
  1022. * @macaddr: the MAC address we are searching for
  1023. *
  1024. * Returns the first filter with the provided MAC address or NULL if
  1025. * MAC address was not found
  1026. **/
  1027. struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
  1028. {
  1029. struct i40e_mac_filter *f;
  1030. u64 key;
  1031. if (!vsi || !macaddr)
  1032. return NULL;
  1033. key = i40e_addr_to_hkey(macaddr);
  1034. hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
  1035. if ((ether_addr_equal(macaddr, f->macaddr)))
  1036. return f;
  1037. }
  1038. return NULL;
  1039. }
  1040. /**
  1041. * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
  1042. * @vsi: the VSI to be searched
  1043. *
  1044. * Returns true if VSI is in vlan mode or false otherwise
  1045. **/
  1046. bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
  1047. {
  1048. /* If we have a PVID, always operate in VLAN mode */
  1049. if (vsi->info.pvid)
  1050. return true;
  1051. /* We need to operate in VLAN mode whenever we have any filters with
  1052. * a VLAN other than I40E_VLAN_ALL. We could check the table each
  1053. * time, incurring search cost repeatedly. However, we can notice two
  1054. * things:
  1055. *
  1056. * 1) the only place where we can gain a VLAN filter is in
  1057. * i40e_add_filter.
  1058. *
  1059. * 2) the only place where filters are actually removed is in
  1060. * i40e_sync_filters_subtask.
  1061. *
  1062. * Thus, we can simply use a boolean value, has_vlan_filters which we
  1063. * will set to true when we add a VLAN filter in i40e_add_filter. Then
  1064. * we have to perform the full search after deleting filters in
  1065. * i40e_sync_filters_subtask, but we already have to search
  1066. * filters here and can perform the check at the same time. This
  1067. * results in avoiding embedding a loop for VLAN mode inside another
  1068. * loop over all the filters, and should maintain correctness as noted
  1069. * above.
  1070. */
  1071. return vsi->has_vlan_filter;
  1072. }
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 * NOTE: on -ENOMEM, entries already moved to the tmp lists stay there;
 * the caller (i40e_sync_vsi_filters) is responsible for unwinding them.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place; these are not yet
	 * in the hash table, so rewriting the VLAN field is sufficient.
	 */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	/* Cache whether any VLAN-qualified filters remain; read lock-free by
	 * i40e_is_vsi_in_vlan.
	 */
	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
  1172. /**
  1173. * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  1174. * @vsi: the PF Main VSI - inappropriate for any other VSI
  1175. * @macaddr: the MAC address
  1176. *
  1177. * Remove whatever filter the firmware set up so the driver can manage
  1178. * its own filtering intelligently.
  1179. **/
  1180. static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
  1181. {
  1182. struct i40e_aqc_remove_macvlan_element_data element;
  1183. struct i40e_pf *pf = vsi->back;
  1184. /* Only appropriate for the PF main VSI */
  1185. if (vsi->type != I40E_VSI_MAIN)
  1186. return;
  1187. memset(&element, 0, sizeof(element));
  1188. ether_addr_copy(element.mac_addr, macaddr);
  1189. element.vlan_tag = 0;
  1190. /* Ignore error returns, some firmware does it this way... */
  1191. element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
  1192. i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
  1193. memset(&element, 0, sizeof(element));
  1194. ether_addr_copy(element.mac_addr, macaddr);
  1195. element.vlan_tag = 0;
  1196. /* ...and some firmware does it this way. */
  1197. element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
  1198. I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
  1199. i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
  1200. }
  1201. /**
  1202. * i40e_add_filter - Add a mac/vlan filter to the VSI
  1203. * @vsi: the VSI to be searched
  1204. * @macaddr: the MAC address
  1205. * @vlan: the vlan
  1206. *
  1207. * Returns ptr to the filter object or NULL when no memory available.
  1208. *
  1209. * NOTE: This function is expected to be called with mac_filter_hash_lock
  1210. * being held.
  1211. **/
  1212. struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
  1213. const u8 *macaddr, s16 vlan)
  1214. {
  1215. struct i40e_mac_filter *f;
  1216. u64 key;
  1217. if (!vsi || !macaddr)
  1218. return NULL;
  1219. f = i40e_find_filter(vsi, macaddr, vlan);
  1220. if (!f) {
  1221. f = kzalloc(sizeof(*f), GFP_ATOMIC);
  1222. if (!f)
  1223. return NULL;
  1224. /* Update the boolean indicating if we need to function in
  1225. * VLAN mode.
  1226. */
  1227. if (vlan >= 0)
  1228. vsi->has_vlan_filter = true;
  1229. ether_addr_copy(f->macaddr, macaddr);
  1230. f->vlan = vlan;
  1231. f->state = I40E_FILTER_NEW;
  1232. INIT_HLIST_NODE(&f->hlist);
  1233. key = i40e_addr_to_hkey(macaddr);
  1234. hash_add(vsi->mac_filter_hash, &f->hlist, key);
  1235. vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
  1236. set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
  1237. }
  1238. /* If we're asked to add a filter that has been marked for removal, it
  1239. * is safe to simply restore it to active state. __i40e_del_filter
  1240. * will have simply deleted any filters which were previously marked
  1241. * NEW or FAILED, so if it is currently marked REMOVE it must have
  1242. * previously been ACTIVE. Since we haven't yet run the sync filters
  1243. * task, just restore this filter to the ACTIVE state so that the
  1244. * sync task leaves it in place
  1245. */
  1246. if (f->state == I40E_FILTER_REMOVE)
  1247. f->state = I40E_FILTER_ACTIVE;
  1248. return f;
  1249. }
  1250. /**
  1251. * __i40e_del_filter - Remove a specific filter from the VSI
  1252. * @vsi: VSI to remove from
  1253. * @f: the filter to remove from the list
  1254. *
  1255. * This function should be called instead of i40e_del_filter only if you know
  1256. * the exact filter you will remove already, such as via i40e_find_filter or
  1257. * i40e_find_mac.
  1258. *
  1259. * NOTE: This function is expected to be called with mac_filter_hash_lock
  1260. * being held.
  1261. * ANOTHER NOTE: This function MUST be called from within the context of
  1262. * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
  1263. * instead of list_for_each_entry().
  1264. **/
  1265. void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
  1266. {
  1267. if (!f)
  1268. return;
  1269. /* If the filter was never added to firmware then we can just delete it
  1270. * directly and we don't want to set the status to remove or else an
  1271. * admin queue command will unnecessarily fire.
  1272. */
  1273. if ((f->state == I40E_FILTER_FAILED) ||
  1274. (f->state == I40E_FILTER_NEW)) {
  1275. hash_del(&f->hlist);
  1276. kfree(f);
  1277. } else {
  1278. f->state = I40E_FILTER_REMOVE;
  1279. }
  1280. vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
  1281. set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
  1282. }
  1283. /**
  1284. * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
  1285. * @vsi: the VSI to be searched
  1286. * @macaddr: the MAC address
  1287. * @vlan: the VLAN
  1288. *
  1289. * NOTE: This function is expected to be called with mac_filter_hash_lock
  1290. * being held.
  1291. * ANOTHER NOTE: This function MUST be called from within the context of
  1292. * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
  1293. * instead of list_for_each_entry().
  1294. **/
  1295. void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
  1296. {
  1297. struct i40e_mac_filter *f;
  1298. if (!vsi || !macaddr)
  1299. return;
  1300. f = i40e_find_filter(vsi, macaddr, vlan);
  1301. __i40e_del_filter(vsi, f);
  1302. }
  1303. /**
  1304. * i40e_add_mac_filter - Add a MAC filter for all active VLANs
  1305. * @vsi: the VSI to be searched
  1306. * @macaddr: the mac address to be filtered
  1307. *
  1308. * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
  1309. * go through all the macvlan filters and add a macvlan filter for each
  1310. * unique vlan that already exists. If a PVID has been assigned, instead only
  1311. * add the macaddr to that VLAN.
  1312. *
  1313. * Returns last filter added on success, else NULL
  1314. **/
  1315. struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
  1316. const u8 *macaddr)
  1317. {
  1318. struct i40e_mac_filter *f, *add = NULL;
  1319. struct hlist_node *h;
  1320. int bkt;
  1321. if (vsi->info.pvid)
  1322. return i40e_add_filter(vsi, macaddr,
  1323. le16_to_cpu(vsi->info.pvid));
  1324. if (!i40e_is_vsi_in_vlan(vsi))
  1325. return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
  1326. hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
  1327. if (f->state == I40E_FILTER_REMOVE)
  1328. continue;
  1329. add = i40e_add_filter(vsi, macaddr, f->vlan);
  1330. if (!add)
  1331. return NULL;
  1332. }
  1333. return add;
  1334. }
  1335. /**
  1336. * i40e_del_mac_filter - Remove a MAC filter from all VLANs
  1337. * @vsi: the VSI to be searched
  1338. * @macaddr: the mac address to be removed
  1339. *
  1340. * Removes a given MAC address from a VSI regardless of what VLAN it has been
  1341. * associated with.
  1342. *
  1343. * Returns 0 for success, or error
  1344. **/
  1345. int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
  1346. {
  1347. struct i40e_mac_filter *f;
  1348. struct hlist_node *h;
  1349. bool found = false;
  1350. int bkt;
  1351. lockdep_assert_held(&vsi->mac_filter_hash_lock);
  1352. hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
  1353. if (ether_addr_equal(macaddr, f->macaddr)) {
  1354. __i40e_del_filter(vsi, f);
  1355. found = true;
  1356. }
  1357. }
  1358. if (found)
  1359. return 0;
  1360. else
  1361. return -ENOENT;
  1362. }
  1363. /**
  1364. * i40e_set_mac - NDO callback to set mac address
  1365. * @netdev: network interface device structure
  1366. * @p: pointer to an address structure
  1367. *
  1368. * Returns 0 on success, negative on failure
  1369. **/
  1370. static int i40e_set_mac(struct net_device *netdev, void *p)
  1371. {
  1372. struct i40e_netdev_priv *np = netdev_priv(netdev);
  1373. struct i40e_vsi *vsi = np->vsi;
  1374. struct i40e_pf *pf = vsi->back;
  1375. struct i40e_hw *hw = &pf->hw;
  1376. struct sockaddr *addr = p;
  1377. if (!is_valid_ether_addr(addr->sa_data))
  1378. return -EADDRNOTAVAIL;
  1379. if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
  1380. netdev_info(netdev, "already using mac address %pM\n",
  1381. addr->sa_data);
  1382. return 0;
  1383. }
  1384. if (test_bit(__I40E_DOWN, pf->state) ||
  1385. test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
  1386. return -EADDRNOTAVAIL;
  1387. if (ether_addr_equal(hw->mac.addr, addr->sa_data))
  1388. netdev_info(netdev, "returning to hw mac address %pM\n",
  1389. hw->mac.addr);
  1390. else
  1391. netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
  1392. /* Copy the address first, so that we avoid a possible race with
  1393. * .set_rx_mode().
  1394. * - Remove old address from MAC filter
  1395. * - Copy new address
  1396. * - Add new address to MAC filter
  1397. */
  1398. spin_lock_bh(&vsi->mac_filter_hash_lock);
  1399. i40e_del_mac_filter(vsi, netdev->dev_addr);
  1400. ether_addr_copy(netdev->dev_addr, addr->sa_data);
  1401. i40e_add_mac_filter(vsi, netdev->dev_addr);
  1402. spin_unlock_bh(&vsi->mac_filter_hash_lock);
  1403. if (vsi->type == I40E_VSI_MAIN) {
  1404. i40e_status ret;
  1405. ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
  1406. addr->sa_data, NULL);
  1407. if (ret)
  1408. netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
  1409. i40e_stat_str(hw, ret),
  1410. i40e_aq_str(hw, hw->aq.asq_last_status));
  1411. }
  1412. /* schedule our worker thread which will take care of
  1413. * applying the new filter changes
  1414. */
  1415. i40e_service_event_schedule(pf);
  1416. return 0;
  1417. }
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 *
 * Either @seed or @lut may be NULL to skip that part of the programming.
 * Returns 0 on success, or the admin queue error code on failure.
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		/* The main VSI programs the PF-wide LUT; others get their own */
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
  1454. /**
  1455. * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
  1456. * @vsi: VSI structure
  1457. **/

Large files files are truncated, but you can click here to view the full file