/drivers/net/ethernet/sfc/tx.c

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
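
/* Return a pointer into the per-queue copy buffer area for the current
 * insert position, allocating the backing page on first use.  Also points
 * the descriptor's DMA address at the same offset.  Returns NULL if the
 * page cannot be allocated.
 */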
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
                                         struct efx_tx_buffer *buffer)
{
        unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
        struct efx_buffer *page_buf =
                &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
        unsigned int offset =
                ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

        if (unlikely(!page_buf->addr) &&
            efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                 GFP_ATOMIC))
                return NULL;
        buffer->dma_addr = page_buf->dma_addr + offset;
        buffer->unmap_len = 0;
        return (u8 *)page_buf->addr + offset;
}
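
/* As efx_tx_get_copy_buffer(), but refuse any request larger than a single
 * copy buffer entry (EFX_TX_CB_SIZE).
 */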
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len)
{
        if (len > EFX_TX_CB_SIZE)
                return NULL;

        return efx_tx_get_copy_buffer(tx_queue, buffer);
}
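
/* Stop the core TX queue if either hardware queue of the pair is close to
 * full, re-checking against a fresh read_count after stopping so that a
 * racing completion cannot leave the queue stopped with nothing left to
 * wake it.
 */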
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = READ_ONCE(txq1->read_count);
        txq2->old_read_count = READ_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}
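
/* Copy a whole short skb into the queue's copy buffer area so it can be
 * described by a single descriptor.  The caller has already checked that
 * the skb fits within EFX_TX_CB_SIZE bytes.
 */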
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                struct sk_buff *skb)
{
        unsigned int copy_len = skb->len;
        struct efx_tx_buffer *buffer;
        u8 *copy_buffer;
        int rc;

        EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

        copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
        if (unlikely(!copy_buffer))
                return -ENOMEM;

        rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
        EFX_WARN_ON_PARANOID(rc);
        buffer->len = copy_len;

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB;

        ++tx_queue->insert_count;
        return rc;
}

#ifdef EFX_USE_PIO
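
/* Holds data too short to fill a whole cache line, so that every write to
 * the PIO buffer can be a whole, aligned cache line (needed for
 * write-combining to work well).
 */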
struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
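
/* Write the whole packet through the PIO aperture and queue a single PIO
 * option descriptor describing it; no DMA mapping is needed on this path.
 */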
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer. Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->insert_count;
        return 0;
}
#endif /* EFX_USE_PIO */

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_consume_skb_any(skb);

        skb_list_walk_safe(segments, skb, next) {
                skb_mark_not_on_list(skb);
                efx_enqueue_skb(tx_queue, skb);
        }

        return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        unsigned int old_insert_count = tx_queue->insert_count;
        bool xmit_more = netdev_xmit_more();
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
        int rc;

        skb_len = skb->len;
        segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
        if (segments == 1)
                segments = 0; /* Don't use TSO for a single segment. */

        /* Handle TSO first - it's *possible* (although unlikely) that we might
         * be passed a packet to segment that's smaller than the copybreak/PIO
         * size limit.
         */
        if (segments) {
                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
                if (rc == -EINVAL) {
                        rc = efx_tx_tso_fallback(tx_queue, skb);
                        tx_queue->tso_fallbacks++;
                        if (rc == 0)
                                return 0;
                }
                if (rc)
                        goto err;
#ifdef EFX_USE_PIO
        } else if (skb_len <= efx_piobuf_size && !xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
                        goto err;
                tx_queue->pio_packets++;
                data_mapped = true;
#endif
        } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
                /* Pad short packets or coalesce short fragmented packets. */
                if (efx_enqueue_skb_copy(tx_queue, skb))
                        goto err;
                tx_queue->cb_packets++;
                data_mapped = true;
        }

        /* Map for DMA and create descriptors if we haven't done so already. */
        if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                goto err;

        efx_tx_maybe_stop_queue(tx_queue);

        /* Pass off to hardware */
        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if
                 * xmit_more was set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = xmit_more;
        }

        if (segments) {
                tx_queue->tso_bursts++;
                tx_queue->tso_packets += segments;
                tx_queue->tx_packets += segments;
        } else {
                tx_queue->tx_packets++;
        }

        return NETDEV_TX_OK;

err:
        efx_enqueue_unwind(tx_queue, old_insert_count);
        dev_kfree_skb_any(skb);

        /* If we're not expecting another transmit and we had something to push
         * on this queue or a partner queue then we need to push here to get the
         * previous packets out.
         */
        if (!xmit_more) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        }

        return NETDEV_TX_OK;
}
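
/* Give back any frames that could not be transmitted.  The NAPI variant of
 * xdp_return_frame() is used because the XDP TX/redirect paths that call
 * this run in NAPI context.
 */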
static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
        int i;

        for (i = 0; i < n; i++)
                xdp_return_frame_rx_napi(xdpfs[i]);
}

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
                       bool flush)
{
        struct efx_tx_buffer *tx_buffer;
        struct efx_tx_queue *tx_queue;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
        unsigned int len;
        int space;
        int cpu;
        int i;

        cpu = raw_smp_processor_id();

        if (!efx->xdp_tx_queue_count ||
            unlikely(cpu >= efx->xdp_tx_queue_count))
                return -EINVAL;

        tx_queue = efx->xdp_tx_queues[cpu];
        if (unlikely(!tx_queue))
                return -EINVAL;

        if (unlikely(n && !xdpfs))
                return -EINVAL;

        if (!n)
                return 0;

        /* Check for available space. We should never need multiple
         * descriptors per frame.
         */
        space = efx->txq_entries +
                tx_queue->read_count - tx_queue->insert_count;

        for (i = 0; i < n; i++) {
                xdpf = xdpfs[i];

                if (i >= space)
                        break;

                /* We'll want a descriptor for this tx. */
                prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

                len = xdpf->len;

                /* Map for DMA. */
                dma_addr = dma_map_single(&efx->pci_dev->dev,
                                          xdpf->data, len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
                        break;

                /* Create descriptor and set up for unmapping DMA. */
                tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
                tx_buffer->xdpf = xdpf;
                tx_buffer->flags = EFX_TX_BUF_XDP |
                                   EFX_TX_BUF_MAP_SINGLE;
                tx_buffer->dma_offset = 0;
                tx_buffer->unmap_len = len;
                tx_queue->tx_packets++;
        }

        /* Pass mapped frames to hardware. */
        if (flush && i > 0)
                efx_nic_push_buffers(tx_queue);

        if (i == 0)
                return -EIO;

        efx_xdp_return_frames(n - i, xdpfs + i);

        return i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}
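
/* Process completions on a queue where each completion event covers exactly
 * one skb: dequeue buffers until the skb-bearing buffer is reached, and
 * schedule a reset if the queue turns out to be unexpectedly empty.
 */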
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;
        unsigned int read_ptr;
        bool finished = false;

        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (!finished) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!efx_tx_buffer_in_use(buffer)) {
                        struct efx_nic *efx = tx_queue->efx;

                        netif_err(efx, hw, efx->net_dev,
                                  "TX queue %d spurious single TX completion\n",
                                  tx_queue->queue);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                /* Need to check the flag before dequeueing. */
                if (buffer->flags & EFX_TX_BUF_SKB)
                        finished = true;
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }

        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        EFX_WARN_ON_PARANOID(pkts_compl != 1);

        efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}
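
/* ndo_setup_tc handler.  Only mqprio offload is supported: each traffic
 * class is mapped onto its own block of efx->n_tx_channels queues, with the
 * high-priority hardware TX queues brought up on demand to back the
 * additional classes.
 */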
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
        int rc;

        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        num_tc = mqprio->num_tc;

        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}