
/drivers/net/ethernet/sfc/tx.c

http://github.com/mirrors/linux
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

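/* Short packets are copied into per-queue "copy buffers": each insert
 * index maps to a (1 << EFX_TX_CB_ORDER)-byte slot within a PAGE_SIZE
 * DMA-coherent page, allocated lazily on first use.  The NET_IP_ALIGN
 * offset presumably mirrors the usual alignment of received skb data.
 */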
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

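	/* These counters are free-running and unsigned, so the subtractions
	 * below yield the number of descriptors in flight even after the
	 * counters wrap.
	 */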
	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

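	/* copy_len equals skb->len, so skb_copy_bits() should never fail
	 * here; the WARN below is purely defensive.
	 */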
	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

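	/* block_len is len rounded down to a whole number of cache lines.
	 * __iowrite64_copy() counts in 8-byte units, hence the >> 3; any
	 * sub-line tail is staged in the copy buffer for a later write.
	 */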
	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse the skb structure and copy fragments into the PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

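		/* Fragment pages may live in highmem, so map each one
		 * temporarily before copying it out.
		 */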
		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

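	/* A PIO send needs no DMA descriptor; a single option descriptor
	 * carries the packet length and the offset of this queue's PIO
	 * buffer.
	 */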
	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
#endif /* EFX_USE_PIO */

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

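	/* The original skb has been consumed; submit each software-built
	 * segment as an ordinary packet.
	 */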
	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

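	/* __netdev_tx_sent_queue() performs the BQL accounting and returns
	 * true when the doorbell must be rung now rather than deferred to a
	 * later xmit_more packet.
	 */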
	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if
		 * xmit_more was set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets  += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	}

	return NETDEV_TX_OK;
}

static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
	int i;

	for (i = 0; i < n; i++)
		xdp_return_frame_rx_napi(xdpfs[i]);
}

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i;

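	/* XDP TX queues are indexed by CPU, so each CPU has a dedicated
	 * queue and no extra locking is needed here.
	 */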
	cpu = raw_smp_processor_id();

	if (!efx->xdp_tx_queue_count ||
	    unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (unlikely(n && !xdpfs))
		return -EINVAL;

	if (!n)
		return 0;

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

	if (i == 0)
		return -EIO;

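	/* Hand any frames we could not map or queue back to the XDP
	 * memory model rather than leaking them.
	 */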
	efx_xdp_return_frames(n - i, xdpfs + i);

	return i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

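	/* The core queue index encodes the channel and, beyond
	 * n_tx_channels, the high-priority (mqprio) variant; the checksum
	 * offload type is chosen per packet.
	 */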
	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int read_ptr;
	bool finished = false;

	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

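	/* Dequeue descriptors until we find the buffer holding the skb,
	 * which marks the end of the single packet being completed.
	 */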
	while (!finished) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			struct efx_nic *efx = tx_queue->efx;

			netif_err(efx, hw, efx->net_dev,
				  "TX queue %d spurious single TX completion\n",
				  tx_queue->queue);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		/* Need to check the flag before dequeueing. */
		if (buffer->flags & EFX_TX_BUF_SKB)
			finished = true;
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}

	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	EFX_WARN_ON_PARANOID(pkts_compl != 1);

	efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

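	/* Give each traffic class its own contiguous block of
	 * n_tx_channels core queues.
	 */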
	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}