
/drivers/net/wireless/rt2x00/rt2x00queue.c

https://bitbucket.org/wisechild/galaxy-nexus
   1/*
   2	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
   3	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
   4	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
   5	<http://rt2x00.serialmonkey.com>
   6
   7	This program is free software; you can redistribute it and/or modify
   8	it under the terms of the GNU General Public License as published by
   9	the Free Software Foundation; either version 2 of the License, or
  10	(at your option) any later version.
  11
  12	This program is distributed in the hope that it will be useful,
  13	but WITHOUT ANY WARRANTY; without even the implied warranty of
  14	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15	GNU General Public License for more details.
  16
  17	You should have received a copy of the GNU General Public License
  18	along with this program; if not, write to the
  19	Free Software Foundation, Inc.,
  20	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  21 */
  22
  23/*
  24	Module: rt2x00lib
  25	Abstract: rt2x00 queue specific routines.
  26 */
  27
  28#include <linux/slab.h>
  29#include <linux/kernel.h>
  30#include <linux/module.h>
  31#include <linux/dma-mapping.h>
  32
  33#include "rt2x00.h"
  34#include "rt2x00lib.h"
  35
  36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
  37{
  38	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
  39	struct sk_buff *skb;
  40	struct skb_frame_desc *skbdesc;
  41	unsigned int frame_size;
  42	unsigned int head_size = 0;
  43	unsigned int tail_size = 0;
  44
  45	/*
  46	 * The frame size includes the descriptor size, because the
  47	 * hardware receives the frame directly into the skbuffer.
  48	 */
  49	frame_size = entry->queue->data_size + entry->queue->desc_size;
  50
  51	/*
  52	 * The payload should be aligned to a 4-byte boundary;
  53	 * this means we need at least 3 bytes of headroom for moving
  54	 * the frame to the correct offset.
  55	 */
  56	head_size = 4;
  57
  58	/*
  59	 * For IV/EIV/ICV assembly we must make sure there are
  60	 * at least 8 bytes available in the headroom for the IV/EIV
  61	 * and 8 bytes of tailroom for the ICV data.
  62	 */
  63	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
  64		head_size += 8;
  65		tail_size += 8;
  66	}
  67
  68	/*
  69	 * Allocate skbuffer.
  70	 */
  71	skb = dev_alloc_skb(frame_size + head_size + tail_size);
  72	if (!skb)
  73		return NULL;
  74
  75	/*
  76	 * Reserve the requested headroom and size the data area to the
  77	 * frame, leaving the requested tailroom available.
  78	 */
  79	skb_reserve(skb, head_size);
  80	skb_put(skb, frame_size);
  81
  82	/*
  83	 * Populate skbdesc.
  84	 */
  85	skbdesc = get_skb_frame_desc(skb);
  86	memset(skbdesc, 0, sizeof(*skbdesc));
  87	skbdesc->entry = entry;
  88
  89	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
  90		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
  91						  skb->data,
  92						  skb->len,
  93						  DMA_FROM_DEVICE);
  94		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
  95	}
  96
  97	return skb;
  98}
  99
 100void rt2x00queue_map_txskb(struct queue_entry *entry)
 101{
 102	struct device *dev = entry->queue->rt2x00dev->dev;
 103	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 104
 105	skbdesc->skb_dma =
 106	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
 107	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 108}
 109EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
 110
 111void rt2x00queue_unmap_skb(struct queue_entry *entry)
 112{
 113	struct device *dev = entry->queue->rt2x00dev->dev;
 114	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 115
 116	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
 117		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 118				 DMA_FROM_DEVICE);
 119		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
 120	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
 121		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 122				 DMA_TO_DEVICE);
 123		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 124	}
 125}
 126EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
 127
 128void rt2x00queue_free_skb(struct queue_entry *entry)
 129{
 130	if (!entry->skb)
 131		return;
 132
 133	rt2x00queue_unmap_skb(entry);
 134	dev_kfree_skb_any(entry->skb);
 135	entry->skb = NULL;
 136}
 137
 138void rt2x00queue_align_frame(struct sk_buff *skb)
 139{
 140	unsigned int frame_length = skb->len;
 141	unsigned int align = ALIGN_SIZE(skb, 0);
 142
 143	if (!align)
 144		return;
 145
 146	skb_push(skb, align);
 147	memmove(skb->data, skb->data + align, frame_length);
 148	skb_trim(skb, frame_length);
 149}
 150
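/*
 * Insert L2 padding between the 802.11 header and the payload so that the
 * payload starts on a 4-byte boundary; the resulting skb layout is
 * [header][l2pad][payload], with l2pad = L2PAD_SIZE(header_length).
 */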
 151void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 152{
 153	unsigned int payload_length = skb->len - header_length;
 154	unsigned int header_align = ALIGN_SIZE(skb, 0);
 155	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
 156	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
 157
 158	/*
 159	 * Adjust the header alignment if the payload needs to be moved more
 160	 * than the header.
 161	 */
 162	if (payload_align > header_align)
 163		header_align += 4;
 164
 165	/* There is nothing to do if no alignment is needed */
 166	if (!header_align)
 167		return;
 168
 169	/* Reserve the amount of space needed in front of the frame */
 170	skb_push(skb, header_align);
 171
 172	/*
 173	 * Move the header.
 174	 */
 175	memmove(skb->data, skb->data + header_align, header_length);
 176
 177	/* Move the payload, if present and if required */
 178	if (payload_length && payload_align)
 179		memmove(skb->data + header_length + l2pad,
 180			skb->data + header_length + l2pad + payload_align,
 181			payload_length);
 182
 183	/* Trim the skb to the correct size */
 184	skb_trim(skb, header_length + l2pad + payload_length);
 185}
 186
 187void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
 188{
 189	/*
 190	 * L2 padding is only present if the skb contains more than just the
 191	 * IEEE 802.11 header.
 192	 */
 193	unsigned int l2pad = (skb->len > header_length) ?
 194				L2PAD_SIZE(header_length) : 0;
 195
 196	if (!l2pad)
 197		return;
 198
 199	memmove(skb->data + l2pad, skb->data, header_length);
 200	skb_pull(skb, l2pad);
 201}
 202
 203static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 204						 struct txentry_desc *txdesc)
 205{
 206	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 207	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 208	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 209	unsigned long irqflags;
 210
 211	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 212		return;
 213
 214	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 215
 216	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
 217		return;
 218
 219	/*
 220	 * The hardware is not able to insert a sequence number. Assign a
 221	 * software generated one here.
 222	 *
 223	 * This is wrong because beacons are not getting sequence
 224	 * numbers assigned properly.
 225	 *
 226	 * A secondary problem exists for drivers that cannot toggle
 227	 * sequence counting per-frame, since those will override the
 228	 * sequence counter given by mac80211.
 229	 */
 230	spin_lock_irqsave(&intf->seqlock, irqflags);
 231
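	/*
	 * The sequence number occupies the upper 12 bits of seq_ctrl, so
	 * adding 0x10 advances it by one; the mask below keeps only the
	 * 4-bit fragment number from the existing seq_ctrl value.
	 */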
 232	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 233		intf->seqno += 0x10;
 234	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 235	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
 236
 237	spin_unlock_irqrestore(&intf->seqlock, irqflags);
 238
 239}
 240
 241static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
 242						  struct txentry_desc *txdesc,
 243						  const struct rt2x00_rate *hwrate)
 244{
 245	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 246	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 247	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 248	unsigned int data_length;
 249	unsigned int duration;
 250	unsigned int residual;
 251
 252	/*
 253	 * Determine with what IFS priority this frame should be sent.
 254	 * Set ifs to IFS_SIFS when this is not the first fragment,
 255	 * or this fragment came after RTS/CTS.
 256	 */
 257	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 258		txdesc->u.plcp.ifs = IFS_BACKOFF;
 259	else
 260		txdesc->u.plcp.ifs = IFS_SIFS;
 261
 262	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
 263	data_length = entry->skb->len + 4;
 264	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
 265
 266	/*
 267	 * PLCP setup
 268	 * Length calculation depends on OFDM/CCK rate.
 269	 */
 270	txdesc->u.plcp.signal = hwrate->plcp;
 271	txdesc->u.plcp.service = 0x04;
 272
 273	if (hwrate->flags & DEV_RATE_OFDM) {
 274		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
 275		txdesc->u.plcp.length_low = data_length & 0x3f;
 276	} else {
 277		/*
 278		 * Convert length to microseconds.
 279		 */
 280		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
 281		duration = GET_DURATION(data_length, hwrate->bitrate);
 282
 283		if (residual != 0) {
 284			duration++;
 285
 286			/*
 287			 * Check if we need to set the Length Extension
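			 * bit: a non-zero residual of at most 30 means that
			 * 1-3 data bits spill past a whole microsecond, which
			 * is the case where 802.11b needs the extension bit
			 * at 11 Mbps.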
 288			 */
 289			if (hwrate->bitrate == 110 && residual <= 30)
 290				txdesc->u.plcp.service |= 0x80;
 291		}
 292
 293		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
 294		txdesc->u.plcp.length_low = duration & 0xff;
 295
 296		/*
 297		 * When short preamble is used we should set the
 298		 * preamble bit in the signal.
 299		 */
 300		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 301			txdesc->u.plcp.signal |= 0x08;
 302	}
 303}
 304
 305static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
 306						struct txentry_desc *txdesc,
 307						const struct rt2x00_rate *hwrate)
 308{
 309	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 310	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 311	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 312
 313	if (tx_info->control.sta)
 314		txdesc->u.ht.mpdu_density =
 315		    tx_info->control.sta->ht_cap.ampdu_density;
 316
 317	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
 318
 319	/*
 320	 * Only one STBC stream is supported for now.
 321	 */
 322	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
 323		txdesc->u.ht.stbc = 1;
 324
 325	/*
 326	 * If IEEE80211_TX_RC_MCS is set, txrate->idx simply contains
 327	 * the MCS rate to be used.
 328	 */
 329	if (txrate->flags & IEEE80211_TX_RC_MCS) {
 330		txdesc->u.ht.mcs = txrate->idx;
 331
 332		/*
 333		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
 334		 * when using more than one TX stream (>MCS7).
 335		 */
 336		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
 337		    ((tx_info->control.sta->ht_cap.cap &
 338		      IEEE80211_HT_CAP_SM_PS) >>
 339		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
 340		    WLAN_HT_CAP_SM_PS_DYNAMIC)
 341			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
 342	} else {
 343		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
 344		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 345			txdesc->u.ht.mcs |= 0x08;
 346	}
 347
 348	/*
 349	 * This frame is eligible for an AMPDU; however, don't aggregate
 350	 * frames that are intended to probe a specific tx rate.
 351	 */
 352	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
 353	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
 354		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 355
 356	/*
 357	 * Set 40 MHz mode if necessary (for legacy rates this will
 358	 * duplicate the frame to both channels).
 359	 */
 360	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
 361	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
 362		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
 363	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
 364		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
 365
 366	/*
 367	 * Determine IFS values
 368	 * - Use TXOP_BACKOFF for management frames except beacons
 369	 * - Use TXOP_SIFS for fragment bursts
 370	 * - Use TXOP_HTTXOP for everything else
 371	 *
 372	 * Note: rt2800 devices won't use CTS protection (if used)
 373	 * for frames not transmitted with TXOP_HTTXOP
 374	 */
 375	if (ieee80211_is_mgmt(hdr->frame_control) &&
 376	    !ieee80211_is_beacon(hdr->frame_control))
 377		txdesc->u.ht.txop = TXOP_BACKOFF;
 378	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 379		txdesc->u.ht.txop = TXOP_SIFS;
 380	else
 381		txdesc->u.ht.txop = TXOP_HTTXOP;
 382}
 383
 384static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 385					     struct txentry_desc *txdesc)
 386{
 387	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 388	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 389	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 390	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 391	struct ieee80211_rate *rate;
 392	const struct rt2x00_rate *hwrate = NULL;
 393
 394	memset(txdesc, 0, sizeof(*txdesc));
 395
 396	/*
 397	 * Header and frame information.
 398	 */
 399	txdesc->length = entry->skb->len;
 400	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
 401
 402	/*
 403	 * Check whether this frame is to be acked.
 404	 */
 405	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 406		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 407
 408	/*
 409	 * Check if this is an RTS/CTS frame
 410	 */
 411	if (ieee80211_is_rts(hdr->frame_control) ||
 412	    ieee80211_is_cts(hdr->frame_control)) {
 413		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 414		if (ieee80211_is_rts(hdr->frame_control))
 415			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
 416		else
 417			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
 418		if (tx_info->control.rts_cts_rate_idx >= 0)
 419			rate =
 420			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
 421	}
 422
 423	/*
 424	 * Determine retry information.
 425	 */
 426	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
 427	if (txdesc->retry_limit >= rt2x00dev->long_retry)
 428		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 429
 430	/*
 431	 * Check if more fragments are pending
 432	 */
 433	if (ieee80211_has_morefrags(hdr->frame_control)) {
 434		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 435		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
 436	}
 437
 438	/*
 439	 * Check if more frames (!= fragments) are pending
 440	 */
 441	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
 442		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 443
 444	/*
 445	 * Beacons and probe responses require the tsf timestamp
 446	 * to be inserted into the frame.
 447	 */
 448	if (ieee80211_is_beacon(hdr->frame_control) ||
 449	    ieee80211_is_probe_resp(hdr->frame_control))
 450		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 451
 452	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
 453	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
 454		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
 455
 456	/*
 457	 * Determine rate modulation.
 458	 */
 459	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 460		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
 461	else if (txrate->flags & IEEE80211_TX_RC_MCS)
 462		txdesc->rate_mode = RATE_MODE_HT_MIX;
 463	else {
 464		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
 465		hwrate = rt2x00_get_rate(rate->hw_value);
 466		if (hwrate->flags & DEV_RATE_OFDM)
 467			txdesc->rate_mode = RATE_MODE_OFDM;
 468		else
 469			txdesc->rate_mode = RATE_MODE_CCK;
 470	}
 471
 472	/*
 473	 * Apply TX descriptor handling by components
 474	 */
 475	rt2x00crypto_create_tx_descriptor(entry, txdesc);
 476	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
 477
 478	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
 479		rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
 480	else
 481		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 482}
 483
 484static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 485				     struct txentry_desc *txdesc)
 486{
 487	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 488
 489	/*
 490	 * This should not happen: we already checked that the entry
 491	 * was ours. If the hardware disagrees, the queue has been
 492	 * corrupted!
 493	 */
 494	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
 495		     rt2x00dev->ops->lib->get_entry_state(entry))) {
 496		ERROR(rt2x00dev,
 497		      "Corrupt queue %d, accessing entry which is not ours.\n"
 498		      "Please file bug report to %s.\n",
 499		      entry->queue->qid, DRV_PROJECT);
 500		return -EINVAL;
 501	}
 502
 503	/*
 504	 * Add the requested extra tx headroom in front of the skb.
 505	 */
 506	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
 507	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
 508
 509	/*
 510	 * Call the driver's write_tx_data function, if it exists.
 511	 */
 512	if (rt2x00dev->ops->lib->write_tx_data)
 513		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
 514
 515	/*
 516	 * Map the skb to DMA.
 517	 */
 518	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
 519		rt2x00queue_map_txskb(entry);
 520
 521	return 0;
 522}
 523
 524static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 525					    struct txentry_desc *txdesc)
 526{
 527	struct data_queue *queue = entry->queue;
 528
 529	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
 530
 531	/*
 532	 * All processing on the frame has been completed; this means
 533	 * it is now ready to be dumped to userspace through debugfs.
 534	 */
 535	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
 536}
 537
 538static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 539				      struct txentry_desc *txdesc)
 540{
 541	/*
 542	 * Check if we need to kick the queue; there are however a few rules:
 543	 *	1) Don't kick unless this is the last frame in a burst.
 544	 *	   When the burst flag is set, this frame is always followed
 545	 *	   by another frame which is in some way related to it.
 546	 *	   This is true for fragments, RTS or CTS-to-self frames.
 547	 *	2) Rule 1 can be broken when the number of available entries
 548	 *	   in the queue drops below a certain threshold.
 549	 */
 550	if (rt2x00queue_threshold(queue) ||
 551	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
 552		queue->rt2x00dev->ops->lib->kick_queue(queue);
 553}
 554
 555int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 556			       bool local)
 557{
 558	struct ieee80211_tx_info *tx_info;
 559	struct queue_entry *entry;
 560	struct txentry_desc txdesc;
 561	struct skb_frame_desc *skbdesc;
 562	u8 rate_idx, rate_flags;
 563	int ret = 0;
 564
 565	spin_lock(&queue->tx_lock);
 566
 567	entry = rt2x00queue_get_entry(queue, Q_INDEX);
 568
 569	if (unlikely(rt2x00queue_full(queue))) {
 570		ERROR(queue->rt2x00dev,
 571		      "Dropping frame due to full tx queue %d.\n", queue->qid);
 572		ret = -ENOBUFS;
 573		goto out;
 574	}
 575
 576	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
 577				      &entry->flags))) {
 578		ERROR(queue->rt2x00dev,
 579		      "Arrived at non-free entry in the non-full queue %d.\n"
 580		      "Please file bug report to %s.\n",
 581		      queue->qid, DRV_PROJECT);
 582		ret = -EINVAL;
 583		goto out;
 584	}
 585
 586	/*
 587	 * Copy all TX descriptor information into txdesc;
 588	 * after that we are free to use the skb->cb array
 589	 * for our information.
 590	 */
 591	entry->skb = skb;
 592	rt2x00queue_create_tx_descriptor(entry, &txdesc);
 593
 594	/*
 595	 * All information has been retrieved from the skb->cb array;
 596	 * now we should claim ownership of the driver part of that
 597	 * array while preserving the bitrate index and flags.
 598	 */
 599	tx_info = IEEE80211_SKB_CB(skb);
 600	rate_idx = tx_info->control.rates[0].idx;
 601	rate_flags = tx_info->control.rates[0].flags;
 602	skbdesc = get_skb_frame_desc(skb);
 603	memset(skbdesc, 0, sizeof(*skbdesc));
 604	skbdesc->entry = entry;
 605	skbdesc->tx_rate_idx = rate_idx;
 606	skbdesc->tx_rate_flags = rate_flags;
 607
 608	if (local)
 609		skbdesc->flags |= SKBDESC_NOT_MAC80211;
 610
 611	/*
 612	 * When hardware encryption is supported, and this frame
 613	 * is to be encrypted, we should strip the IV/EIV data from
 614	 * the frame so we can provide it to the driver separately.
 615	 */
 616	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 617	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
 618		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
 619			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 620		else
 621			rt2x00crypto_tx_remove_iv(skb, &txdesc);
 622	}
 623
 624	/*
 625	 * When DMA allocation is required we should guarantee to the
 626	 * driver that the DMA is aligned to a 4-byte boundary.
 627	 * However, some drivers require L2 padding to pad the payload
 628	 * rather than the header. This could be a requirement for
 629	 * PCI and USB devices, while header alignment alone is only valid
 630	 * for PCI devices.
 631	 */
 632	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
 633		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
 634	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
 635		rt2x00queue_align_frame(entry->skb);
 636
 637	/*
 638	 * It is possible that the queue was corrupted and this
 639	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
 640	 * this frame will simply be dropped.
 641	 */
 642	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
 643		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 644		entry->skb = NULL;
 645		ret = -EIO;
 646		goto out;
 647	}
 648
 649	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 650
 651	rt2x00queue_index_inc(entry, Q_INDEX);
 652	rt2x00queue_write_tx_descriptor(entry, &txdesc);
 653	rt2x00queue_kick_tx_queue(queue, &txdesc);
 654
 655out:
 656	spin_unlock(&queue->tx_lock);
 657	return ret;
 658}
 659
 660int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
 661			     struct ieee80211_vif *vif)
 662{
 663	struct rt2x00_intf *intf = vif_to_intf(vif);
 664
 665	if (unlikely(!intf->beacon))
 666		return -ENOBUFS;
 667
 668	mutex_lock(&intf->beacon_skb_mutex);
 669
 670	/*
 671	 * Clean up the beacon skb.
 672	 */
 673	rt2x00queue_free_skb(intf->beacon);
 674
 675	/*
 676	 * Clear beacon (single bssid devices don't need to clear the beacon
 677	 * since the beacon queue will get stopped anyway).
 678	 */
 679	if (rt2x00dev->ops->lib->clear_beacon)
 680		rt2x00dev->ops->lib->clear_beacon(intf->beacon);
 681
 682	mutex_unlock(&intf->beacon_skb_mutex);
 683
 684	return 0;
 685}
 686
 687int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
 688				     struct ieee80211_vif *vif)
 689{
 690	struct rt2x00_intf *intf = vif_to_intf(vif);
 691	struct skb_frame_desc *skbdesc;
 692	struct txentry_desc txdesc;
 693
 694	if (unlikely(!intf->beacon))
 695		return -ENOBUFS;
 696
 697	/*
 698	 * Clean up the beacon skb.
 699	 */
 700	rt2x00queue_free_skb(intf->beacon);
 701
 702	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
 703	if (!intf->beacon->skb)
 704		return -ENOMEM;
 705
 706	/*
 707	 * Copy all TX descriptor information into txdesc;
 708	 * after that we are free to use the skb->cb array
 709	 * for our information.
 710	 */
 711	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
 712
 713	/*
 714	 * Fill in skb descriptor
 715	 */
 716	skbdesc = get_skb_frame_desc(intf->beacon->skb);
 717	memset(skbdesc, 0, sizeof(*skbdesc));
 718	skbdesc->entry = intf->beacon;
 719
 720	/*
 721	 * Send beacon to hardware.
 722	 */
 723	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 724
 725	return 0;
 726
 727}
 728
 729int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 730			      struct ieee80211_vif *vif)
 731{
 732	struct rt2x00_intf *intf = vif_to_intf(vif);
 733	int ret;
 734
 735	mutex_lock(&intf->beacon_skb_mutex);
 736	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
 737	mutex_unlock(&intf->beacon_skb_mutex);
 738
 739	return ret;
 740}
 741
 742bool rt2x00queue_for_each_entry(struct data_queue *queue,
 743				enum queue_index start,
 744				enum queue_index end,
 745				void *data,
 746				bool (*fn)(struct queue_entry *entry,
 747					   void *data))
 748{
 749	unsigned long irqflags;
 750	unsigned int index_start;
 751	unsigned int index_end;
 752	unsigned int i;
 753
 754	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
 755		ERROR(queue->rt2x00dev,
 756		      "Entry requested from invalid index range (%d - %d)\n",
 757		      start, end);
 758		return true;
 759	}
 760
 761	/*
 762	 * Only protect the range we are going to loop over;
 763	 * if during our loop an extra entry is set to pending,
 764	 * it should not be kicked during this run, since it
 765	 * is part of another TX operation.
 766	 */
 767	spin_lock_irqsave(&queue->index_lock, irqflags);
 768	index_start = queue->index[start];
 769	index_end = queue->index[end];
 770	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 771
 772	/*
 773	 * Start from the TX done pointer; this guarantees that we will
 774	 * send out all frames in the correct order.
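	 * When the range wraps past the end of the ring, walk from
	 * index_start up to the queue limit and then from 0 to index_end.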
 775	 */
 776	if (index_start < index_end) {
 777		for (i = index_start; i < index_end; i++) {
 778			if (fn(&queue->entries[i], data))
 779				return true;
 780		}
 781	} else {
 782		for (i = index_start; i < queue->limit; i++) {
 783			if (fn(&queue->entries[i], data))
 784				return true;
 785		}
 786
 787		for (i = 0; i < index_end; i++) {
 788			if (fn(&queue->entries[i], data))
 789				return true;
 790		}
 791	}
 792
 793	return false;
 794}
 795EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 796
 797struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 798					  enum queue_index index)
 799{
 800	struct queue_entry *entry;
 801	unsigned long irqflags;
 802
 803	if (unlikely(index >= Q_INDEX_MAX)) {
 804		ERROR(queue->rt2x00dev,
 805		      "Entry requested from invalid index type (%d)\n", index);
 806		return NULL;
 807	}
 808
 809	spin_lock_irqsave(&queue->index_lock, irqflags);
 810
 811	entry = &queue->entries[queue->index[index]];
 812
 813	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 814
 815	return entry;
 816}
 817EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 818
 819void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 820{
 821	struct data_queue *queue = entry->queue;
 822	unsigned long irqflags;
 823
 824	if (unlikely(index >= Q_INDEX_MAX)) {
 825		ERROR(queue->rt2x00dev,
 826		      "Index change on invalid index type (%d)\n", index);
 827		return;
 828	}
 829
 830	spin_lock_irqsave(&queue->index_lock, irqflags);
 831
 832	queue->index[index]++;
 833	if (queue->index[index] >= queue->limit)
 834		queue->index[index] = 0;
 835
 836	entry->last_action = jiffies;
 837
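	/*
	 * Q_INDEX points at the next entry handed to the hardware, so the
	 * queue length grows; Q_INDEX_DONE points at the next entry to be
	 * completed, so the length shrinks and the completed-frame count
	 * grows.
	 */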
 838	if (index == Q_INDEX) {
 839		queue->length++;
 840	} else if (index == Q_INDEX_DONE) {
 841		queue->length--;
 842		queue->count++;
 843	}
 844
 845	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 846}
 847
 848void rt2x00queue_pause_queue(struct data_queue *queue)
 849{
 850	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 851	    !test_bit(QUEUE_STARTED, &queue->flags) ||
 852	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
 853		return;
 854
 855	switch (queue->qid) {
 856	case QID_AC_VO:
 857	case QID_AC_VI:
 858	case QID_AC_BE:
 859	case QID_AC_BK:
 860		/*
 861		 * For TX queues, we have to disable the queue
 862		 * inside mac80211.
 863		 */
 864		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
 865		break;
 866	default:
 867		break;
 868	}
 869}
 870EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 871
 872void rt2x00queue_unpause_queue(struct data_queue *queue)
 873{
 874	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 875	    !test_bit(QUEUE_STARTED, &queue->flags) ||
 876	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
 877		return;
 878
 879	switch (queue->qid) {
 880	case QID_AC_VO:
 881	case QID_AC_VI:
 882	case QID_AC_BE:
 883	case QID_AC_BK:
 884		/*
 885		 * For TX queues, we have to enable the queue
 886		 * inside mac80211.
 887		 */
 888		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
 889		break;
 890	case QID_RX:
 891		/*
 892		 * For RX we need to kick the queue now in order to
 893		 * receive frames.
 894		 */
 895		queue->rt2x00dev->ops->lib->kick_queue(queue);
 896	default:
 897		break;
 898	}
 899}
 900EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
 901
 902void rt2x00queue_start_queue(struct data_queue *queue)
 903{
 904	mutex_lock(&queue->status_lock);
 905
 906	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 907	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
 908		mutex_unlock(&queue->status_lock);
 909		return;
 910	}
 911
 912	set_bit(QUEUE_PAUSED, &queue->flags);
 913
 914	queue->rt2x00dev->ops->lib->start_queue(queue);
 915
 916	rt2x00queue_unpause_queue(queue);
 917
 918	mutex_unlock(&queue->status_lock);
 919}
 920EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
 921
 922void rt2x00queue_stop_queue(struct data_queue *queue)
 923{
 924	mutex_lock(&queue->status_lock);
 925
 926	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
 927		mutex_unlock(&queue->status_lock);
 928		return;
 929	}
 930
 931	rt2x00queue_pause_queue(queue);
 932
 933	queue->rt2x00dev->ops->lib->stop_queue(queue);
 934
 935	mutex_unlock(&queue->status_lock);
 936}
 937EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 938
 939void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 940{
 941	bool started;
 942	bool tx_queue =
 943		(queue->qid == QID_AC_VO) ||
 944		(queue->qid == QID_AC_VI) ||
 945		(queue->qid == QID_AC_BE) ||
 946		(queue->qid == QID_AC_BK);
 947
 948	mutex_lock(&queue->status_lock);
 949
 950	/*
 951	 * If the queue has been started, we must stop it temporarily
 952	 * to prevent any new frames from being queued on the device. If
 953	 * we are not dropping the pending frames, the queue must
 954	 * only be stopped in the software and not the hardware,
 955	 * otherwise the queue will never become empty on its own.
 956	 */
 957	started = test_bit(QUEUE_STARTED, &queue->flags);
 958	if (started) {
 959		/*
 960		 * Pause the queue
 961		 */
 962		rt2x00queue_pause_queue(queue);
 963
 964		/*
 965		 * If we are not supposed to drop any pending
 966		 * frames, this means we must force a start (=kick)
 967		 * to the queue to make sure the hardware will
 968		 * start transmitting.
 969		 */
 970		if (!drop && tx_queue)
 971			queue->rt2x00dev->ops->lib->kick_queue(queue);
 972	}
 973
 974	/*
 975	 * Check if the driver supports flushing; if that is the case we can
 976	 * defer the flushing to the driver. Otherwise we must use the
 977	 * alternative which just waits for the queue to become empty.
 978	 */
 979	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
 980		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
 981
 982	/*
 983	 * The queue flush has failed...
 984	 */
 985	if (unlikely(!rt2x00queue_empty(queue)))
 986		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
 987
 988	/*
 989	 * Restore the queue to the previous status
 990	 */
 991	if (started)
 992		rt2x00queue_unpause_queue(queue);
 993
 994	mutex_unlock(&queue->status_lock);
 995}
 996EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
 997
 998void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
 999{
1000	struct data_queue *queue;
1001
1002	/*
1003	 * rt2x00queue_start_queue will call ieee80211_wake_queue
1004	 * for each queue after it has been properly initialized.
1005	 */
1006	tx_queue_for_each(rt2x00dev, queue)
1007		rt2x00queue_start_queue(queue);
1008
1009	rt2x00queue_start_queue(rt2x00dev->rx);
1010}
1011EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
1012
1013void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
1014{
1015	struct data_queue *queue;
1016
1017	/*
1018	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
1019	 * as well, but we are completely shutting everything down
1020	 * now, so it is much safer to stop all TX queues at once,
1021	 * and use rt2x00queue_stop_queue for cleaning up.
1022	 */
1023	ieee80211_stop_queues(rt2x00dev->hw);
1024
1025	tx_queue_for_each(rt2x00dev, queue)
1026		rt2x00queue_stop_queue(queue);
1027
1028	rt2x00queue_stop_queue(rt2x00dev->rx);
1029}
1030EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
1031
1032void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
1033{
1034	struct data_queue *queue;
1035
1036	tx_queue_for_each(rt2x00dev, queue)
1037		rt2x00queue_flush_queue(queue, drop);
1038
1039	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
1040}
1041EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
1042
1043static void rt2x00queue_reset(struct data_queue *queue)
1044{
1045	unsigned long irqflags;
1046	unsigned int i;
1047
1048	spin_lock_irqsave(&queue->index_lock, irqflags);
1049
1050	queue->count = 0;
1051	queue->length = 0;
1052
1053	for (i = 0; i < Q_INDEX_MAX; i++)
1054		queue->index[i] = 0;
1055
1056	spin_unlock_irqrestore(&queue->index_lock, irqflags);
1057}
1058
1059void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
1060{
1061	struct data_queue *queue;
1062	unsigned int i;
1063
1064	queue_for_each(rt2x00dev, queue) {
1065		rt2x00queue_reset(queue);
1066
1067		for (i = 0; i < queue->limit; i++)
1068			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
1069	}
1070}
1071
1072static int rt2x00queue_alloc_entries(struct data_queue *queue,
1073				     const struct data_queue_desc *qdesc)
1074{
1075	struct queue_entry *entries;
1076	unsigned int entry_size;
1077	unsigned int i;
1078
1079	rt2x00queue_reset(queue);
1080
1081	queue->limit = qdesc->entry_num;
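	/* The kick threshold is roughly 10% of the queue size, rounded up. */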
1082	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
1083	queue->data_size = qdesc->data_size;
1084	queue->desc_size = qdesc->desc_size;
1085
1086	/*
1087	 * Allocate all queue entries.
1088	 */
1089	entry_size = sizeof(*entries) + qdesc->priv_size;
1090	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1091	if (!entries)
1092		return -ENOMEM;
1093
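/*
 * The single allocation above holds all queue_entry structures first,
 * followed by one driver private data area per entry. The macro below
 * computes the address of the private data belonging to entry __index.
 */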
1094#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
1095	(((char *)(__base)) + ((__limit) * (__esize)) + \
1096	    ((__index) * (__psize)))
1097
1098	for (i = 0; i < queue->limit; i++) {
1099		entries[i].flags = 0;
1100		entries[i].queue = queue;
1101		entries[i].skb = NULL;
1102		entries[i].entry_idx = i;
1103		entries[i].priv_data =
1104		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1105					    sizeof(*entries), qdesc->priv_size);
1106	}
1107
1108#undef QUEUE_ENTRY_PRIV_OFFSET
1109
1110	queue->entries = entries;
1111
1112	return 0;
1113}
1114
1115static void rt2x00queue_free_skbs(struct data_queue *queue)
1116{
1117	unsigned int i;
1118
1119	if (!queue->entries)
1120		return;
1121
1122	for (i = 0; i < queue->limit; i++) {
1123		rt2x00queue_free_skb(&queue->entries[i]);
1124	}
1125}
1126
1127static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1128{
1129	unsigned int i;
1130	struct sk_buff *skb;
1131
1132	for (i = 0; i < queue->limit; i++) {
1133		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
1134		if (!skb)
1135			return -ENOMEM;
1136		queue->entries[i].skb = skb;
1137	}
1138
1139	return 0;
1140}
1141
1142int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1143{
1144	struct data_queue *queue;
1145	int status;
1146
1147	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
1148	if (status)
1149		goto exit;
1150
1151	tx_queue_for_each(rt2x00dev, queue) {
1152		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
1153		if (status)
1154			goto exit;
1155	}
1156
1157	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
1158	if (status)
1159		goto exit;
1160
1161	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1162		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
1163						   rt2x00dev->ops->atim);
1164		if (status)
1165			goto exit;
1166	}
1167
1168	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
1169	if (status)
1170		goto exit;
1171
1172	return 0;
1173
1174exit:
1175	ERROR(rt2x00dev, "Queue entries allocation failed.\n");
1176
1177	rt2x00queue_uninitialize(rt2x00dev);
1178
1179	return status;
1180}
1181
1182void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
1183{
1184	struct data_queue *queue;
1185
1186	rt2x00queue_free_skbs(rt2x00dev->rx);
1187
1188	queue_for_each(rt2x00dev, queue) {
1189		kfree(queue->entries);
1190		queue->entries = NULL;
1191	}
1192}
1193
1194static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
1195			     struct data_queue *queue, enum data_queue_qid qid)
1196{
1197	mutex_init(&queue->status_lock);
1198	spin_lock_init(&queue->tx_lock);
1199	spin_lock_init(&queue->index_lock);
1200
1201	queue->rt2x00dev = rt2x00dev;
1202	queue->qid = qid;
1203	queue->txop = 0;
1204	queue->aifs = 2;
1205	queue->cw_min = 5;
1206	queue->cw_max = 10;
1207}
1208
1209int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1210{
1211	struct data_queue *queue;
1212	enum data_queue_qid qid;
1213	unsigned int req_atim =
1214	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1215
1216	/*
1217	 * We need the following queues:
1218	 * RX: 1
1219	 * TX: ops->tx_queues
1220	 * Beacon: 1
1221	 * Atim: 1 (if required)
1222	 */
1223	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
1224
1225	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1226	if (!queue) {
1227		ERROR(rt2x00dev, "Queue allocation failed.\n");
1228		return -ENOMEM;
1229	}
1230
1231	/*
1232	 * Initialize pointers
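	 * The queue array is laid out as [RX][TX...][BCN][ATIM].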
1233	 */
1234	rt2x00dev->rx = queue;
1235	rt2x00dev->tx = &queue[1];
1236	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1237	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1238
1239	/*
1240	 * Initialize queue parameters.
1241	 * RX: qid = QID_RX
1242	 * TX: qid = QID_AC_VO + index
1243	 * TX: cw_min: 2^5 = 32.
1244	 * TX: cw_max: 2^10 = 1024.
1245	 * BCN: qid = QID_BEACON
1246	 * ATIM: qid = QID_ATIM
1247	 */
1248	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
1249
1250	qid = QID_AC_VO;
1251	tx_queue_for_each(rt2x00dev, queue)
1252		rt2x00queue_init(rt2x00dev, queue, qid++);
1253
1254	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
1255	if (req_atim)
1256		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1257
1258	return 0;
1259}
1260
1261void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
1262{
1263	kfree(rt2x00dev->rx);
1264	rt2x00dev->rx = NULL;
1265	rt2x00dev->tx = NULL;
1266	rt2x00dev->bcn = NULL;
1267}