
/drivers/net/wireless/rt2x00/rt2x00queue.c

http://github.com/mirrors/linux
C | 1360 lines | 797 code | 227 blank | 336 comment
   1/*
   2	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
   3	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
   4	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
   5	<http://rt2x00.serialmonkey.com>
   6
   7	This program is free software; you can redistribute it and/or modify
   8	it under the terms of the GNU General Public License as published by
   9	the Free Software Foundation; either version 2 of the License, or
  10	(at your option) any later version.
  11
  12	This program is distributed in the hope that it will be useful,
  13	but WITHOUT ANY WARRANTY; without even the implied warranty of
  14	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15	GNU General Public License for more details.
  16
  17	You should have received a copy of the GNU General Public License
  18	along with this program; if not, write to the
  19	Free Software Foundation, Inc.,
  20	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  21 */
  22
  23/*
  24	Module: rt2x00lib
  25	Abstract: rt2x00 queue specific routines.
  26 */
  27
  28#include <linux/slab.h>
  29#include <linux/kernel.h>
  30#include <linux/module.h>
  31#include <linux/dma-mapping.h>
  32
  33#include "rt2x00.h"
  34#include "rt2x00lib.h"
  35
  36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
  37{
  38	struct data_queue *queue = entry->queue;
  39	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  40	struct sk_buff *skb;
  41	struct skb_frame_desc *skbdesc;
  42	unsigned int frame_size;
  43	unsigned int head_size = 0;
  44	unsigned int tail_size = 0;
  45
  46	/*
   47	 * The frame size includes the descriptor size, because the
   48	 * hardware receives the frame directly into the skbuffer.
  49	 */
  50	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
  51
  52	/*
   53	 * The payload should be aligned to a 4-byte boundary;
   54	 * this means we need at least 3 bytes of headroom for moving
   55	 * the frame to the correct offset.
  56	 */
  57	head_size = 4;
  58
  59	/*
   60	 * For IV/EIV/ICV assembly we must make sure there are
   61	 * at least 8 bytes available in the headroom for the IV/EIV
   62	 * and 8 bytes of tailroom for the ICV data.
  63	 */
  64	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
  65		head_size += 8;
  66		tail_size += 8;
  67	}
  68
  69	/*
  70	 * Allocate skbuffer.
  71	 */
  72	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
  73	if (!skb)
  74		return NULL;
  75
  76	/*
   77	 * Make sure we have the requested number of bytes
   78	 * available at the head and tail of the frame.
  79	 */
  80	skb_reserve(skb, head_size);
  81	skb_put(skb, frame_size);
  82
  83	/*
  84	 * Populate skbdesc.
  85	 */
  86	skbdesc = get_skb_frame_desc(skb);
  87	memset(skbdesc, 0, sizeof(*skbdesc));
  88	skbdesc->entry = entry;
  89
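     	/* Drivers that require DMA mapping (REQUIRE_DMA) get the RX buffer
     	 * mapped here so the device can write the received frame into it. */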
  90	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
  91		dma_addr_t skb_dma;
  92
  93		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
  94					 DMA_FROM_DEVICE);
  95		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
  96			dev_kfree_skb_any(skb);
  97			return NULL;
  98		}
  99
 100		skbdesc->skb_dma = skb_dma;
 101		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
 102	}
 103
 104	return skb;
 105}
 106
 107int rt2x00queue_map_txskb(struct queue_entry *entry)
 108{
 109	struct device *dev = entry->queue->rt2x00dev->dev;
 110	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 111
 112	skbdesc->skb_dma =
 113	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
 114
 115	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
 116		return -ENOMEM;
 117
 118	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 119	return 0;
 120}
 121EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
 122
 123void rt2x00queue_unmap_skb(struct queue_entry *entry)
 124{
 125	struct device *dev = entry->queue->rt2x00dev->dev;
 126	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 127
 128	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
 129		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 130				 DMA_FROM_DEVICE);
 131		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
 132	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
 133		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 134				 DMA_TO_DEVICE);
 135		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 136	}
 137}
 138EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
 139
 140void rt2x00queue_free_skb(struct queue_entry *entry)
 141{
 142	if (!entry->skb)
 143		return;
 144
 145	rt2x00queue_unmap_skb(entry);
 146	dev_kfree_skb_any(entry->skb);
 147	entry->skb = NULL;
 148}
 149
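     /*
      * Align the frame payload to a 4-byte boundary by moving the data
      * towards the start of the headroom that was reserved for it.
      */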
 150void rt2x00queue_align_frame(struct sk_buff *skb)
 151{
 152	unsigned int frame_length = skb->len;
 153	unsigned int align = ALIGN_SIZE(skb, 0);
 154
 155	if (!align)
 156		return;
 157
 158	skb_push(skb, align);
 159	memmove(skb->data, skb->data + align, frame_length);
 160	skb_trim(skb, frame_length);
 161}
 162
 163void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 164{
 165	unsigned int payload_length = skb->len - header_length;
 166	unsigned int header_align = ALIGN_SIZE(skb, 0);
 167	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
 168	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
 169
 170	/*
 171	 * Adjust the header alignment if the payload needs to be moved more
 172	 * than the header.
 173	 */
 174	if (payload_align > header_align)
 175		header_align += 4;
 176
 177	/* There is nothing to do if no alignment is needed */
 178	if (!header_align)
 179		return;
 180
 181	/* Reserve the amount of space needed in front of the frame */
 182	skb_push(skb, header_align);
 183
 184	/*
 185	 * Move the header.
 186	 */
 187	memmove(skb->data, skb->data + header_align, header_length);
 188
 189	/* Move the payload, if present and if required */
 190	if (payload_length && payload_align)
 191		memmove(skb->data + header_length + l2pad,
 192			skb->data + header_length + l2pad + payload_align,
 193			payload_length);
 194
 195	/* Trim the skb to the correct size */
 196	skb_trim(skb, header_length + l2pad + payload_length);
 197}
 198
 199void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
 200{
 201	/*
 202	 * L2 padding is only present if the skb contains more than just the
 203	 * IEEE 802.11 header.
 204	 */
 205	unsigned int l2pad = (skb->len > header_length) ?
 206				L2PAD_SIZE(header_length) : 0;
 207
 208	if (!l2pad)
 209		return;
 210
 211	memmove(skb->data + l2pad, skb->data, header_length);
 212	skb_pull(skb, l2pad);
 213}
 214
 215static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 216						 struct sk_buff *skb,
 217						 struct txentry_desc *txdesc)
 218{
 219	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 220	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 221	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 222	u16 seqno;
 223
 224	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 225		return;
 226
 227	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 228
 229	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
 230		/*
  231		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
  232		 * increases the seqno on retransmitted data (non-QoS) frames.
  233		 * To work around the problem, generate the seqno in software
  234		 * when QoS is disabled.
 235		 */
 236		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
 237			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 238		else
 239			/* H/W will generate sequence number */
 240			return;
 241	}
 242
 243	/*
 244	 * The hardware is not able to insert a sequence number. Assign a
 245	 * software generated one here.
 246	 *
 247	 * This is wrong because beacons are not getting sequence
 248	 * numbers assigned properly.
 249	 *
 250	 * A secondary problem exists for drivers that cannot toggle
 251	 * sequence counting per-frame, since those will override the
 252	 * sequence counter given by mac80211.
 253	 */
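     	/*
     	 * The sequence number occupies the upper 12 bits of seq_ctrl
     	 * (the low 4 bits are the fragment number), so stepping the
     	 * counter by one means adding 0x10.
     	 */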
 254	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 255		seqno = atomic_add_return(0x10, &intf->seqno);
 256	else
 257		seqno = atomic_read(&intf->seqno);
 258
 259	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 260	hdr->seq_ctrl |= cpu_to_le16(seqno);
 261}
 262
 263static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 264						  struct sk_buff *skb,
 265						  struct txentry_desc *txdesc,
 266						  const struct rt2x00_rate *hwrate)
 267{
 268	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 269	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 270	unsigned int data_length;
 271	unsigned int duration;
 272	unsigned int residual;
 273
 274	/*
  275	 * Determine with what IFS priority this frame should be sent.
  276	 * Set ifs to IFS_SIFS when this is not the first fragment,
  277	 * or when this fragment came after RTS/CTS.
 278	 */
 279	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 280		txdesc->u.plcp.ifs = IFS_BACKOFF;
 281	else
 282		txdesc->u.plcp.ifs = IFS_SIFS;
 283
 284	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
 285	data_length = skb->len + 4;
 286	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
 287
 288	/*
 289	 * PLCP setup
 290	 * Length calculation depends on OFDM/CCK rate.
 291	 */
 292	txdesc->u.plcp.signal = hwrate->plcp;
 293	txdesc->u.plcp.service = 0x04;
 294
 295	if (hwrate->flags & DEV_RATE_OFDM) {
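     		/* For OFDM rates the PLCP LENGTH field is the frame length
     		 * in bytes, split into two 6-bit halves. */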
 296		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
 297		txdesc->u.plcp.length_low = data_length & 0x3f;
 298	} else {
 299		/*
 300		 * Convert length to microseconds.
 301		 */
 302		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
 303		duration = GET_DURATION(data_length, hwrate->bitrate);
 304
 305		if (residual != 0) {
 306			duration++;
 307
 308			/*
 309			 * Check if we need to set the Length Extension
 310			 */
 311			if (hwrate->bitrate == 110 && residual <= 30)
 312				txdesc->u.plcp.service |= 0x80;
 313		}
 314
 315		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
 316		txdesc->u.plcp.length_low = duration & 0xff;
 317
 318		/*
  319		 * When the short preamble is used we should set the
  320		 * preamble bit in the signal field.
 321		 */
 322		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 323			txdesc->u.plcp.signal |= 0x08;
 324	}
 325}
 326
 327static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 328						struct sk_buff *skb,
 329						struct txentry_desc *txdesc,
 330						struct ieee80211_sta *sta,
 331						const struct rt2x00_rate *hwrate)
 332{
 333	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 334	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 335	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 336	struct rt2x00_sta *sta_priv = NULL;
 337
 338	if (sta) {
 339		txdesc->u.ht.mpdu_density =
 340		    sta->ht_cap.ampdu_density;
 341
 342		sta_priv = sta_to_rt2x00_sta(sta);
 343		txdesc->u.ht.wcid = sta_priv->wcid;
 344	}
 345
 346	/*
  347	 * If IEEE80211_TX_RC_MCS is set, txrate->idx contains the
  348	 * MCS rate to be used.
 349	 */
 350	if (txrate->flags & IEEE80211_TX_RC_MCS) {
 351		txdesc->u.ht.mcs = txrate->idx;
 352
 353		/*
  354		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
  355		 * when more than one TX stream is used (>MCS7).
 356		 */
 357		if (sta && txdesc->u.ht.mcs > 7 &&
 358		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
 359			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
 360	} else {
 361		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
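     		/* For legacy (non-MCS) rates bit 3 selects the short preamble. */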
 362		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 363			txdesc->u.ht.mcs |= 0x08;
 364	}
 365
 366	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
 367		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 368			txdesc->u.ht.txop = TXOP_SIFS;
 369		else
 370			txdesc->u.ht.txop = TXOP_BACKOFF;
 371
  372		/* Leave all other settings at zero. */
 373		return;
 374	}
 375
 376	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
 377
 378	/*
 379	 * Only one STBC stream is supported for now.
 380	 */
 381	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
 382		txdesc->u.ht.stbc = 1;
 383
 384	/*
  385	 * This frame is eligible for an AMPDU; however, don't aggregate
  386	 * frames that are intended to probe a specific TX rate.
 387	 */
 388	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
 389	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
 390		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 391
 392	/*
  393	 * Set 40 MHz mode if necessary (for legacy rates this will
 394	 * duplicate the frame to both channels).
 395	 */
 396	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
 397	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
 398		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
 399	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
 400		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
 401
 402	/*
 403	 * Determine IFS values
 404	 * - Use TXOP_BACKOFF for management frames except beacons
 405	 * - Use TXOP_SIFS for fragment bursts
 406	 * - Use TXOP_HTTXOP for everything else
 407	 *
 408	 * Note: rt2800 devices won't use CTS protection (if used)
 409	 * for frames not transmitted with TXOP_HTTXOP
 410	 */
 411	if (ieee80211_is_mgmt(hdr->frame_control) &&
 412	    !ieee80211_is_beacon(hdr->frame_control))
 413		txdesc->u.ht.txop = TXOP_BACKOFF;
 414	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 415		txdesc->u.ht.txop = TXOP_SIFS;
 416	else
 417		txdesc->u.ht.txop = TXOP_HTTXOP;
 418}
 419
 420static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 421					     struct sk_buff *skb,
 422					     struct txentry_desc *txdesc,
 423					     struct ieee80211_sta *sta)
 424{
 425	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 426	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 427	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 428	struct ieee80211_rate *rate;
 429	const struct rt2x00_rate *hwrate = NULL;
 430
 431	memset(txdesc, 0, sizeof(*txdesc));
 432
 433	/*
 434	 * Header and frame information.
 435	 */
 436	txdesc->length = skb->len;
 437	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
 438
 439	/*
 440	 * Check whether this frame is to be acked.
 441	 */
 442	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 443		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 444
 445	/*
  446	 * Check if this is an RTS/CTS frame.
 447	 */
 448	if (ieee80211_is_rts(hdr->frame_control) ||
 449	    ieee80211_is_cts(hdr->frame_control)) {
 450		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 451		if (ieee80211_is_rts(hdr->frame_control))
 452			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
 453		else
 454			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
 455		if (tx_info->control.rts_cts_rate_idx >= 0)
 456			rate =
 457			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
 458	}
 459
 460	/*
 461	 * Determine retry information.
 462	 */
 463	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
 464	if (txdesc->retry_limit >= rt2x00dev->long_retry)
 465		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 466
 467	/*
 468	 * Check if more fragments are pending
 469	 */
 470	if (ieee80211_has_morefrags(hdr->frame_control)) {
 471		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 472		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
 473	}
 474
 475	/*
 476	 * Check if more frames (!= fragments) are pending
 477	 */
 478	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
 479		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 480
 481	/*
 482	 * Beacons and probe responses require the tsf timestamp
 483	 * to be inserted into the frame.
 484	 */
 485	if (ieee80211_is_beacon(hdr->frame_control) ||
 486	    ieee80211_is_probe_resp(hdr->frame_control))
 487		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 488
 489	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
 490	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
 491		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
 492
 493	/*
 494	 * Determine rate modulation.
 495	 */
 496	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 497		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
 498	else if (txrate->flags & IEEE80211_TX_RC_MCS)
 499		txdesc->rate_mode = RATE_MODE_HT_MIX;
 500	else {
 501		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
 502		hwrate = rt2x00_get_rate(rate->hw_value);
 503		if (hwrate->flags & DEV_RATE_OFDM)
 504			txdesc->rate_mode = RATE_MODE_OFDM;
 505		else
 506			txdesc->rate_mode = RATE_MODE_CCK;
 507	}
 508
 509	/*
 510	 * Apply TX descriptor handling by components
 511	 */
 512	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 513	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 514
 515	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
 516		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 517						   sta, hwrate);
 518	else
 519		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
 520						      hwrate);
 521}
 522
 523static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 524				     struct txentry_desc *txdesc)
 525{
 526	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 527
 528	/*
  529	 * This should not happen: we already checked that the entry
  530	 * was ours. When the hardware disagrees, there has been
  531	 * queue corruption!
 532	 */
 533	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
 534		     rt2x00dev->ops->lib->get_entry_state(entry))) {
 535		rt2x00_err(rt2x00dev,
 536			   "Corrupt queue %d, accessing entry which is not ours\n"
 537			   "Please file bug report to %s\n",
 538			   entry->queue->qid, DRV_PROJECT);
 539		return -EINVAL;
 540	}
 541
 542	/*
 543	 * Add the requested extra tx headroom in front of the skb.
 544	 */
 545	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
 546	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
 547
 548	/*
 549	 * Call the driver's write_tx_data function, if it exists.
 550	 */
 551	if (rt2x00dev->ops->lib->write_tx_data)
 552		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
 553
 554	/*
 555	 * Map the skb to DMA.
 556	 */
 557	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
 558	    rt2x00queue_map_txskb(entry))
 559		return -ENOMEM;
 560
 561	return 0;
 562}
 563
 564static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 565					    struct txentry_desc *txdesc)
 566{
 567	struct data_queue *queue = entry->queue;
 568
 569	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
 570
 571	/*
  572	 * All processing on the frame has been completed; it is now
  573	 * ready to be dumped to userspace through debugfs.
 574	 */
 575	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
 576}
 577
 578static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 579				      struct txentry_desc *txdesc)
 580{
 581	/*
  582	 * Check if we need to kick the queue. There are however a few rules:
  583	 *	1) Don't kick unless this is the last frame in a burst.
  584	 *	   When the burst flag is set, this frame is always followed
  585	 *	   by another frame which is in some way related to it.
  586	 *	   This is true for fragments, RTS or CTS-to-self frames.
  587	 *	2) Rule 1 can be broken when the number of available entries
  588	 *	   in the queue drops below a certain threshold.
 589	 */
 590	if (rt2x00queue_threshold(queue) ||
 591	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
 592		queue->rt2x00dev->ops->lib->kick_queue(queue);
 593}
 594
 595static void rt2x00queue_bar_check(struct queue_entry *entry)
 596{
 597	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 598	struct ieee80211_bar *bar = (void *) (entry->skb->data +
 599				    rt2x00dev->extra_tx_headroom);
 600	struct rt2x00_bar_list_entry *bar_entry;
 601
 602	if (likely(!ieee80211_is_back_req(bar->frame_control)))
 603		return;
 604
 605	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
 606
 607	/*
  608	 * If the allocation fails we still send the BAR out, but we don't
  609	 * track it in our BAR list, and as a result it will be reported
  610	 * back to mac80211 as failed.
 611	 */
 612	if (!bar_entry)
 613		return;
 614
 615	bar_entry->entry = entry;
 616	bar_entry->block_acked = 0;
 617
 618	/*
  619	 * Copy the relevant parts of the 802.11 BAR into our check list
  620	 * so that we can use RCU for less overhead in the RX path, since
  621	 * sending BARs and processing the corresponding BlockAck should be
  622	 * the exception.
 623	 */
 624	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
 625	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
 626	bar_entry->control = bar->control;
 627	bar_entry->start_seq_num = bar->start_seq_num;
 628
 629	/*
 630	 * Insert BAR into our BAR check list.
 631	 */
 632	spin_lock_bh(&rt2x00dev->bar_list_lock);
 633	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
 634	spin_unlock_bh(&rt2x00dev->bar_list_lock);
 635}
 636
 637int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 638			       bool local)
 639{
 640	struct ieee80211_tx_info *tx_info;
 641	struct queue_entry *entry;
 642	struct txentry_desc txdesc;
 643	struct skb_frame_desc *skbdesc;
 644	u8 rate_idx, rate_flags;
 645	int ret = 0;
 646
 647	/*
  648	 * Copy all TX descriptor information into txdesc;
  649	 * after that we are free to use the skb->cb array
  650	 * for our own information.
 651	 */
 652	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
 653
 654	/*
  655	 * All information has been retrieved from the skb->cb array;
  656	 * now we should claim ownership of the driver part of that
  657	 * array, preserving the bitrate index and flags.
 658	 */
 659	tx_info = IEEE80211_SKB_CB(skb);
 660	rate_idx = tx_info->control.rates[0].idx;
 661	rate_flags = tx_info->control.rates[0].flags;
 662	skbdesc = get_skb_frame_desc(skb);
 663	memset(skbdesc, 0, sizeof(*skbdesc));
 664	skbdesc->tx_rate_idx = rate_idx;
 665	skbdesc->tx_rate_flags = rate_flags;
 666
 667	if (local)
 668		skbdesc->flags |= SKBDESC_NOT_MAC80211;
 669
 670	/*
 671	 * When hardware encryption is supported, and this frame
 672	 * is to be encrypted, we should strip the IV/EIV data from
 673	 * the frame so we can provide it to the driver separately.
 674	 */
 675	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 676	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
 677		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
 678			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 679		else
 680			rt2x00crypto_tx_remove_iv(skb, &txdesc);
 681	}
 682
 683	/*
 684	 * When DMA allocation is required we should guarantee to the
 685	 * driver that the DMA is aligned to a 4-byte boundary.
  686	 * However, some drivers require L2 padding to pad the payload
  687	 * rather than the header. This can be a requirement for both
  688	 * PCI and USB devices, while header alignment is only valid
  689	 * for PCI devices.
 690	 */
 691	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
 692		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
 693	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
 694		rt2x00queue_align_frame(skb);
 695
 696	/*
  697	 * This function must be called with bottom halves (bh) disabled.
 698	 */
 699	spin_lock(&queue->tx_lock);
 700
 701	if (unlikely(rt2x00queue_full(queue))) {
 702		rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
 703			   queue->qid);
 704		ret = -ENOBUFS;
 705		goto out;
 706	}
 707
 708	entry = rt2x00queue_get_entry(queue, Q_INDEX);
 709
 710	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
 711				      &entry->flags))) {
 712		rt2x00_err(queue->rt2x00dev,
 713			   "Arrived at non-free entry in the non-full queue %d\n"
 714			   "Please file bug report to %s\n",
 715			   queue->qid, DRV_PROJECT);
 716		ret = -EINVAL;
 717		goto out;
 718	}
 719
 720	skbdesc->entry = entry;
 721	entry->skb = skb;
 722
 723	/*
  724	 * It is possible that the queue was corrupted and this
  725	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
  726	 * this frame will simply be dropped.
 727	 */
 728	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
 729		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 730		entry->skb = NULL;
 731		ret = -EIO;
 732		goto out;
 733	}
 734
 735	/*
 736	 * Put BlockAckReqs into our check list for driver BA processing.
 737	 */
 738	rt2x00queue_bar_check(entry);
 739
 740	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 741
 742	rt2x00queue_index_inc(entry, Q_INDEX);
 743	rt2x00queue_write_tx_descriptor(entry, &txdesc);
 744	rt2x00queue_kick_tx_queue(queue, &txdesc);
 745
 746out:
 747	spin_unlock(&queue->tx_lock);
 748	return ret;
 749}
 750
 751int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
 752			     struct ieee80211_vif *vif)
 753{
 754	struct rt2x00_intf *intf = vif_to_intf(vif);
 755
 756	if (unlikely(!intf->beacon))
 757		return -ENOBUFS;
 758
 759	mutex_lock(&intf->beacon_skb_mutex);
 760
 761	/*
 762	 * Clean up the beacon skb.
 763	 */
 764	rt2x00queue_free_skb(intf->beacon);
 765
 766	/*
 767	 * Clear beacon (single bssid devices don't need to clear the beacon
 768	 * since the beacon queue will get stopped anyway).
 769	 */
 770	if (rt2x00dev->ops->lib->clear_beacon)
 771		rt2x00dev->ops->lib->clear_beacon(intf->beacon);
 772
 773	mutex_unlock(&intf->beacon_skb_mutex);
 774
 775	return 0;
 776}
 777
 778int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
 779				     struct ieee80211_vif *vif)
 780{
 781	struct rt2x00_intf *intf = vif_to_intf(vif);
 782	struct skb_frame_desc *skbdesc;
 783	struct txentry_desc txdesc;
 784
 785	if (unlikely(!intf->beacon))
 786		return -ENOBUFS;
 787
 788	/*
 789	 * Clean up the beacon skb.
 790	 */
 791	rt2x00queue_free_skb(intf->beacon);
 792
 793	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
 794	if (!intf->beacon->skb)
 795		return -ENOMEM;
 796
 797	/*
  798	 * Copy all TX descriptor information into txdesc;
  799	 * after that we are free to use the skb->cb array
  800	 * for our own information.
 801	 */
 802	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 803
 804	/*
 805	 * Fill in skb descriptor
 806	 */
 807	skbdesc = get_skb_frame_desc(intf->beacon->skb);
 808	memset(skbdesc, 0, sizeof(*skbdesc));
 809	skbdesc->entry = intf->beacon;
 810
 811	/*
 812	 * Send beacon to hardware.
 813	 */
 814	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 815
 816	return 0;
 817
 818}
 819
 820int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 821			      struct ieee80211_vif *vif)
 822{
 823	struct rt2x00_intf *intf = vif_to_intf(vif);
 824	int ret;
 825
 826	mutex_lock(&intf->beacon_skb_mutex);
 827	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
 828	mutex_unlock(&intf->beacon_skb_mutex);
 829
 830	return ret;
 831}
 832
 833bool rt2x00queue_for_each_entry(struct data_queue *queue,
 834				enum queue_index start,
 835				enum queue_index end,
 836				void *data,
 837				bool (*fn)(struct queue_entry *entry,
 838					   void *data))
 839{
 840	unsigned long irqflags;
 841	unsigned int index_start;
 842	unsigned int index_end;
 843	unsigned int i;
 844
 845	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
 846		rt2x00_err(queue->rt2x00dev,
 847			   "Entry requested from invalid index range (%d - %d)\n",
 848			   start, end);
 849		return true;
 850	}
 851
 852	/*
  853	 * Only protect the range we are going to loop over;
  854	 * if during our loop an extra entry is set to pending,
  855	 * it should not be kicked during this run, since it
  856	 * is part of another TX operation.
 857	 */
 858	spin_lock_irqsave(&queue->index_lock, irqflags);
 859	index_start = queue->index[start];
 860	index_end = queue->index[end];
 861	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 862
 863	/*
  864	 * Start from the TX done pointer; this guarantees that we will
  865	 * send out all frames in the correct order.
 866	 */
 867	if (index_start < index_end) {
 868		for (i = index_start; i < index_end; i++) {
 869			if (fn(&queue->entries[i], data))
 870				return true;
 871		}
 872	} else {
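     		/* The range wraps around the end of the ring: walk to the
     		 * queue limit first and then continue from entry 0. */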
 873		for (i = index_start; i < queue->limit; i++) {
 874			if (fn(&queue->entries[i], data))
 875				return true;
 876		}
 877
 878		for (i = 0; i < index_end; i++) {
 879			if (fn(&queue->entries[i], data))
 880				return true;
 881		}
 882	}
 883
 884	return false;
 885}
 886EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 887
 888struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 889					  enum queue_index index)
 890{
 891	struct queue_entry *entry;
 892	unsigned long irqflags;
 893
 894	if (unlikely(index >= Q_INDEX_MAX)) {
 895		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
 896			   index);
 897		return NULL;
 898	}
 899
 900	spin_lock_irqsave(&queue->index_lock, irqflags);
 901
 902	entry = &queue->entries[queue->index[index]];
 903
 904	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 905
 906	return entry;
 907}
 908EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 909
 910void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 911{
 912	struct data_queue *queue = entry->queue;
 913	unsigned long irqflags;
 914
 915	if (unlikely(index >= Q_INDEX_MAX)) {
 916		rt2x00_err(queue->rt2x00dev,
 917			   "Index change on invalid index type (%d)\n", index);
 918		return;
 919	}
 920
 921	spin_lock_irqsave(&queue->index_lock, irqflags);
 922
 923	queue->index[index]++;
 924	if (queue->index[index] >= queue->limit)
 925		queue->index[index] = 0;
 926
 927	entry->last_action = jiffies;
 928
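     	/* Q_INDEX advances when a frame is queued, Q_INDEX_DONE when one
     	 * completes; keep the queue length and done counter in sync. */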
 929	if (index == Q_INDEX) {
 930		queue->length++;
 931	} else if (index == Q_INDEX_DONE) {
 932		queue->length--;
 933		queue->count++;
 934	}
 935
 936	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 937}
 938
 939static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 940{
 941	switch (queue->qid) {
 942	case QID_AC_VO:
 943	case QID_AC_VI:
 944	case QID_AC_BE:
 945	case QID_AC_BK:
 946		/*
 947		 * For TX queues, we have to disable the queue
 948		 * inside mac80211.
 949		 */
 950		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
 951		break;
 952	default:
 953		break;
 954	}
 955}
 956void rt2x00queue_pause_queue(struct data_queue *queue)
 957{
 958	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 959	    !test_bit(QUEUE_STARTED, &queue->flags) ||
 960	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
 961		return;
 962
 963	rt2x00queue_pause_queue_nocheck(queue);
 964}
 965EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 966
 967void rt2x00queue_unpause_queue(struct data_queue *queue)
 968{
 969	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 970	    !test_bit(QUEUE_STARTED, &queue->flags) ||
 971	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
 972		return;
 973
 974	switch (queue->qid) {
 975	case QID_AC_VO:
 976	case QID_AC_VI:
 977	case QID_AC_BE:
 978	case QID_AC_BK:
 979		/*
 980		 * For TX queues, we have to enable the queue
 981		 * inside mac80211.
 982		 */
 983		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
 984		break;
 985	case QID_RX:
 986		/*
 987		 * For RX we need to kick the queue now in order to
 988		 * receive frames.
 989		 */
 990		queue->rt2x00dev->ops->lib->kick_queue(queue);
 991	default:
 992		break;
 993	}
 994}
 995EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
 996
 997void rt2x00queue_start_queue(struct data_queue *queue)
 998{
 999	mutex_lock(&queue->status_lock);
1000
1001	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
1002	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
1003		mutex_unlock(&queue->status_lock);
1004		return;
1005	}
1006
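     	/* Mark the queue as paused so that rt2x00queue_unpause_queue()
     	 * below performs the actual wake-up once the driver has started
     	 * the queue. */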
1007	set_bit(QUEUE_PAUSED, &queue->flags);
1008
1009	queue->rt2x00dev->ops->lib->start_queue(queue);
1010
1011	rt2x00queue_unpause_queue(queue);
1012
1013	mutex_unlock(&queue->status_lock);
1014}
1015EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
1016
1017void rt2x00queue_stop_queue(struct data_queue *queue)
1018{
1019	mutex_lock(&queue->status_lock);
1020
1021	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
1022		mutex_unlock(&queue->status_lock);
1023		return;
1024	}
1025
1026	rt2x00queue_pause_queue_nocheck(queue);
1027
1028	queue->rt2x00dev->ops->lib->stop_queue(queue);
1029
1030	mutex_unlock(&queue->status_lock);
1031}
1032EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
1033
1034void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
1035{
1036	bool started;
1037	bool tx_queue =
1038		(queue->qid == QID_AC_VO) ||
1039		(queue->qid == QID_AC_VI) ||
1040		(queue->qid == QID_AC_BE) ||
1041		(queue->qid == QID_AC_BK);
1042
1043	mutex_lock(&queue->status_lock);
1044
1045	/*
1046	 * If the queue has been started, we must stop it temporarily
 1047	 * to prevent any new frames from being queued on the device. If
1048	 * we are not dropping the pending frames, the queue must
1049	 * only be stopped in the software and not the hardware,
1050	 * otherwise the queue will never become empty on its own.
1051	 */
1052	started = test_bit(QUEUE_STARTED, &queue->flags);
1053	if (started) {
1054		/*
1055		 * Pause the queue
1056		 */
1057		rt2x00queue_pause_queue(queue);
1058
1059		/*
1060		 * If we are not supposed to drop any pending
1061		 * frames, this means we must force a start (=kick)
1062		 * to the queue to make sure the hardware will
1063		 * start transmitting.
1064		 */
1065		if (!drop && tx_queue)
1066			queue->rt2x00dev->ops->lib->kick_queue(queue);
1067	}
1068
1069	/*
 1070	 * Check if the driver supports flushing; if that is the case we can
 1071	 * defer the flushing to the driver. Otherwise we must use the
 1072	 * alternative which just waits for the queue to become empty.
1073	 */
1074	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
1075		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
1076
1077	/*
1078	 * The queue flush has failed...
1079	 */
1080	if (unlikely(!rt2x00queue_empty(queue)))
1081		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1082			    queue->qid);
1083
1084	/*
1085	 * Restore the queue to the previous status
1086	 */
1087	if (started)
1088		rt2x00queue_unpause_queue(queue);
1089
1090	mutex_unlock(&queue->status_lock);
1091}
1092EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
1093
1094void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
1095{
1096	struct data_queue *queue;
1097
1098	/*
1099	 * rt2x00queue_start_queue will call ieee80211_wake_queue
 1100	 * for each queue after it has been properly initialized.
1101	 */
1102	tx_queue_for_each(rt2x00dev, queue)
1103		rt2x00queue_start_queue(queue);
1104
1105	rt2x00queue_start_queue(rt2x00dev->rx);
1106}
1107EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
1108
1109void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
1110{
1111	struct data_queue *queue;
1112
1113	/*
1114	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
 1115	 * as well, but we are completely shutting everything down
 1116	 * now, so it is much safer to stop all TX queues at once,
1117	 * and use rt2x00queue_stop_queue for cleaning up.
1118	 */
1119	ieee80211_stop_queues(rt2x00dev->hw);
1120
1121	tx_queue_for_each(rt2x00dev, queue)
1122		rt2x00queue_stop_queue(queue);
1123
1124	rt2x00queue_stop_queue(rt2x00dev->rx);
1125}
1126EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
1127
1128void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
1129{
1130	struct data_queue *queue;
1131
1132	tx_queue_for_each(rt2x00dev, queue)
1133		rt2x00queue_flush_queue(queue, drop);
1134
1135	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
1136}
1137EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
1138
1139static void rt2x00queue_reset(struct data_queue *queue)
1140{
1141	unsigned long irqflags;
1142	unsigned int i;
1143
1144	spin_lock_irqsave(&queue->index_lock, irqflags);
1145
1146	queue->count = 0;
1147	queue->length = 0;
1148
1149	for (i = 0; i < Q_INDEX_MAX; i++)
1150		queue->index[i] = 0;
1151
1152	spin_unlock_irqrestore(&queue->index_lock, irqflags);
1153}
1154
1155void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
1156{
1157	struct data_queue *queue;
1158	unsigned int i;
1159
1160	queue_for_each(rt2x00dev, queue) {
1161		rt2x00queue_reset(queue);
1162
1163		for (i = 0; i < queue->limit; i++)
1164			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
1165	}
1166}
1167
1168static int rt2x00queue_alloc_entries(struct data_queue *queue)
1169{
1170	struct queue_entry *entries;
1171	unsigned int entry_size;
1172	unsigned int i;
1173
1174	rt2x00queue_reset(queue);
1175
1176	/*
1177	 * Allocate all queue entries.
1178	 */
1179	entry_size = sizeof(*entries) + queue->priv_size;
1180	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1181	if (!entries)
1182		return -ENOMEM;
1183
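     /*
      * The queue_entry structs are laid out first in the allocation, followed
      * by one private-data block per entry; compute the address of the private
      * block belonging to entry __index.
      */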
1184#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
1185	(((char *)(__base)) + ((__limit) * (__esize)) + \
1186	    ((__index) * (__psize)))
1187
1188	for (i = 0; i < queue->limit; i++) {
1189		entries[i].flags = 0;
1190		entries[i].queue = queue;
1191		entries[i].skb = NULL;
1192		entries[i].entry_idx = i;
1193		entries[i].priv_data =
1194		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1195					    sizeof(*entries), queue->priv_size);
1196	}
1197
1198#undef QUEUE_ENTRY_PRIV_OFFSET
1199
1200	queue->entries = entries;
1201
1202	return 0;
1203}
1204
1205static void rt2x00queue_free_skbs(struct data_queue *queue)
1206{
1207	unsigned int i;
1208
1209	if (!queue->entries)
1210		return;
1211
1212	for (i = 0; i < queue->limit; i++) {
1213		rt2x00queue_free_skb(&queue->entries[i]);
1214	}
1215}
1216
1217static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1218{
1219	unsigned int i;
1220	struct sk_buff *skb;
1221
1222	for (i = 0; i < queue->limit; i++) {
1223		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1224		if (!skb)
1225			return -ENOMEM;
1226		queue->entries[i].skb = skb;
1227	}
1228
1229	return 0;
1230}
1231
1232int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1233{
1234	struct data_queue *queue;
1235	int status;
1236
1237	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
1238	if (status)
1239		goto exit;
1240
1241	tx_queue_for_each(rt2x00dev, queue) {
1242		status = rt2x00queue_alloc_entries(queue);
1243		if (status)
1244			goto exit;
1245	}
1246
1247	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
1248	if (status)
1249		goto exit;
1250
1251	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1252		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1253		if (status)
1254			goto exit;
1255	}
1256
1257	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
1258	if (status)
1259		goto exit;
1260
1261	return 0;
1262
1263exit:
1264	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1265
1266	rt2x00queue_uninitialize(rt2x00dev);
1267
1268	return status;
1269}
1270
1271void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
1272{
1273	struct data_queue *queue;
1274
1275	rt2x00queue_free_skbs(rt2x00dev->rx);
1276
1277	queue_for_each(rt2x00dev, queue) {
1278		kfree(queue->entries);
1279		queue->entries = NULL;
1280	}
1281}
1282
1283static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
1284			     struct data_queue *queue, enum data_queue_qid qid)
1285{
1286	mutex_init(&queue->status_lock);
1287	spin_lock_init(&queue->tx_lock);
1288	spin_lock_init(&queue->index_lock);
1289
1290	queue->rt2x00dev = rt2x00dev;
1291	queue->qid = qid;
1292	queue->txop = 0;
1293	queue->aifs = 2;
1294	queue->cw_min = 5;
1295	queue->cw_max = 10;
1296
1297	rt2x00dev->ops->queue_init(queue);
1298
1299	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
1300}
1301
1302int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1303{
1304	struct data_queue *queue;
1305	enum data_queue_qid qid;
1306	unsigned int req_atim =
1307	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1308
1309	/*
1310	 * We need the following queues:
1311	 * RX: 1
1312	 * TX: ops->tx_queues
1313	 * Beacon: 1
1314	 * Atim: 1 (if required)
1315	 */
1316	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
1317
1318	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1319	if (!queue) {
1320		rt2x00_err(rt2x00dev, "Queue allocation failed\n");
1321		return -ENOMEM;
1322	}
1323
1324	/*
1325	 * Initialize pointers
1326	 */
1327	rt2x00dev->rx = queue;
1328	rt2x00dev->tx = &queue[1];
1329	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1330	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1331
1332	/*
1333	 * Initialize queue parameters.
1334	 * RX: qid = QID_RX
1335	 * TX: qid = QID_AC_VO + index
1336	 * TX: cw_min: 2^5 = 32.
1337	 * TX: cw_max: 2^10 = 1024.
1338	 * BCN: qid = QID_BEACON
1339	 * ATIM: qid = QID_ATIM
1340	 */
1341	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
1342
1343	qid = QID_AC_VO;
1344	tx_queue_for_each(rt2x00dev, queue)
1345		rt2x00queue_init(rt2x00dev, queue, qid++);
1346
1347	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
1348	if (req_atim)
1349		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1350
1351	return 0;
1352}
1353
1354void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
1355{
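     	/* All queues were allocated as a single array starting at rx
     	 * (see rt2x00queue_allocate), so a single kfree releases them all. */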
1356	kfree(rt2x00dev->rx);
1357	rt2x00dev->rx = NULL;
1358	rt2x00dev->tx = NULL;
1359	rt2x00dev->bcn = NULL;
1360}