
/drivers/net/wireless/iwlegacy/iwl3945-base.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0


   1/******************************************************************************
   2 *
   3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
   4 *
   5 * Portions of this file are derived from the ipw3945 project, as well
   6 * as portions of the ieee80211 subsystem header files.
   7 *
   8 * This program is free software; you can redistribute it and/or modify it
   9 * under the terms of version 2 of the GNU General Public License as
  10 * published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful, but WITHOUT
  13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  15 * more details.
  16 *
  17 * You should have received a copy of the GNU General Public License along with
  18 * this program; if not, write to the Free Software Foundation, Inc.,
  19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  20 *
  21 * The full GNU General Public License is included in this distribution in the
  22 * file called LICENSE.
  23 *
  24 * Contact Information:
  25 *  Intel Linux Wireless <ilw@linux.intel.com>
  26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27 *
  28 *****************************************************************************/
  29
  30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  31
  32#include <linux/kernel.h>
  33#include <linux/module.h>
  34#include <linux/init.h>
  35#include <linux/pci.h>
  36#include <linux/pci-aspm.h>
  37#include <linux/slab.h>
  38#include <linux/dma-mapping.h>
  39#include <linux/delay.h>
  40#include <linux/sched.h>
  41#include <linux/skbuff.h>
  42#include <linux/netdevice.h>
  43#include <linux/wireless.h>
  44#include <linux/firmware.h>
  45#include <linux/etherdevice.h>
  46#include <linux/if_arp.h>
  47
  48#include <net/ieee80211_radiotap.h>
  49#include <net/mac80211.h>
  50
  51#include <asm/div64.h>
  52
  53#define DRV_NAME	"iwl3945"
  54
  55#include "iwl-fh.h"
  56#include "iwl-3945-fh.h"
  57#include "iwl-commands.h"
  58#include "iwl-sta.h"
  59#include "iwl-3945.h"
  60#include "iwl-core.h"
  61#include "iwl-helpers.h"
  62#include "iwl-dev.h"
  63#include "iwl-spectrum.h"
  64
  65/*
  66 * module name, copyright, version, etc.
  67 */
  68
  69#define DRV_DESCRIPTION	\
  70"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
  71
  72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
  73#define VD "d"
  74#else
  75#define VD
  76#endif
  77
  78/*
  79 * add "s" to indicate spectrum measurement included.
  80 * we add it here to be consistent with previous releases in which
  81 * this was configurable.
  82 */
  83#define DRV_VERSION  IWLWIFI_VERSION VD "s"
  84#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
  85#define DRV_AUTHOR     "<ilw@linux.intel.com>"
  86
  87MODULE_DESCRIPTION(DRV_DESCRIPTION);
  88MODULE_VERSION(DRV_VERSION);
  89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
  90MODULE_LICENSE("GPL");
  91
  92 /* module parameters */
  93struct iwl_mod_params iwl3945_mod_params = {
  94	.sw_crypto = 1,
  95	.restart_fw = 1,
  96	.disable_hw_scan = 1,
  97	/* the rest are 0 by default */
  98};
  99
 100/**
 101 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
 102 * @priv: eeprom and antenna fields are used to determine antenna flags
 103 *
 104 * priv->eeprom39  is used to determine if antenna AUX/MAIN are reversed
 105 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
 106 *
 107 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
 108 * IWL_ANTENNA_MAIN      - Force MAIN antenna
 109 * IWL_ANTENNA_AUX       - Force AUX antenna
 110 */
 111__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
 112{
 113	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
 114
 115	switch (iwl3945_mod_params.antenna) {
 116	case IWL_ANTENNA_DIVERSITY:
 117		return 0;
 118
 119	case IWL_ANTENNA_MAIN:
 120		if (eeprom->antenna_switch_type)
 121			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
 122		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
 123
 124	case IWL_ANTENNA_AUX:
 125		if (eeprom->antenna_switch_type)
 126			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
 127		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
 128	}
 129
 130	/* bad antenna selector value */
 131	IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
 132		iwl3945_mod_params.antenna);
 133
 134	return 0;		/* "diversity" is default if error */
 135}
 136
 137static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 138				   struct ieee80211_key_conf *keyconf,
 139				   u8 sta_id)
 140{
 141	unsigned long flags;
 142	__le16 key_flags = 0;
 143	int ret;
 144
 145	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
 146	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
 147
 148	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
 149		key_flags |= STA_KEY_MULTICAST_MSK;
 150
 151	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 152	keyconf->hw_key_idx = keyconf->keyidx;
 153	key_flags &= ~STA_KEY_FLG_INVALID;
 154
 155	spin_lock_irqsave(&priv->sta_lock, flags);
 156	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
 157	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
 158	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
 159	       keyconf->keylen);
 160
 161	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
 162	       keyconf->keylen);
 163
 164	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
 165			== STA_KEY_FLG_NO_ENC)
 166		priv->stations[sta_id].sta.key.key_offset =
 167				 iwl_legacy_get_free_ucode_key_index(priv);
  168/* else, we are overriding an existing key => no need to allocate room
 169	* in uCode. */
 170
 171	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
 172		"no space for a new key");
 173
 174	priv->stations[sta_id].sta.key.key_flags = key_flags;
 175	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 176	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 177
 178	IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
 179
 180	ret = iwl_legacy_send_add_sta(priv,
 181				&priv->stations[sta_id].sta, CMD_ASYNC);
 182
 183	spin_unlock_irqrestore(&priv->sta_lock, flags);
 184
 185	return ret;
 186}
 187
 188static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 189				  struct ieee80211_key_conf *keyconf,
 190				  u8 sta_id)
 191{
 192	return -EOPNOTSUPP;
 193}
 194
 195static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
 196				  struct ieee80211_key_conf *keyconf,
 197				  u8 sta_id)
 198{
 199	return -EOPNOTSUPP;
 200}
 201
 202static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
 203{
 204	unsigned long flags;
 205	struct iwl_legacy_addsta_cmd sta_cmd;
 206
 207	spin_lock_irqsave(&priv->sta_lock, flags);
 208	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
 209	memset(&priv->stations[sta_id].sta.key, 0,
 210		sizeof(struct iwl4965_keyinfo));
 211	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
 212	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 213	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 214	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
 215	spin_unlock_irqrestore(&priv->sta_lock, flags);
 216
 217	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
 218	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 219}
 220
 221static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
 222			struct ieee80211_key_conf *keyconf, u8 sta_id)
 223{
 224	int ret = 0;
 225
 226	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
 227
 228	switch (keyconf->cipher) {
 229	case WLAN_CIPHER_SUITE_CCMP:
 230		ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
 231		break;
 232	case WLAN_CIPHER_SUITE_TKIP:
 233		ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
 234		break;
 235	case WLAN_CIPHER_SUITE_WEP40:
 236	case WLAN_CIPHER_SUITE_WEP104:
 237		ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
 238		break;
 239	default:
 240		IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
 241			keyconf->cipher);
 242		ret = -EINVAL;
 243	}
 244
 245	IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
 246		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
 247		      sta_id, ret);
 248
 249	return ret;
 250}
 251
 252static int iwl3945_remove_static_key(struct iwl_priv *priv)
 253{
 254	int ret = -EOPNOTSUPP;
 255
 256	return ret;
 257}
 258
 259static int iwl3945_set_static_key(struct iwl_priv *priv,
 260				struct ieee80211_key_conf *key)
 261{
 262	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
 263	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
 264		return -EOPNOTSUPP;
 265
 266	IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
 267	return -EINVAL;
 268}
 269
 270static void iwl3945_clear_free_frames(struct iwl_priv *priv)
 271{
 272	struct list_head *element;
 273
 274	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
 275		       priv->frames_count);
 276
 277	while (!list_empty(&priv->free_frames)) {
 278		element = priv->free_frames.next;
 279		list_del(element);
 280		kfree(list_entry(element, struct iwl3945_frame, list));
 281		priv->frames_count--;
 282	}
 283
 284	if (priv->frames_count) {
 285		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
 286			    priv->frames_count);
 287		priv->frames_count = 0;
 288	}
 289}
 290
 291static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
 292{
 293	struct iwl3945_frame *frame;
 294	struct list_head *element;
 295	if (list_empty(&priv->free_frames)) {
 296		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 297		if (!frame) {
 298			IWL_ERR(priv, "Could not allocate frame!\n");
 299			return NULL;
 300		}
 301
 302		priv->frames_count++;
 303		return frame;
 304	}
 305
 306	element = priv->free_frames.next;
 307	list_del(element);
 308	return list_entry(element, struct iwl3945_frame, list);
 309}
 310
 311static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
 312{
 313	memset(frame, 0, sizeof(*frame));
 314	list_add(&frame->list, &priv->free_frames);
 315}
 316
 317unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
 318				struct ieee80211_hdr *hdr,
 319				int left)
 320{
 321
 322	if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
 323		return 0;
 324
 325	if (priv->beacon_skb->len > left)
 326		return 0;
 327
 328	memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
 329
 330	return priv->beacon_skb->len;
 331}
 332
 333static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
 334{
 335	struct iwl3945_frame *frame;
 336	unsigned int frame_size;
 337	int rc;
 338	u8 rate;
 339
 340	frame = iwl3945_get_free_frame(priv);
 341
 342	if (!frame) {
 343		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
 344			  "command.\n");
 345		return -ENOMEM;
 346	}
 347
 348	rate = iwl_legacy_get_lowest_plcp(priv,
 349				&priv->contexts[IWL_RXON_CTX_BSS]);
 350
 351	frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
 352
 353	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
 354			      &frame->u.cmd[0]);
 355
 356	iwl3945_free_frame(priv, frame);
 357
 358	return rc;
 359}
 360
 361static void iwl3945_unset_hw_params(struct iwl_priv *priv)
 362{
 363	if (priv->_3945.shared_virt)
 364		dma_free_coherent(&priv->pci_dev->dev,
 365				  sizeof(struct iwl3945_shared),
 366				  priv->_3945.shared_virt,
 367				  priv->_3945.shared_phys);
 368}
 369
 370static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
 371				      struct ieee80211_tx_info *info,
 372				      struct iwl_device_cmd *cmd,
 373				      struct sk_buff *skb_frag,
 374				      int sta_id)
 375{
 376	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 377	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
 378
 379	tx_cmd->sec_ctl = 0;
 380
 381	switch (keyinfo->cipher) {
 382	case WLAN_CIPHER_SUITE_CCMP:
 383		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
 384		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
 385		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 386		break;
 387
 388	case WLAN_CIPHER_SUITE_TKIP:
 389		break;
 390
 391	case WLAN_CIPHER_SUITE_WEP104:
 392		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 393		/* fall through */
 394	case WLAN_CIPHER_SUITE_WEP40:
 395		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
 396		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
 397
 398		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
 399
 400		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
 401			     "with key %d\n", info->control.hw_key->hw_key_idx);
 402		break;
 403
 404	default:
 405		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
 406		break;
 407	}
 408}
 409
 410/*
 411 * handle build REPLY_TX command notification.
 412 */
 413static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 414				  struct iwl_device_cmd *cmd,
 415				  struct ieee80211_tx_info *info,
 416				  struct ieee80211_hdr *hdr, u8 std_id)
 417{
 418	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 419	__le32 tx_flags = tx_cmd->tx_flags;
 420	__le16 fc = hdr->frame_control;
 421
 422	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 423	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 424		tx_flags |= TX_CMD_FLG_ACK_MSK;
 425		if (ieee80211_is_mgmt(fc))
 426			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 427		if (ieee80211_is_probe_resp(fc) &&
 428		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
 429			tx_flags |= TX_CMD_FLG_TSF_MSK;
 430	} else {
 431		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
 432		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 433	}
 434
 435	tx_cmd->sta_id = std_id;
 436	if (ieee80211_has_morefrags(fc))
 437		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
 438
 439	if (ieee80211_is_data_qos(fc)) {
 440		u8 *qc = ieee80211_get_qos_ctl(hdr);
 441		tx_cmd->tid_tspec = qc[0] & 0xf;
 442		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 443	} else {
 444		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 445	}
 446
 447	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
 448
 449	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 450	if (ieee80211_is_mgmt(fc)) {
 451		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
 452			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
 453		else
 454			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 455	} else {
 456		tx_cmd->timeout.pm_frame_timeout = 0;
 457	}
 458
 459	tx_cmd->driver_txop = 0;
 460	tx_cmd->tx_flags = tx_flags;
 461	tx_cmd->next_frame_len = 0;
 462}
 463
 464/*
 465 * start REPLY_TX command process
 466 */
 467static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 468{
 469	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 470	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 471	struct iwl3945_tx_cmd *tx_cmd;
 472	struct iwl_tx_queue *txq = NULL;
 473	struct iwl_queue *q = NULL;
 474	struct iwl_device_cmd *out_cmd;
 475	struct iwl_cmd_meta *out_meta;
 476	dma_addr_t phys_addr;
 477	dma_addr_t txcmd_phys;
 478	int txq_id = skb_get_queue_mapping(skb);
 479	u16 len, idx, hdr_len;
 480	u8 id;
 481	u8 unicast;
 482	u8 sta_id;
 483	u8 tid = 0;
 484	__le16 fc;
 485	u8 wait_write_ptr = 0;
 486	unsigned long flags;
 487
 488	spin_lock_irqsave(&priv->lock, flags);
 489	if (iwl_legacy_is_rfkill(priv)) {
 490		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
 491		goto drop_unlock;
 492	}
 493
 494	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
 495		IWL_ERR(priv, "ERROR: No TX rate available.\n");
 496		goto drop_unlock;
 497	}
 498
 499	unicast = !is_multicast_ether_addr(hdr->addr1);
 500	id = 0;
 501
 502	fc = hdr->frame_control;
 503
 504#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 505	if (ieee80211_is_auth(fc))
 506		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
 507	else if (ieee80211_is_assoc_req(fc))
 508		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
 509	else if (ieee80211_is_reassoc_req(fc))
 510		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
 511#endif
 512
 513	spin_unlock_irqrestore(&priv->lock, flags);
 514
 515	hdr_len = ieee80211_hdrlen(fc);
 516
 517	/* Find index into station table for destination station */
 518	sta_id = iwl_legacy_sta_id_or_broadcast(
 519			priv, &priv->contexts[IWL_RXON_CTX_BSS],
 520			info->control.sta);
 521	if (sta_id == IWL_INVALID_STATION) {
 522		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 523			       hdr->addr1);
 524		goto drop;
 525	}
 526
 527	IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
 528
 529	if (ieee80211_is_data_qos(fc)) {
 530		u8 *qc = ieee80211_get_qos_ctl(hdr);
 531		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 532		if (unlikely(tid >= MAX_TID_COUNT))
 533			goto drop;
 534	}
 535
 536	/* Descriptor for chosen Tx queue */
 537	txq = &priv->txq[txq_id];
 538	q = &txq->q;
 539
 540	if ((iwl_legacy_queue_space(q) < q->high_mark))
 541		goto drop;
 542
 543	spin_lock_irqsave(&priv->lock, flags);
 544
 545	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
 546
 547	/* Set up driver data for this TFD */
 548	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 549	txq->txb[q->write_ptr].skb = skb;
 550	txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 551
 552	/* Init first empty entry in queue's array of Tx/cmd buffers */
 553	out_cmd = txq->cmd[idx];
 554	out_meta = &txq->meta[idx];
 555	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
 556	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
 557	memset(tx_cmd, 0, sizeof(*tx_cmd));
 558
 559	/*
 560	 * Set up the Tx-command (not MAC!) header.
 561	 * Store the chosen Tx queue and TFD index within the sequence field;
 562	 * after Tx, uCode's Tx response will return this value so driver can
 563	 * locate the frame within the tx queue and do post-tx processing.
 564	 */
 565	out_cmd->hdr.cmd = REPLY_TX;
 566	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 567				INDEX_TO_SEQ(q->write_ptr)));
 568
 569	/* Copy MAC header from skb into command buffer */
 570	memcpy(tx_cmd->hdr, hdr, hdr_len);
 571
 572
 573	if (info->control.hw_key)
 574		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
 575
 576	/* TODO need this for burst mode later on */
 577	iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
 578
 579	/* set is_hcca to 0; it probably will never be implemented */
 580	iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
 581
 582	/* Total # bytes to be transmitted */
 583	len = (u16)skb->len;
 584	tx_cmd->len = cpu_to_le16(len);
 585
 586	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
 587	iwl_legacy_update_stats(priv, true, fc, len);
 588	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
 589	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 590
 591	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 592		txq->need_update = 1;
 593	} else {
 594		wait_write_ptr = 1;
 595		txq->need_update = 0;
 596	}
 597
 598	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
 599		     le16_to_cpu(out_cmd->hdr.sequence));
 600	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
 601	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
 602	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
 603			   ieee80211_hdrlen(fc));
 604
 605	/*
 606	 * Use the first empty entry in this queue's command buffer array
 607	 * to contain the Tx command and MAC header concatenated together
 608	 * (payload data will be in another buffer).
 609	 * Size of this varies, due to varying MAC header length.
 610	 * If end is not dword aligned, we'll have 2 extra bytes at the end
 611	 * of the MAC header (device reads on dword boundaries).
 612	 * We'll tell device about this padding later.
 613	 */
 614	len = sizeof(struct iwl3945_tx_cmd) +
 615			sizeof(struct iwl_cmd_header) + hdr_len;
 616	len = (len + 3) & ~3;
 617
 618	/* Physical address of this Tx command's header (not MAC header!),
 619	 * within command buffer array. */
 620	txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
 621				    len, PCI_DMA_TODEVICE);
 622	/* we do not map meta data ... so we can safely access address to
 623	 * provide to unmap command*/
 624	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 625	dma_unmap_len_set(out_meta, len, len);
 626
 627	/* Add buffer containing Tx command and MAC(!) header to TFD's
 628	 * first entry */
 629	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 630						   txcmd_phys, len, 1, 0);
 631
 632
 633	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 634	 * if any (802.11 null frames have no payload). */
 635	len = skb->len - hdr_len;
 636	if (len) {
 637		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 638					   len, PCI_DMA_TODEVICE);
 639		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 640							   phys_addr, len,
 641							   0, U32_PAD(len));
 642	}
 643
 644
 645	/* Tell device the write index *just past* this latest filled TFD */
 646	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
 647	iwl_legacy_txq_update_write_ptr(priv, txq);
 648	spin_unlock_irqrestore(&priv->lock, flags);
 649
 650	if ((iwl_legacy_queue_space(q) < q->high_mark)
 651	    && priv->mac80211_registered) {
 652		if (wait_write_ptr) {
 653			spin_lock_irqsave(&priv->lock, flags);
 654			txq->need_update = 1;
 655			iwl_legacy_txq_update_write_ptr(priv, txq);
 656			spin_unlock_irqrestore(&priv->lock, flags);
 657		}
 658
 659		iwl_legacy_stop_queue(priv, txq);
 660	}
 661
 662	return 0;
 663
 664drop_unlock:
 665	spin_unlock_irqrestore(&priv->lock, flags);
 666drop:
 667	return -1;
 668}
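/*
 * Illustrative sketch (not part of the original driver): the sequence field
 * built in iwl3945_tx_skb() packs the Tx queue id and the TFD index so that
 * the later Tx response can be matched back to its queue slot.  The real
 * QUEUE_TO_SEQ()/INDEX_TO_SEQ() macros live in iwl-dev.h; the field widths
 * used below (5-bit queue, 8-bit index) and the helper names are assumptions
 * made purely for illustration.
 */
static inline u16 example_seq_encode(u16 txq_id, u16 write_ptr)
{
	return ((txq_id & 0x1f) << 8) | (write_ptr & 0xff);
}

static inline void example_seq_decode(u16 seq, u16 *txq_id, u16 *index)
{
	*txq_id = (seq >> 8) & 0x1f;	/* queue the frame was placed on */
	*index  = seq & 0xff;		/* TFD slot within that queue */
}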
 669
 670static int iwl3945_get_measurement(struct iwl_priv *priv,
 671			       struct ieee80211_measurement_params *params,
 672			       u8 type)
 673{
 674	struct iwl_spectrum_cmd spectrum;
 675	struct iwl_rx_packet *pkt;
 676	struct iwl_host_cmd cmd = {
 677		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
 678		.data = (void *)&spectrum,
 679		.flags = CMD_WANT_SKB,
 680	};
 681	u32 add_time = le64_to_cpu(params->start_time);
 682	int rc;
 683	int spectrum_resp_status;
 684	int duration = le16_to_cpu(params->duration);
 685	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 686
 687	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
 688		add_time = iwl_legacy_usecs_to_beacons(priv,
 689			le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
 690			le16_to_cpu(ctx->timing.beacon_interval));
 691
 692	memset(&spectrum, 0, sizeof(spectrum));
 693
 694	spectrum.channel_count = cpu_to_le16(1);
 695	spectrum.flags =
 696	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
 697	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
 698	cmd.len = sizeof(spectrum);
 699	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
 700
 701	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
 702		spectrum.start_time =
 703			iwl_legacy_add_beacon_time(priv,
 704				priv->_3945.last_beacon_time, add_time,
 705				le16_to_cpu(ctx->timing.beacon_interval));
 706	else
 707		spectrum.start_time = 0;
 708
 709	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
 710	spectrum.channels[0].channel = params->channel;
 711	spectrum.channels[0].type = type;
 712	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
 713		spectrum.flags |= RXON_FLG_BAND_24G_MSK |
 714		    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
 715
 716	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
 717	if (rc)
 718		return rc;
 719
 720	pkt = (struct iwl_rx_packet *)cmd.reply_page;
 721	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 722		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
 723		rc = -EIO;
 724	}
 725
 726	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
 727	switch (spectrum_resp_status) {
 728	case 0:		/* Command will be handled */
 729		if (pkt->u.spectrum.id != 0xff) {
 730			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
 731						pkt->u.spectrum.id);
 732			priv->measurement_status &= ~MEASUREMENT_READY;
 733		}
 734		priv->measurement_status |= MEASUREMENT_ACTIVE;
 735		rc = 0;
 736		break;
 737
 738	case 1:		/* Command will not be handled */
 739		rc = -EAGAIN;
 740		break;
 741	}
 742
 743	iwl_legacy_free_pages(priv, cmd.reply_page);
 744
 745	return rc;
 746}
 747
 748static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 749			       struct iwl_rx_mem_buffer *rxb)
 750{
 751	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 752	struct iwl_alive_resp *palive;
 753	struct delayed_work *pwork;
 754
 755	palive = &pkt->u.alive_frame;
 756
 757	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
 758		       "0x%01X 0x%01X\n",
 759		       palive->is_valid, palive->ver_type,
 760		       palive->ver_subtype);
 761
 762	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
 763		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
 764		memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
 765		       sizeof(struct iwl_alive_resp));
 766		pwork = &priv->init_alive_start;
 767	} else {
 768		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
 769		memcpy(&priv->card_alive, &pkt->u.alive_frame,
 770		       sizeof(struct iwl_alive_resp));
 771		pwork = &priv->alive_start;
 772		iwl3945_disable_events(priv);
 773	}
 774
 775	/* We delay the ALIVE response by 5ms to
 776	 * give the HW RF Kill time to activate... */
 777	if (palive->is_valid == UCODE_VALID_OK)
 778		queue_delayed_work(priv->workqueue, pwork,
 779				   msecs_to_jiffies(5));
 780	else
 781		IWL_WARN(priv, "uCode did not respond OK.\n");
 782}
 783
 784static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
 785				 struct iwl_rx_mem_buffer *rxb)
 786{
 787#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 788	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 789#endif
 790
 791	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
 792}
 793
 794static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 795				struct iwl_rx_mem_buffer *rxb)
 796{
 797	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 798	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
 799#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
 800	u8 rate = beacon->beacon_notify_hdr.rate;
 801
 802	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
 803		"tsf %d %d rate %d\n",
 804		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
 805		beacon->beacon_notify_hdr.failure_frame,
 806		le32_to_cpu(beacon->ibss_mgr_status),
 807		le32_to_cpu(beacon->high_tsf),
 808		le32_to_cpu(beacon->low_tsf), rate);
 809#endif
 810
 811	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
 812
 813}
 814
 815/* Handle notification from uCode that card's power state is changing
 816 * due to software, hardware, or critical temperature RFKILL */
 817static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 818				    struct iwl_rx_mem_buffer *rxb)
 819{
 820	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 821	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 822	unsigned long status = priv->status;
 823
 824	IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
 825			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
 826			  (flags & SW_CARD_DISABLED) ? "Kill" : "On");
 827
 828	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
 829		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 830
 831	if (flags & HW_CARD_DISABLED)
 832		set_bit(STATUS_RF_KILL_HW, &priv->status);
 833	else
 834		clear_bit(STATUS_RF_KILL_HW, &priv->status);
 835
 836
 837	iwl_legacy_scan_cancel(priv);
 838
 839	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
 840	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
 841		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 842				test_bit(STATUS_RF_KILL_HW, &priv->status));
 843	else
 844		wake_up(&priv->wait_command_queue);
 845}
 846
 847/**
 848 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
 849 *
 850 * Setup the RX handlers for each of the reply types sent from the uCode
 851 * to the host.
 852 *
 853 * This function chains into the hardware specific files for them to setup
 854 * any hardware specific handlers as well.
 855 */
 856static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
 857{
 858	priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
 859	priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
 860	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
 861	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
 862	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
 863			iwl_legacy_rx_spectrum_measure_notif;
 864	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
 865	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
 866	    iwl_legacy_rx_pm_debug_statistics_notif;
 867	priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
 868
 869	/*
 870	 * The same handler is used for both the REPLY to a discrete
 871	 * statistics request from the host as well as for the periodic
 872	 * statistics notifications (after received beacons) from the uCode.
 873	 */
 874	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
 875	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
 876
 877	iwl_legacy_setup_rx_scan_handlers(priv);
 878	priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
 879
 880	/* Set up hardware specific Rx handlers */
 881	iwl3945_hw_rx_handler_setup(priv);
 882}
 883
 884/************************** RX-FUNCTIONS ****************************/
 885/*
 886 * Rx theory of operation
 887 *
 888 * The host allocates 32 DMA target addresses and passes the host address
 889 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
 890 * 0 to 31
 891 *
 892 * Rx Queue Indexes
 893 * The host/firmware share two index registers for managing the Rx buffers.
 894 *
 895 * The READ index maps to the first position that the firmware may be writing
 896 * to -- the driver can read up to (but not including) this position and get
 897 * good data.
 898 * The READ index is managed by the firmware once the card is enabled.
 899 *
 900 * The WRITE index maps to the last position the driver has read from -- the
 901 * position preceding WRITE is the last slot the firmware can place a packet.
 902 *
 903 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 904 * WRITE = READ.
 905 *
 906 * During initialization, the host sets up the READ queue position to the first
 907 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 908 *
 909 * When the firmware places a packet in a buffer, it will advance the READ index
 910 * and fire the RX interrupt.  The driver can then query the READ index and
 911 * process as many packets as possible, moving the WRITE index forward as it
 912 * resets the Rx queue buffers with new memory.
 913 *
 914 * The management in the driver is as follows:
 915 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 916 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 917 *   to replenish the iwl->rxq->rx_free.
 918 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
 919 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 920 *   'processed' and 'read' driver indexes as well)
 921 * + A received packet is processed and handed to the kernel network stack,
 922 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 923 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 924 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 925 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 926 *   were enough free buffers and RX_STALLED is set it is cleared.
 927 *
 928 *
 929 * Driver sequence:
 930 *
 931 * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 932 *                            iwl3945_rx_queue_restock
 933 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
 934 *                            queue, updates firmware pointers, and updates
 935 *                            the WRITE index.  If insufficient rx_free buffers
 936 *                            are available, schedules iwl3945_rx_replenish
 937 *
 938 * -- enable interrupts --
 939 * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
 940 *                            READ INDEX, detaching the SKB from the pool.
 941 *                            Moves the packet buffer from queue to rx_used.
 942 *                            Calls iwl3945_rx_queue_restock to refill any empty
 943 *                            slots.
 944 * ...
 945 *
 946 */
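/*
 * Illustrative sketch (not part of the original driver): the READ/WRITE rules
 * described above reduce to modulo arithmetic on a power-of-two ring.  With
 * the convention "empty when WRITE == READ - 1, full when WRITE == READ", the
 * number of buffers the firmware may still fill is the wrapped distance from
 * READ to WRITE.  The helper name and the queue_size parameter are
 * hypothetical; the driver's actual ring size is RX_QUEUE_SIZE.
 */
static inline u32 example_rx_fw_free_slots(u32 read, u32 write, u32 queue_size)
{
	/* queue_size - 1 when the ring is empty, 0 when it is full */
	return (write - read) & (queue_size - 1);
}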
 947
 948/**
 949 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 950 */
 951static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
 952					  dma_addr_t dma_addr)
 953{
 954	return cpu_to_le32((u32)dma_addr);
 955}
 956
 957/**
 958 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
 959 *
 960 * If there are slots in the RX queue that need to be restocked,
 961 * and we have free pre-allocated buffers, fill the ranks as much
 962 * as we can, pulling from rx_free.
 963 *
 964 * This moves the 'write' index forward to catch up with 'processed', and
 965 * also updates the memory address in the firmware to reference the new
 966 * target buffer.
 967 */
 968static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
 969{
 970	struct iwl_rx_queue *rxq = &priv->rxq;
 971	struct list_head *element;
 972	struct iwl_rx_mem_buffer *rxb;
 973	unsigned long flags;
 974	int write;
 975
 976	spin_lock_irqsave(&rxq->lock, flags);
 977	write = rxq->write & ~0x7;
 978	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
 979		/* Get next free Rx buffer, remove from free list */
 980		element = rxq->rx_free.next;
 981		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
 982		list_del(element);
 983
 984		/* Point to Rx buffer via next RBD in circular buffer */
 985		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
 986		rxq->queue[rxq->write] = rxb;
 987		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 988		rxq->free_count--;
 989	}
 990	spin_unlock_irqrestore(&rxq->lock, flags);
 991	/* If the pre-allocated buffer pool is dropping low, schedule to
 992	 * refill it */
 993	if (rxq->free_count <= RX_LOW_WATERMARK)
 994		queue_work(priv->workqueue, &priv->rx_replenish);
 995
 996
 997	/* If we've added more space for the firmware to place data, tell it.
 998	 * Increment device's write pointer in multiples of 8. */
 999	if ((rxq->write_actual != (rxq->write & ~0x7))
1000	    || (abs(rxq->write - rxq->read) > 7)) {
1001		spin_lock_irqsave(&rxq->lock, flags);
1002		rxq->need_update = 1;
1003		spin_unlock_irqrestore(&rxq->lock, flags);
1004		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1005	}
1006}
1007
1008/**
 1009 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
1010 *
1011 * When moving to rx_free an SKB is allocated for the slot.
1012 *
1013 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1014 * This is called as a scheduled work item (except for during initialization)
1015 */
1016static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1017{
1018	struct iwl_rx_queue *rxq = &priv->rxq;
1019	struct list_head *element;
1020	struct iwl_rx_mem_buffer *rxb;
1021	struct page *page;
1022	unsigned long flags;
1023	gfp_t gfp_mask = priority;
1024
1025	while (1) {
1026		spin_lock_irqsave(&rxq->lock, flags);
1027
1028		if (list_empty(&rxq->rx_used)) {
1029			spin_unlock_irqrestore(&rxq->lock, flags);
1030			return;
1031		}
1032		spin_unlock_irqrestore(&rxq->lock, flags);
1033
1034		if (rxq->free_count > RX_LOW_WATERMARK)
1035			gfp_mask |= __GFP_NOWARN;
1036
1037		if (priv->hw_params.rx_page_order > 0)
1038			gfp_mask |= __GFP_COMP;
1039
1040		/* Alloc a new receive buffer */
1041		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1042		if (!page) {
1043			if (net_ratelimit())
1044				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1045			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1046			    net_ratelimit())
1047				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1048					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
1049					 rxq->free_count);
1050			/* We don't reschedule replenish work here -- we will
1051			 * call the restock method and if it still needs
1052			 * more buffers it will schedule replenish */
1053			break;
1054		}
1055
1056		spin_lock_irqsave(&rxq->lock, flags);
1057		if (list_empty(&rxq->rx_used)) {
1058			spin_unlock_irqrestore(&rxq->lock, flags);
1059			__free_pages(page, priv->hw_params.rx_page_order);
1060			return;
1061		}
1062		element = rxq->rx_used.next;
1063		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1064		list_del(element);
1065		spin_unlock_irqrestore(&rxq->lock, flags);
1066
1067		rxb->page = page;
1068		/* Get physical address of RB/SKB */
1069		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1070				PAGE_SIZE << priv->hw_params.rx_page_order,
1071				PCI_DMA_FROMDEVICE);
1072
1073		spin_lock_irqsave(&rxq->lock, flags);
1074
1075		list_add_tail(&rxb->list, &rxq->rx_free);
1076		rxq->free_count++;
1077		priv->alloc_rxb_page++;
1078
1079		spin_unlock_irqrestore(&rxq->lock, flags);
1080	}
1081}
1082
1083void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1084{
1085	unsigned long flags;
1086	int i;
1087	spin_lock_irqsave(&rxq->lock, flags);
1088	INIT_LIST_HEAD(&rxq->rx_free);
1089	INIT_LIST_HEAD(&rxq->rx_used);
1090	/* Fill the rx_used queue with _all_ of the Rx buffers */
1091	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1092		/* In the reset function, these buffers may have been allocated
1093		 * to an SKB, so we need to unmap and free potential storage */
1094		if (rxq->pool[i].page != NULL) {
1095			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1096				PAGE_SIZE << priv->hw_params.rx_page_order,
1097				PCI_DMA_FROMDEVICE);
1098			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
1099			rxq->pool[i].page = NULL;
1100		}
1101		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1102	}
1103
1104	/* Set us so that we have processed and used all buffers, but have
1105	 * not restocked the Rx queue with fresh buffers */
1106	rxq->read = rxq->write = 0;
1107	rxq->write_actual = 0;
1108	rxq->free_count = 0;
1109	spin_unlock_irqrestore(&rxq->lock, flags);
1110}
1111
1112void iwl3945_rx_replenish(void *data)
1113{
1114	struct iwl_priv *priv = data;
1115	unsigned long flags;
1116
1117	iwl3945_rx_allocate(priv, GFP_KERNEL);
1118
1119	spin_lock_irqsave(&priv->lock, flags);
1120	iwl3945_rx_queue_restock(priv);
1121	spin_unlock_irqrestore(&priv->lock, flags);
1122}
1123
1124static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
1125{
1126	iwl3945_rx_allocate(priv, GFP_ATOMIC);
1127
1128	iwl3945_rx_queue_restock(priv);
1129}
1130
1131
1132/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 1133 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 1134 * This free routine walks the list of POOL entries and if SKB is set to
 1135 * non-NULL it is unmapped and freed.
1136 */
1137static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1138{
1139	int i;
1140	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1141		if (rxq->pool[i].page != NULL) {
1142			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1143				PAGE_SIZE << priv->hw_params.rx_page_order,
1144				PCI_DMA_FROMDEVICE);
1145			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
1146			rxq->pool[i].page = NULL;
1147		}
1148	}
1149
1150	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1151			  rxq->bd_dma);
1152	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1153			  rxq->rb_stts, rxq->rb_stts_dma);
1154	rxq->bd = NULL;
1155	rxq->rb_stts  = NULL;
1156}
1157
1158
1159/* Convert linear signal-to-noise ratio into dB */
1160static u8 ratio2dB[100] = {
1161/*	 0   1   2   3   4   5   6   7   8   9 */
1162	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1163	20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1164	26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1165	29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1166	32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1167	34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1168	36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1169	37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1170	38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1171	39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
1172};
1173
1174/* Calculates a relative dB value from a ratio of linear
1175 *   (i.e. not dB) signal levels.
1176 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1177int iwl3945_calc_db_from_ratio(int sig_ratio)
1178{
1179	/* 1000:1 or higher just report as 60 dB */
1180	if (sig_ratio >= 1000)
1181		return 60;
1182
1183	/* 100:1 or higher, divide by 10 and use table,
1184	 *   add 20 dB to make up for divide by 10 */
1185	if (sig_ratio >= 100)
1186		return 20 + (int)ratio2dB[sig_ratio/10];
1187
1188	/* We shouldn't see this */
1189	if (sig_ratio < 1)
1190		return 0;
1191
1192	/* Use table for ratios 1:1 - 99:1 */
1193	return (int)ratio2dB[sig_ratio];
1194}
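/*
 * Quick sanity check, illustration only (not part of the driver): the ratio
 * is treated as a voltage ratio, so the expected value is 20 * log10(ratio),
 * and the table-driven helper above matches that within integer rounding.
 * The helper name below is hypothetical.
 */
static inline bool example_check_db_table(void)
{
	return iwl3945_calc_db_from_ratio(10) == 20 &&	/* 20*log10(10)  = 20 */
	       iwl3945_calc_db_from_ratio(100) == 40 &&	/* 20*log10(100) = 40 */
	       iwl3945_calc_db_from_ratio(1000) == 60;	/* clamped at 60 dB   */
}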
1195
1196/**
1197 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1198 *
1199 * Uses the priv->rx_handlers callback function array to invoke
1200 * the appropriate handlers, including command responses,
1201 * frame-received notifications, and other notifications.
1202 */
1203static void iwl3945_rx_handle(struct iwl_priv *priv)
1204{
1205	struct iwl_rx_mem_buffer *rxb;
1206	struct iwl_rx_packet *pkt;
1207	struct iwl_rx_queue *rxq = &priv->rxq;
1208	u32 r, i;
1209	int reclaim;
1210	unsigned long flags;
1211	u8 fill_rx = 0;
1212	u32 count = 8;
1213	int total_empty = 0;
1214
1215	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1216	 * buffer that the driver may process (last buffer filled by ucode). */
1217	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
1218	i = rxq->read;
1219
1220	/* calculate total frames need to be restock after handling RX */
1221	total_empty = r - rxq->write_actual;
1222	if (total_empty < 0)
1223		total_empty += RX_QUEUE_SIZE;
1224
1225	if (total_empty > (RX_QUEUE_SIZE / 2))
1226		fill_rx = 1;
1227	/* Rx interrupt, but nothing sent from uCode */
1228	if (i == r)
1229		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1230
1231	while (i != r) {
1232		int len;
1233
1234		rxb = rxq->queue[i];
1235
1236		/* If an RXB doesn't have a Rx queue slot associated with it,
1237		 * then a bug has been introduced in the queue refilling
1238		 * routines -- catch it here */
1239		BUG_ON(rxb == NULL);
1240
1241		rxq->queue[i] = NULL;
1242
1243		pci_unmap_page(priv->pci_dev, rxb->page_dma,
1244			       PAGE_SIZE << priv->hw_params.rx_page_order,
1245			       PCI_DMA_FROMDEVICE);
1246		pkt = rxb_addr(rxb);
1247
1248		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1249		len += sizeof(u32); /* account for status word */
1250		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1251
1252		/* Reclaim a command buffer only if this packet is a response
1253		 *   to a (driver-originated) command.
1254		 * If the packet (e.g. Rx frame) originated from uCode,
1255		 *   there is no command buffer to reclaim.
1256		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1257		 *   but apparently a few don't get set; catch them here. */
1258		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1259			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1260			(pkt->hdr.cmd != REPLY_TX);
1261
1262		/* Based on type of command response or notification,
1263		 *   handle those that need handling via function in
1264		 *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
1265		if (priv->rx_handlers[pkt->hdr.cmd]) {
1266			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1267			iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1268			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1269			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1270		} else {
1271			/* No handling needed */
1272			IWL_DEBUG_RX(priv,
1273				"r %d i %d No handler needed for %s, 0x%02x\n",
1274				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1275				pkt->hdr.cmd);
1276		}
1277
1278		/*
1279		 * XXX: After here, we should always check rxb->page
1280		 * against NULL before touching it or its virtual
1281		 * memory (pkt). Because some rx_handler might have
1282		 * already taken or freed the pages.
1283		 */
1284
1285		if (reclaim) {
1286			/* Invoke any callbacks, transfer the buffer to caller,
1287			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1288			 * as we reclaim the driver command queue */
1289			if (rxb->page)
1290				iwl_legacy_tx_cmd_complete(priv, rxb);
1291			else
1292				IWL_WARN(priv, "Claim null rxb?\n");
1293		}
1294
1295		/* Reuse the page if possible. For notification packets and
1296		 * SKBs that fail to Rx correctly, add them back into the
1297		 * rx_free list for reuse later. */
1298		spin_lock_irqsave(&rxq->lock, flags);
1299		if (rxb->page != NULL) {
1300			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1301				0, PAGE_SIZE << priv->hw_params.rx_page_order,
1302				PCI_DMA_FROMDEVICE);
1303			list_add_tail(&rxb->list, &rxq->rx_free);
1304			rxq->free_count++;
1305		} else
1306			list_add_tail(&rxb->list, &rxq->rx_used);
1307
1308		spin_unlock_irqrestore(&rxq->lock, flags);
1309
1310		i = (i + 1) & RX_QUEUE_MASK;
1311		/* If there are a lot of unused frames,
1312		 * restock the Rx queue so ucode won't assert. */
1313		if (fill_rx) {
1314			count++;
1315			if (count >= 8) {
1316				rxq->read = i;
1317				iwl3945_rx_replenish_now(priv);
1318				count = 0;
1319			}
1320		}
1321	}
1322
1323	/* Backtrack one entry */
1324	rxq->read = i;
1325	if (fill_rx)
1326		iwl3945_rx_replenish_now(priv);
1327	else
1328		iwl3945_rx_queue_restock(priv);
1329}
1330
1331/* call this function to flush any scheduled tasklet */
1332static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
1333{
1334	/* wait to make sure we flush pending tasklet*/
1335	synchronize_irq(priv->pci_dev->irq);
1336	tasklet_kill(&priv->irq_tasklet);
1337}
1338
1339static const char *iwl3945_desc_lookup(int i)
1340{
1341	switch (i) {
1342	case 1:
1343		return "FAIL";
1344	case 2:
1345		return "BAD_PARAM";
1346	case 3:
1347		return "BAD_CHECKSUM";
1348	case 4:
1349		return "NMI_INTERRUPT";
1350	case 5:
1351		return "SYSASSERT";
1352	case 6:
1353		return "FATAL_ERROR";
1354	}
1355
1356	return "UNKNOWN";
1357}
1358
1359#define ERROR_START_OFFSET  (1 * sizeof(u32))
1360#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
1361
1362void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1363{
1364	u32 i;
1365	u32 desc, time, count, base, data1;
1366	u32 blink1, blink2, ilink1, ilink2;
1367
1368	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1369
1370	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1371		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1372		return;
1373	}
1374
1375
1376	count = iwl_legacy_read_targ_mem(priv, base);
1377
1378	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1379		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1380		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1381			priv->status, count);
1382	}
1383
1384	IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
1385		  "ilink1  nmiPC   Line\n");
1386	for (i = ERROR_START_OFFSET;
1387	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1388	     i += ERROR_ELEM_SIZE) {
1389		desc = iwl_legacy_read_targ_mem(priv, base + i);
1390		time =
1391		    iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1392		blink1 =
1393		    iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1394		blink2 =
1395		    iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1396		ilink1 =
1397		    iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1398		ilink2 =
1399		    iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1400		data1 =
1401		    iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1402
1403		IWL_ERR(priv,
1404			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1405			iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
1406			ilink1, ilink2, data1);
1407		trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
1408					0, blink1, blink2, ilink1, ilink2);
1409	}
1410}
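/*
 * Illustrative sketch (not part of the original driver): the loop above walks
 * the on-device error log as one count word followed by fixed-size records of
 * seven 32-bit values.  The layout is shown here as a struct purely for
 * readability; the driver reads each word individually through
 * iwl_legacy_read_targ_mem(), and the struct/field names are assumptions.
 */
struct example_error_elem {
	u32 desc;	/* error type, decoded by iwl3945_desc_lookup() */
	u32 time;	/* uCode timestamp of the error */
	u32 blink1;	/* branch/interrupt link registers and PC values */
	u32 blink2;
	u32 ilink1;
	u32 ilink2;
	u32 data1;	/* event-specific data word */
};			/* sizeof == ERROR_ELEM_SIZE, i.e. 7 * sizeof(u32) */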
1411
1412static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1413{
1414	u32 inta, handled = 0;
1415	u32 inta_fh;
1416	unsigned long flags;
1417#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1418	u32 inta_mask;
1419#endif
1420
1421	spin_lock_irqsave(&priv->lock, flags);
1422
1423	/* Ack/clear/reset pending uCode interrupts.
1424	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1425	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
1426	inta = iwl_read32(priv, CSR_INT);
1427	iwl_write32(priv, CSR_INT, inta);
1428
1429	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
1430	 * Any new interrupts that happen after this, either while we're
1431	 * in this tasklet, or later, will show up in next ISR/tasklet. */
1432	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1433	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1434
1435#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1436	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
1437		/* just for debug */
1438		inta_mask = iwl_read32(priv, CSR_INT_MASK);
1439		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1440			      inta, inta_mask, inta_fh);
1441	}
1442#endif
1443
1444	spin_unlock_irqrestore(&priv->lock, flags);
1445
1446	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1447	 * atomic, make sure that inta covers all the interrupts that
1448	 * we've discovered, even if FH interrupt came in just after
1449	 * reading CSR_INT. */
1450	if (inta_fh & CSR39_FH_INT_RX_MASK)
1451		inta |= CSR_INT_BIT_FH_RX;
1452	if (inta_fh & CSR39_FH_INT_TX_MASK)
1453		inta |= CSR_INT_BIT_FH_TX;
1454
1455	/* Now service all interrupt bits discovered above. */
1456	if (inta & CSR_INT_BIT_HW_ERR) {
1457		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
1458
1459		/* Tell the device to stop sending interrupts */
1460		iwl_legacy_disable_interrupts(priv);
1461
1462		priv->isr_stats.hw++;
1463		iwl_legacy_irq_handle_error(priv);
1464
1465		handled |= CSR_INT_BIT_HW_ERR;
1466
1467		return;
1468	}
1469
1470#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1471	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1472		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1473		if (inta & CSR_INT_BIT_SCD) {
 1474			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
1475				      "the frame/frames.\n");
1476			priv->isr_stats.sch++;
1477		}
1478
1479		/* Alive notification via Rx interrupt will do the real work */
1480		if (inta & CSR_INT_BIT_ALIVE) {
1481			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1482			priv->isr_stats.alive++;
1483		}
1484	}
1485#endif
1486	/* Safely ignore these bits for debug checks below */
1487	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1488
1489	/* Error detected by uCode */
1490	if (inta & CSR_INT_BIT_SW_ERR) {
1491		IWL_ERR(priv, "Microcode SW error detected. "
1492			"Restarting 0x%X.\n", inta);
1493		priv->isr_stats.sw++;
1494		iwl_legacy_irq_handle_error(priv);
1495		handled |= CSR_INT_BIT_SW_ERR;
1496	}
1497
1498	/* uCode wakes up after power-down sleep */
1499	if (inta & CSR_INT_BIT_WAKEUP) {
1500		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1501		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
1502		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
1503		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
1504		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
1505		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
1506		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
1507		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
1508
1509		priv->isr_stats.wakeup++;
1510		handled |= CSR_INT_BIT_WAKEUP;
1511	}
1512
1513	/* All uCode command responses, including Tx command responses,
1514	 * Rx "responses" (frame-received notification), and other
1515	 * notifications from uCode come through here*/
1516	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1517		iwl3945_rx_handle(priv);
1518		priv->isr_stats.rx++;
1519		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1520	}
1521
1522	if (inta & CSR_INT_BIT_FH_TX) {
1523		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1524		priv->isr_stats.tx++;
1525
1526		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1527		iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
1528					(FH39_SRVC_CHNL), 0x0);
1529		handled |= CSR_INT_BIT_FH_TX;
1530	}
1531
1532	if (inta & ~handled) {
1533		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1534		priv->isr_stats.unhandled++;
1535	}
1536
1537	if (inta & ~priv->inta_mask) {
1538		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1539			 inta & ~priv->inta_mask);
1540		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
1541	}
1542
1543	/* Re-enable all interrupts */
1544	/* only Re-enable if disabled by irq */
1545	if (test_bit(STATUS_INT_ENABLED, &priv->status))
1546		iwl_legacy_enable_interrupts(priv);
1547
1548#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1549	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
1550		inta = iwl_read32(priv, CSR_INT);
1551		inta_mask = iwl_read32(priv, CSR_INT_MASK);
1552		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1553		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1554			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1555	}
1556#endif
1557}
1558
1559static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1560					 enum ieee80211_band band,
1561				     u8 is_active, u8 n_probes,
1562				     struct iwl3945_scan_channel *scan_ch,
1563				     struct ieee80211_vif *vif)
1564{
1565	struct ieee80211_channel *chan;
1566	const struct ieee80211_supported_band *sband;
1567	const struct iwl_channel_info *ch_info;
1568	u16 passive_dwell = 0;
1569	u16 active_dwell = 0;
1570	int added, i;
1571
1572	sband = iwl_get_hw_mode(priv, band);
1573	if (!sband)
1574		return 0;
1575
1576	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
1577	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
1578
1579	if (passive_dwell <= active_dwell)
1580		passive_dwell = active_dwell + 1;
1581
1582	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1583		chan = priv->scan_request->channels[i];
1584
1585		if (chan->band != band)
1586			continue;
1587
1588		scan_ch->channel = chan->hw_value;
1589
1590		ch_info = iwl_legacy_get_channel_info(priv, band,
1591							scan_ch->channel);
1592		if (!iwl_legacy_is_channel_valid(ch_info)) {
1593			IWL_DEBUG_SCAN(priv,
1594				"Channel %d is INVALID for this band.\n",
1595			       scan_ch->channel);
1596			continue;
1597		}
1598
1599		scan_ch->active_dwell = cpu_to_le16(active_dwell);
1600		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
 1601		/* If passive, set up for auto-switch
1602		 *  and use long active_dwell time.
1603		 */
1604		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
1605		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1606			scan_ch->type = 0;	/* passive */
1607			if (IWL_UCODE_API(priv->ucode_ver) == 1)
1608				scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
1609		} else {
1610			scan_ch->type = 1;	/* active */
1611		}
1612
1613		/* Set direct probe bits. These may be used both for active
1614		 * scan channels (probes gets sent right away),
 1615		 * or for passive channels (probes get sent only after
1616		 * hearing clear Rx packet).*/
1617		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
1618			if (n_probes)
1619				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1620		} else {
1621			/* uCode v1 does not allow setting direct probe bits on
1622			 * passive channel.…

Note: the file is truncated at this point by the repository viewer; the remainder of iwl3945-base.c is not shown.