/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c

https://bitbucket.org/wisechild/galaxy-nexus

/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include "iwl-dev.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
#include "iwl-core.h"
#include "iwl-agn-calib.h"
#include "iwl-helpers.h"

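/*
 * Send an unassociated RXON for the BSS context: clear the ASSOC filter
 * flag, push the command to the device, and restore the caller's filter
 * flags in the command buffer afterwards.
 */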
static int iwlagn_disable_bss(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	__le32 old_filter = send->filter_flags;
	int ret;

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);

	send->filter_flags = old_filter;

	if (ret)
		IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);

	return ret;
}

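/*
 * Send an unassociated RXON for the PAN context: clear the ASSOC filter
 * flag, temporarily switch dev_type to P2P, and wait (up to one second)
 * for the REPLY_WIPAN_DEACTIVATION_COMPLETE notification from the uCode.
 */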
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;

	iwlagn_init_notification_wait(priv, &disable_wait,
				      REPLY_WIPAN_DEACTIVATION_COMPLETE,
				      NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);

	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		iwlagn_remove_notification(priv, &disable_wait);
	} else {
		ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}

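/*
 * Push the default EDCA (QoS) parameters for this context to the device,
 * setting the update-EDCA and 11n (TGN) flags as appropriate.
 */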
static void iwlagn_update_qos(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	int ret;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
			       sizeof(struct iwl_qosparam_cmd),
			       &ctx->qos_data.def_qos_parm);
	if (ret)
		IWL_ERR(priv, "Failed to update QoS\n");
}

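/*
 * Fetch a fresh beacon from mac80211 for this interface and hand it to
 * the device via the beacon command.
 */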
static int iwlagn_update_beacon(struct iwl_priv *priv,
				struct ieee80211_vif *vif)
{
	lockdep_assert_held(&priv->mutex);

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
	if (!priv->beacon_skb)
		return -ENOMEM;
	return iwlagn_send_beacon_cmd(priv);
}

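/*
 * Send the lightweight RXON_ASSOC command, which updates flags, filters
 * and rates without a full RXON; skipped when nothing relevant differs
 * between the staging and active configurations.
 */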
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	int ret = 0;
	struct iwl_rxon_assoc_cmd rxon_assoc;
	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_rxon_cmd *rxon2 = &ctx->active;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved1 = 0;
	rxon_assoc.reserved2 = 0;
	rxon_assoc.reserved3 = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
		ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
		ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
		ctx->staging.ofdm_ht_triple_stream_basic_rates;
	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;

	ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
				     sizeof(rxon_assoc), &rxon_assoc, NULL);
	return ret;
}

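/*
 * First half of a RXON commit: send the unassociated RXON for this
 * context, then restore the stations and default WEP keys that the
 * unassociated RXON cleared in the uCode.
 */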
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS)
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	else
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
	if (ret)
		return ret;

	/*
	 * Un-assoc RXON clears the station table and WEP
	 * keys, so we have to restore those afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}

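/*
 * Second half of a RXON commit: send RXON timing and QoS, upload the AP
 * beacon if needed, send the full (associated) RXON, and then restore
 * sensitivity calibration and TX power.
 */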
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* RXON timing must be before associated RXON */
	ret = iwl_send_rxon_timing(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
		return ret;
	}

	/* QoS info may be cleared by previous un-assoc RXON */
	iwlagn_update_qos(priv, ctx);

	/*
	 * We'll run into this code path when beaconing is
	 * enabled, but then we also need to send the beacon
	 * to the device.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n", ret);
			return ret;
		}
	}

	priv->start_calib = 0;

	/*
	 * Apply the new configuration.
	 *
	 * Associated RXON doesn't clear the station table in uCode,
	 * so we don't need to restore stations etc. after this.
	 */
	ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
			       sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	memcpy(active, &ctx->staging, sizeof(*active));

	iwl_reprogram_ap_sta(priv, ctx);

	/* IBSS beacon needs to be sent after setting assoc */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");

	iwl_init_sensitivity(priv);

	/*
	 * If we issue a new RXON command which requires a tune, then we
	 * must send a new TXPOWER command, or we won't be able to TX any
	 * frames.
	 *
	 * We're expected to set the TX power here if the channel is changing.
	 */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * iwlagn_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 *
 * The connect/disconnect flow should be as follows:
 *
 * 1. make sure to send the RXON command with the association bit unset if
 *    not yet connected; this should include the channel and the band for
 *    the candidate to be connected to
 * 2. add the station before the RXON association with the AP
 * 3. RXON_timing has to be sent before the RXON for connection
 * 4. full RXON command - associated bit set
 * 5. use the RXON_ASSOC command to update any flag changes
 */
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active = (void *)&ctx->active;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (!iwl_is_alive(priv))
		return -EBUSY;

	/* This function hardcodes a bunch of dual-mode assumptions */
	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
		struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;

		iwl_set_rxon_channel(priv, chan, ctx);
		iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
		ctx->staging.filter_flags |=
			RXON_FILTER_ASSOC_MSK |
			RXON_FILTER_PROMISC_MSK |
			RXON_FILTER_CTL2HOST_MSK;
		ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
		new_assoc = true;

		if (memcmp(&ctx->staging, &ctx->active,
			   sizeof(ctx->staging)) == 0)
			return 0;
	}

	/*
	 * Force CTS-to-self frame protection if RTS-CTS is not the
	 * preferred aggregation protection method.
	 */
	if (!(priv->cfg->ht_params &&
	      priv->cfg->ht_params->use_rts_for_aggregation))
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;

	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

	iwl_print_rx_config_cmd(priv, ctx);

	ret = iwl_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * On receiving a commit_rxon request, abort any previous channel
	 * switch that is still in progress.
	 */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_channel));
		iwl_chswitch_done(priv, false);
	}

	/*
	 * If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration.
	 */
	if (!iwl_full_rxon_required(priv, ctx)) {
		ret = iwlagn_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active, &ctx->staging, sizeof(*active));
		/*
		 * We do not commit TX power settings while the channel is
		 * changing; do it now if the settings have changed.
		 */
		iwl_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	if (priv->cfg->ops->hcmd->set_pan_params) {
		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
		if (ret)
			return ret;
	}

	iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);

	IWL_DEBUG_INFO(priv,
		       "Going to commit RXON\n"
		       " * with%s RXON_FILTER_ASSOC_MSK\n"
		       " * channel = %d\n"
		       " * bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	/*
	 * Always clear associated first, but with the correct config.
	 * This is required as for example station addition for the
	 * AP station must be done after the BSSID is set to correctly
	 * set up filters in the device.
	 */
	ret = iwlagn_rxon_disconn(priv, ctx);
	if (ret)
		return ret;

	if (new_assoc)
		return iwlagn_rxon_connect(priv, ctx);

	return 0;
}

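/*
 * mac80211 config callback: handle SMPS, channel, power-save and TX power
 * changes, then commit the RXON for any context whose staging configuration
 * differs from the active one.
 */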
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	const struct iwl_channel_info *ch_info;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "changed %#x", changed);

	mutex_lock(&priv->mutex);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		unsigned long flags;

		ch_info = iwl_get_channel_info(priv, channel->band,
					       channel->hw_value);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto out;
		}

		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
						IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		iwl_update_bcast_stations(priv);

		/*
		 * The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists.
		 */
		iwl_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
				   priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->mutex);
	return ret;
}

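/*
 * Work out whether the peer on this context can actually use more than one
 * spatial stream; if no context needs multiple chains, a single RX chain is
 * sufficient and can be used to save power.
 */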
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       struct ieee80211_bss_conf *bss_conf)
{
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_rxon_context *tmp;
	struct ieee80211_sta *sta;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta_ht_cap *ht_cap;
	bool need_multiple;

	lockdep_assert_held(&priv->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta) {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			need_multiple = false;
			rcu_read_unlock();
			break;
		}

		ht_cap = &sta->ht_cap;

		need_multiple = true;

		/*
		 * If the peer advertises no support for receiving 2 and 3
		 * stream MCS rates, it can't be transmitting them either.
		 */
		if (ht_cap->mcs.rx_mask[1] == 0 &&
		    ht_cap->mcs.rx_mask[2] == 0) {
			need_multiple = false;
		} else if (!(ht_cap->mcs.tx_params &
			     IEEE80211_HT_MCS_TX_DEFINED)) {
			/* If it can't TX MCS at all ... */
			need_multiple = false;
		} else if (ht_cap->mcs.tx_params &
			   IEEE80211_HT_MCS_TX_RX_DIFF) {
			int maxstreams;

			/*
			 * But if it can receive them, it might still not
			 * be able to transmit them, which is what we need
			 * to check here -- so check the number of streams
			 * it advertises for TX (if different from RX).
			 */
			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
			maxstreams >>= IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (maxstreams <= 1)
				need_multiple = false;
		}

		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		/* currently */
		need_multiple = false;
		break;
	default:
		/* only AP really */
		need_multiple = true;
		break;
	}

	ctx->ht_need_multiple_chains = need_multiple;

	if (!need_multiple) {
		/* check all contexts */
		for_each_context(priv, tmp) {
			if (!tmp->vif)
				continue;
			if (tmp->ht_need_multiple_chains) {
				need_multiple = true;
				break;
			}
		}
	}

	ht_conf->single_chain_sufficient = !need_multiple;
}

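/*
 * mac80211 BSS configuration callback: mirror association, QoS, HT and
 * protection changes into the staging RXON and commit it when it differs
 * from the active configuration.
 */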
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;

	mutex_lock(&priv->mutex);

	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			/*
			 * If we disassociate while there are pending
			 * frames, just wake up the queues and let the
			 * frames "escape" ... This shouldn't really
			 * be happening to start with, but we should
			 * not get stuck in this case either since it
			 * can happen if userspace gets confused.
			 */
			if (ctx->last_tx_rejected) {
				ctx->last_tx_rejected = false;
				iwl_wake_any_queue(priv, ctx);
			}
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		}
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * The chain noise calibration will enable PM upon
		 * completion. If calibration has already been run
		 * then we need to enable power management here.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* Enable RX differential gain and sensitivity calibrations */
		iwl_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
	    priv->beacon_ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	}

	mutex_unlock(&priv->mutex);
}

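/* Called when a scan finishes: flush any RXON changes deferred during it */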
void iwlagn_post_scan(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	/*
	 * Since setting the RXON may have been deferred while
	 * performing the scan, fire one off if needed
	 */
	for_each_context(priv, ctx)
		if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			iwlagn_commit_rxon(priv, ctx);

	if (priv->cfg->ops->hcmd->set_pan_params)
		priv->cfg->ops->hcmd->set_pan_params(priv);
}