PageRenderTime 1634ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/net/wireless/ath/ath9k/main.c

https://github.com/Mengqi/linux-2.6
C | 1961 lines | 1368 code | 355 blank | 238 comment | 239 complexity | dd4c0f33f9e8752981b9d06272a27afe MD5 | raw file
  1. /*
  2. * Copyright (c) 2008-2011 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/nl80211.h>
  17. #include <linux/delay.h>
  18. #include "ath9k.h"
  19. #include "btcoex.h"
  20. static u8 parse_mpdudensity(u8 mpdudensity)
  21. {
  22. /*
  23. * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
  24. * 0 for no restriction
  25. * 1 for 1/4 us
  26. * 2 for 1/2 us
  27. * 3 for 1 us
  28. * 4 for 2 us
  29. * 5 for 4 us
  30. * 6 for 8 us
  31. * 7 for 16 us
  32. */
  33. switch (mpdudensity) {
  34. case 0:
  35. return 0;
  36. case 1:
  37. case 2:
  38. case 3:
  39. /* Our lower layer calculations limit our precision to
  40. 1 microsecond */
  41. return 1;
  42. case 4:
  43. return 2;
  44. case 5:
  45. return 4;
  46. case 6:
  47. return 8;
  48. case 7:
  49. return 16;
  50. default:
  51. return 0;
  52. }
  53. }
  54. static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
  55. {
  56. bool pending = false;
  57. spin_lock_bh(&txq->axq_lock);
  58. if (txq->axq_depth || !list_empty(&txq->axq_acq))
  59. pending = true;
  60. spin_unlock_bh(&txq->axq_lock);
  61. return pending;
  62. }
  63. static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
  64. {
  65. unsigned long flags;
  66. bool ret;
  67. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  68. ret = ath9k_hw_setpower(sc->sc_ah, mode);
  69. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  70. return ret;
  71. }
/*
 * Take a power-save reference on the chip and force it awake. Only the
 * first reference (usecount 0 -> 1) actually changes the power mode;
 * nested callers just bump the count. Pair every call with
 * ath9k_ps_restore().
 */
void ath9k_ps_wakeup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;
	enum ath9k_power_mode power_mode;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	/* Nested reference: chip is already being kept awake */
	if (++sc->ps_usecount != 1)
		goto unlock;

	/* Remember the previous mode so we know whether we really woke up */
	power_mode = sc->sc_ah->power_mode;
	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/*
	 * While the hardware is asleep, the cycle counters contain no
	 * useful data. Better clear them now so that they don't mess up
	 * survey data results.
	 */
	if (power_mode != ATH9K_PM_AWAKE) {
		spin_lock(&common->cc_lock);
		ath_hw_cycle_counters_update(common);
		memset(&common->cc_survey, 0, sizeof(common->cc_survey));
		spin_unlock(&common->cc_lock);
	}

 unlock:
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
/*
 * Drop a power-save reference taken by ath9k_ps_wakeup(). When the last
 * reference is released, fold the cycles spent awake into the counters
 * and put the chip back to full sleep (if idle) or network sleep (if
 * powersave is enabled and no PS event is still pending).
 */
void ath9k_ps_restore(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	/* Still referenced by someone else: keep the chip awake */
	if (--sc->ps_usecount != 0)
		goto unlock;

	/* Harvest cycle counters before the clock potentially stops */
	spin_lock(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	spin_unlock(&common->cc_lock);

	if (sc->ps_idle)
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
	else if (sc->ps_enabled &&
		 /* Don't sleep while we still owe the PS state machine work */
		 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
				   PS_WAIT_FOR_CAB |
				   PS_WAIT_FOR_PSPOLL_DATA |
				   PS_WAIT_FOR_TX_ACK)))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);

 unlock:
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
  117. void ath_start_ani(struct ath_common *common)
  118. {
  119. struct ath_hw *ah = common->ah;
  120. unsigned long timestamp = jiffies_to_msecs(jiffies);
  121. struct ath_softc *sc = (struct ath_softc *) common->priv;
  122. if (!(sc->sc_flags & SC_OP_ANI_RUN))
  123. return;
  124. if (sc->sc_flags & SC_OP_OFFCHANNEL)
  125. return;
  126. common->ani.longcal_timer = timestamp;
  127. common->ani.shortcal_timer = timestamp;
  128. common->ani.checkani_timer = timestamp;
  129. mod_timer(&common->ani.timer,
  130. jiffies +
  131. msecs_to_jiffies((u32)ah->config.ani_poll_interval));
  132. }
  133. static void ath_update_survey_nf(struct ath_softc *sc, int channel)
  134. {
  135. struct ath_hw *ah = sc->sc_ah;
  136. struct ath9k_channel *chan = &ah->channels[channel];
  137. struct survey_info *survey = &sc->survey[channel];
  138. if (chan->noisefloor) {
  139. survey->filled |= SURVEY_INFO_NOISE_DBM;
  140. survey->noise = chan->noisefloor;
  141. }
  142. }
  143. /*
  144. * Updates the survey statistics and returns the busy time since last
  145. * update in %, if the measurement duration was long enough for the
  146. * result to be useful, -1 otherwise.
  147. */
  148. static int ath_update_survey_stats(struct ath_softc *sc)
  149. {
  150. struct ath_hw *ah = sc->sc_ah;
  151. struct ath_common *common = ath9k_hw_common(ah);
  152. int pos = ah->curchan - &ah->channels[0];
  153. struct survey_info *survey = &sc->survey[pos];
  154. struct ath_cycle_counters *cc = &common->cc_survey;
  155. unsigned int div = common->clockrate * 1000;
  156. int ret = 0;
  157. if (!ah->curchan)
  158. return -1;
  159. if (ah->power_mode == ATH9K_PM_AWAKE)
  160. ath_hw_cycle_counters_update(common);
  161. if (cc->cycles > 0) {
  162. survey->filled |= SURVEY_INFO_CHANNEL_TIME |
  163. SURVEY_INFO_CHANNEL_TIME_BUSY |
  164. SURVEY_INFO_CHANNEL_TIME_RX |
  165. SURVEY_INFO_CHANNEL_TIME_TX;
  166. survey->channel_time += cc->cycles / div;
  167. survey->channel_time_busy += cc->rx_busy / div;
  168. survey->channel_time_rx += cc->rx_frame / div;
  169. survey->channel_time_tx += cc->tx_frame / div;
  170. }
  171. if (cc->cycles < div)
  172. return -1;
  173. if (cc->cycles > 0)
  174. ret = cc->rx_busy * 100 / cc->cycles;
  175. memset(cc, 0, sizeof(*cc));
  176. ath_update_survey_nf(sc, pos);
  177. return ret;
  178. }
/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart stuff.
 *
 * Returns 0 on success or a negative errno if the hardware reset or
 * receive restart fails. Called with sc->mutex held by mac80211 paths
 * — NOTE(review): inferred from the callers outside this chunk; confirm.
 */
static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
			   struct ath9k_channel *hchan)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	bool fastcc = true, stopped;
	struct ieee80211_channel *channel = hw->conf.channel;
	struct ath9k_hw_cal_data *caldata = NULL;
	int r;

	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	sc->hw_busy_count = 0;

	/* Quiet all deferred work that could touch the hardware mid-reset */
	del_timer_sync(&common->ani.timer);
	cancel_work_sync(&sc->paprd_work);
	cancel_work_sync(&sc->hw_check_work);
	cancel_delayed_work_sync(&sc->tx_complete_work);
	cancel_delayed_work_sync(&sc->hw_pll_work);

	ath9k_ps_wakeup(sc);

	spin_lock_bh(&sc->sc_pcu_lock);

	/*
	 * This is only performed if the channel settings have
	 * actually changed.
	 *
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	ath9k_hw_disable_interrupts(ah);
	stopped = ath_drain_all_txq(sc, false);

	if (!ath_stoprecv(sc))
		stopped = false;

	if (!ath9k_hw_check_alive(ah))
		stopped = false;

	/* XXX: do not flush receive queue here. We don't want
	 * to flush data frames already in queue because of
	 * changing channel. */

	/* Fast channel change is only safe if DMA stopped cleanly and we
	 * are going off-channel (scan); otherwise do a full reset. */
	if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL))
		fastcc = false;

	/* Only keep/refresh calibration data for the home channel */
	if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
		caldata = &sc->caldata;

	ath_dbg(common, ATH_DBG_CONFIG,
		"(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
		sc->sc_ah->curchan->channel,
		channel->center_freq, conf_is_ht40(conf),
		fastcc);

	r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
	if (r) {
		ath_err(common,
			"Unable to reset channel (%u MHz), reset status %d\n",
			channel->center_freq, r);
		goto ps_restore;
	}

	if (ath_startrecv(sc) != 0) {
		ath_err(common, "Unable to restart recv logic\n");
		r = -EIO;
		goto ps_restore;
	}

	ath9k_cmn_update_txpow(ah, sc->curtxpow,
			       sc->config.txpowlimit, &sc->curtxpow);
	ath9k_hw_set_interrupts(ah, ah->imask);

	/* Home channel only: restart beacons, periodic work and ANI */
	if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
		if (sc->sc_flags & SC_OP_BEACONS)
			ath_set_beacon(sc);
		ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
		if (!common->disable_ani)
			ath_start_ani(common);
	}

 ps_restore:
	ieee80211_wake_queues(hw);

	spin_unlock_bh(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);

	return r;
}
  260. static void ath_paprd_activate(struct ath_softc *sc)
  261. {
  262. struct ath_hw *ah = sc->sc_ah;
  263. struct ath9k_hw_cal_data *caldata = ah->caldata;
  264. struct ath_common *common = ath9k_hw_common(ah);
  265. int chain;
  266. if (!caldata || !caldata->paprd_done)
  267. return;
  268. ath9k_ps_wakeup(sc);
  269. ar9003_paprd_enable(ah, false);
  270. for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
  271. if (!(common->tx_chainmask & BIT(chain)))
  272. continue;
  273. ar9003_paprd_populate_single_table(ah, caldata, chain);
  274. }
  275. ar9003_paprd_enable(ah, true);
  276. ath9k_ps_restore(sc);
  277. }
/*
 * Transmit a single PAPRD calibration frame on the given TX chain and
 * wait (up to ATH_PAPRD_TIMEOUT ms) for the TX completion path to
 * signal sc->paprd_complete. Returns true if the frame completed in
 * time, false on TX failure or timeout.
 *
 * Ownership: @skb is freed here if ath_tx_start() fails; on success the
 * TX path owns it — NOTE(review): the caller reuses the same skb for
 * multiple chains, so the TX path apparently does not free it; confirm.
 */
static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_tx_control txctl;
	int time_left;

	memset(&txctl, 0, sizeof(txctl));
	txctl.txq = sc->tx.txq_map[WME_AC_BE];

	/* Force a single-attempt MCS0 transmission with no ACK expected */
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->band = hw->conf.channel->band;
	tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	tx_info->control.rates[0].idx = 0;
	tx_info->control.rates[0].count = 1;
	tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
	tx_info->control.rates[1].idx = -1;

	init_completion(&sc->paprd_complete);
	/* Tag the frame with the chain currently under training */
	txctl.paprd = BIT(chain);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
		dev_kfree_skb_any(skb);
		return false;
	}

	time_left = wait_for_completion_timeout(&sc->paprd_complete,
			msecs_to_jiffies(ATH_PAPRD_TIMEOUT));

	if (!time_left)
		ath_dbg(common, ATH_DBG_CALIBRATE,
			"Timeout waiting for paprd training on TX chain %d\n",
			chain);

	return !!time_left;
}
/*
 * PAPRD calibration work handler (paprd_work). For every active TX
 * chain: send a thermal-measurement frame, set up the gain table, send
 * a training frame, then build the pre-distortion curve from the
 * hardware's measurements. Only if every chain succeeds (chain_ok after
 * the loop) is the calibration marked done and activated.
 */
void ath_paprd_calibrate(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	struct ath_common *common = ath9k_hw_common(ah);
	int ftype;
	int chain_ok = 0;
	int chain;
	int len = 1800;

	if (!caldata)
		return;

	ath9k_ps_wakeup(sc);

	if (ar9003_paprd_init_table(ah) < 0)
		goto fail_paprd;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto fail_paprd;

	/*
	 * Build a self-addressed null-function data frame as the training
	 * payload (addr1/2/3 all set to our own permanent address).
	 */
	skb_put(skb, len);
	memset(skb->data, 0, len);
	hdr = (struct ieee80211_hdr *)skb->data;
	ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
	hdr->frame_control = cpu_to_le16(ftype);
	hdr->duration_id = cpu_to_le16(10);
	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);

	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
		if (!(common->tx_chainmask & BIT(chain)))
			continue;

		chain_ok = 0;

		ath_dbg(common, ATH_DBG_CALIBRATE,
			"Sending PAPRD frame for thermal measurement "
			"on chain %d\n", chain);
		if (!ath_paprd_send_frame(sc, skb, chain))
			goto fail_paprd;

		ar9003_paprd_setup_gain_table(ah, chain);

		ath_dbg(common, ATH_DBG_CALIBRATE,
			"Sending PAPRD training frame on chain %d\n", chain);
		if (!ath_paprd_send_frame(sc, skb, chain))
			goto fail_paprd;

		/* break (not goto): frees the skb but leaves paprd undone */
		if (!ar9003_paprd_is_done(ah)) {
			ath_dbg(common, ATH_DBG_CALIBRATE,
				"PAPRD not yet done on chain %d\n", chain);
			break;
		}

		if (ar9003_paprd_create_curve(ah, caldata, chain)) {
			ath_dbg(common, ATH_DBG_CALIBRATE,
				"PAPRD create curve failed on chain %d\n",
				chain);
			break;
		}

		chain_ok = 1;
	}
	kfree_skb(skb);

	if (chain_ok) {
		caldata->paprd_done = true;
		ath_paprd_activate(sc);
	}

 fail_paprd:
	ath9k_ps_restore(sc);
}
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 *
 * Timer callback for common->ani.timer; @data is the ath_softc pointer.
 */
void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval, long_cal_interval;
	unsigned long flags;

	/* Use the shorter long-cal interval if interference was detected */
	if (ah->caldata && ah->caldata->nfcal_interference)
		long_cal_interval = ATH_LONG_CALINTERVAL_INT;
	else
		long_cal_interval = ATH_LONG_CALINTERVAL;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	ath9k_ps_wakeup(sc);

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
		longcal = true;
		ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
			shortcal = true;
			ath_dbg(common, ATH_DBG_ANI,
				"shortcal @%lu\n", jiffies);
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		/* Once done, periodically re-validate the calibration */
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - common->ani.checkani_timer) >=
	    ah->config.ani_poll_interval) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Call ANI routine if necessary */
	if (aniflag) {
		/* cc_lock protects the cycle counters the survey update reads */
		spin_lock_irqsave(&common->cc_lock, flags);
		ath9k_hw_ani_monitor(ah, ah->curchan);
		ath_update_survey_stats(sc);
		spin_unlock_irqrestore(&common->cc_lock, flags);
	}

	/* Perform calibration if necessary */
	if (longcal || shortcal) {
		common->ani.caldone =
			ath9k_hw_calibrate(ah, ah->curchan,
					   common->rx_chainmask, longcal);
	}

	ath9k_ps_restore(sc);

set_timer:
	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_LONG_CALINTERVAL;
	if (sc->sc_ah->config.enable_ani)
		cal_interval = min(cal_interval,
				   (u32)ah->config.ani_poll_interval);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));

	/* Kick off (or re-activate) PAPRD on hardware that supports it */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
		if (!ah->caldata->paprd_done)
			ieee80211_queue_work(sc->hw, &sc->paprd_work);
		else if (!ah->paprd_table_write_done)
			ath_paprd_activate(sc);
	}
}
/*
 * Initialize per-station driver state when mac80211 adds a station
 * (the ath_node lives inside sta->drv_priv). Sets up TX aggregation
 * state and caches the peer's A-MPDU parameters.
 */
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an;
	struct ath_hw *ah = sc->sc_ah;
	an = (struct ath_node *)sta->drv_priv;

#ifdef CONFIG_ATH9K_DEBUGFS
	/* Track the node so debugfs can enumerate known stations */
	spin_lock(&sc->nodes_lock);
	list_add(&an->list, &sc->nodes);
	spin_unlock(&sc->nodes_lock);
	an->sta = sta;
#endif
	if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
		sc->sc_flags |= SC_OP_ENABLE_APM;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		ath_tx_node_init(sc, an);
		/* Max A-MPDU length advertised by the peer */
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		/* Minimum MPDU start spacing, converted to microseconds */
		an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
	}
}
  486. static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
  487. {
  488. struct ath_node *an = (struct ath_node *)sta->drv_priv;
  489. #ifdef CONFIG_ATH9K_DEBUGFS
  490. spin_lock(&sc->nodes_lock);
  491. list_del(&an->list);
  492. spin_unlock(&sc->nodes_lock);
  493. an->sta = NULL;
  494. #endif
  495. if (sc->sc_flags & SC_OP_TXAGGR)
  496. ath_tx_node_cleanup(sc, an);
  497. }
/*
 * Baseband-hang watchdog (hw_check_work). If the hardware no longer
 * reports alive, sample the channel busy percentage; three consecutive
 * ~100%-busy samples trigger a full chip reset.
 */
void ath_hw_check(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;
	int busy;

	ath9k_ps_wakeup(sc);
	/* Hardware still responsive: nothing to do */
	if (ath9k_hw_check_alive(sc->sc_ah))
		goto out;

	spin_lock_irqsave(&common->cc_lock, flags);
	busy = ath_update_survey_stats(sc);
	spin_unlock_irqrestore(&common->cc_lock, flags);

	ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
		"busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
	if (busy >= 99) {
		if (++sc->hw_busy_count >= 3) {
			spin_lock_bh(&sc->sc_pcu_lock);
			ath_reset(sc, true);
			spin_unlock_bh(&sc->sc_pcu_lock);
		}
	} else if (busy >= 0)
		/* busy == -1 means "window too short"; keep the counter */
		sc->hw_busy_count = 0;

 out:
	ath9k_ps_restore(sc);
}
  523. static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
  524. {
  525. static int count;
  526. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  527. if (pll_sqsum >= 0x40000) {
  528. count++;
  529. if (count == 3) {
  530. /* Rx is hung for more than 500ms. Reset it */
  531. ath_dbg(common, ATH_DBG_RESET,
  532. "Possible RX hang, resetting");
  533. spin_lock_bh(&sc->sc_pcu_lock);
  534. ath_reset(sc, true);
  535. spin_unlock_bh(&sc->sc_pcu_lock);
  536. count = 0;
  537. }
  538. } else
  539. count = 0;
  540. }
  541. void ath_hw_pll_work(struct work_struct *work)
  542. {
  543. struct ath_softc *sc = container_of(work, struct ath_softc,
  544. hw_pll_work.work);
  545. u32 pll_sqsum;
  546. if (AR_SREV_9485(sc->sc_ah)) {
  547. ath9k_ps_wakeup(sc);
  548. pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
  549. ath9k_ps_restore(sc);
  550. ath_hw_pll_rx_hang_check(sc, pll_sqsum);
  551. ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
  552. }
  553. }
/*
 * Interrupt bottom half. Consumes the status bits cached by ath_isr()
 * in sc->intrstatus: handles fatal/watchdog resets, powersave beacon
 * tracking, RX and TX processing, and generic timers, then re-enables
 * the hardware interrupts that ath_isr() turned off.
 */
void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 status = sc->intrstatus;
	u32 rxmask;

	/* Fatal HW error or baseband watchdog: reset and bail out */
	if ((status & ATH9K_INT_FATAL) ||
	    (status & ATH9K_INT_BB_WATCHDOG)) {
		spin_lock(&sc->sc_pcu_lock);
		ath_reset(sc, true);
		spin_unlock(&sc->sc_pcu_lock);
		return;
	}

	ath9k_ps_wakeup(sc);
	spin_lock(&sc->sc_pcu_lock);

	/*
	 * Only run the baseband hang check if beacons stop working in AP or
	 * IBSS mode, because it has a high false positive rate. For station
	 * mode it should not be necessary, since the upper layers will detect
	 * this through a beacon miss automatically and the following channel
	 * change will trigger a hardware reset anyway
	 */
	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
	    !ath9k_hw_check_alive(ah))
		ieee80211_queue_work(sc->hw, &sc->hw_check_work);

	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
		/*
		 * TSF sync does not look correct; remain awake to sync with
		 * the next Beacon.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"TSFOOR - Sync with next Beacon\n");
		sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC |
				PS_TSFOOR_SYNC;
	}

	/* EDMA chips report RX via separate high/low priority bits */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
			  ATH9K_INT_RXORN);
	else
		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);

	if (status & rxmask) {
		/* Check for high priority Rx first */
		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    (status & ATH9K_INT_RXHP))
			ath_rx_tasklet(sc, 0, true);

		ath_rx_tasklet(sc, 0, false);
	}

	if (status & ATH9K_INT_TX) {
		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
			ath_tx_edma_tasklet(sc);
		else
			ath_tx_tasklet(sc);
	}

	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		if (status & ATH9K_INT_GENTIMER)
			ath_gen_timer_isr(sc->sc_ah);

	/* re-enable hardware interrupt */
	ath9k_hw_enable_interrupts(ah);

	spin_unlock(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
/*
 * Hard interrupt handler. Reads and masks the interrupt status, caches
 * it for the tasklet, services the few conditions that must be handled
 * in hard-IRQ context (FATAL/RXORN, EDMA BB watchdog, beacons, MIB,
 * TIM wakeup), and schedules ath9k_tasklet() for the rest with the
 * hardware interrupts disabled until the tasklet re-enables them.
 */
irqreturn_t ath_isr(int irq, void *dev)
{
/* Status bits whose handling is deferred to the tasklet */
#define SCHED_INTR (				\
		ATH9K_INT_FATAL |		\
		ATH9K_INT_BB_WATCHDOG |		\
		ATH9K_INT_RXORN |		\
		ATH9K_INT_RXEOL |		\
		ATH9K_INT_RX |			\
		ATH9K_INT_RXLP |		\
		ATH9K_INT_RXHP |		\
		ATH9K_INT_TX |			\
		ATH9K_INT_BMISS |		\
		ATH9K_INT_CST |			\
		ATH9K_INT_TSFOOR |		\
		ATH9K_INT_GENTIMER)

	struct ath_softc *sc = dev;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	enum ath9k_int status;
	bool sched = false;

	/*
	 * The hardware is not ready/present, don't
	 * touch anything. Note this can happen early
	 * on if the IRQ is shared.
	 */
	if (sc->sc_flags & SC_OP_INVALID)
		return IRQ_NONE;

	/* shared irq, not for us */
	if (!ath9k_hw_intrpend(ah))
		return IRQ_NONE;

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
	status &= ah->imask;	/* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
	 */
	if (!status)
		return IRQ_NONE;

	/* Cache the status */
	sc->intrstatus = status;

	if (status & SCHED_INTR)
		sched = true;

	/*
	 * If a FATAL or RXORN interrupt is received, we have to reset the
	 * chip immediately.
	 */
	if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
	    !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
		goto chip_reset;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
	    (status & ATH9K_INT_BB_WATCHDOG)) {
		/* Capture counters/debug info before the deferred reset */
		spin_lock(&common->cc_lock);
		ath_hw_cycle_counters_update(common);
		ar9003_hw_bb_watchdog_dbg_info(ah);
		spin_unlock(&common->cc_lock);

		goto chip_reset;
	}

	if (status & ATH9K_INT_SWBA)
		tasklet_schedule(&sc->bcon_tasklet);

	if (status & ATH9K_INT_TXURN)
		ath9k_hw_updatetxtriglevel(ah, true);

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* Mask RXEOL/RXORN until RX is restarted, or they refire */
		if (status & ATH9K_INT_RXEOL) {
			ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
			ath9k_hw_set_interrupts(ah, ah->imask);
		}
	}

	if (status & ATH9K_INT_MIB) {
		/*
		 * Disable interrupts until we service the MIB
		 * interrupt; otherwise it will continue to
		 * fire.
		 */
		ath9k_hw_disable_interrupts(ah);
		/*
		 * Let the hal handle the event. We assume
		 * it will clear whatever condition caused
		 * the interrupt.
		 */
		spin_lock(&common->cc_lock);
		ath9k_hw_proc_mib_event(ah);
		spin_unlock(&common->cc_lock);
		ath9k_hw_enable_interrupts(ah);
	}

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		if (status & ATH9K_INT_TIM_TIMER) {
			if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
				goto chip_reset;
			/* Clear RxAbort bit so that we can
			 * receive frames */
			ath9k_setpower(sc, ATH9K_PM_AWAKE);
			ath9k_hw_setrxabort(sc->sc_ah, 0);
			sc->ps_flags |= PS_WAIT_FOR_BEACON;
		}

chip_reset:

	ath_debug_stat_interrupt(sc, status);

	if (sched) {
		/* turn off every interrupt */
		ath9k_hw_disable_interrupts(ah);
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;

#undef SCHED_INTR
}
/*
 * Bring the radio back up after ath_radio_disable(): power up the PCI
 * side, reset the chip on the current channel, restart RX, beacons and
 * interrupts, turn the LED on, and resume the queues and PLL poll.
 */
static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *channel = hw->conf.channel;
	int r;

	ath9k_ps_wakeup(sc);
	spin_lock_bh(&sc->sc_pcu_lock);
	ath9k_hw_configpcipowersave(ah, 0, 0);

	if (!ah->curchan)
		ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);

	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
	if (r) {
		/* Reset failure is logged but we still try to continue */
		ath_err(common,
			"Unable to reset channel (%u MHz), reset status %d\n",
			channel->center_freq, r);
	}

	ath9k_cmn_update_txpow(ah, sc->curtxpow,
			       sc->config.txpowlimit, &sc->curtxpow);
	if (ath_startrecv(sc) != 0) {
		ath_err(common, "Unable to restart recv logic\n");
		goto out;
	}
	if (sc->sc_flags & SC_OP_BEACONS)
		ath_set_beacon(sc);	/* restart beacons */

	/* Re-Enable  interrupts */
	ath9k_hw_set_interrupts(ah, ah->imask);

	/* Enable LED */
	ath9k_hw_cfg_output(ah, ah->led_pin,
			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	/* Active-low: driving 0 turns the LED on */
	ath9k_hw_set_gpio(ah, ah->led_pin, 0);

	ieee80211_wake_queues(hw);
	ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);

out:
	spin_unlock_bh(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
/*
 * Power the radio down: stop queues and periodic work, quiesce TX/RX
 * DMA, reset the chip one last time, disable the PHY and enter PCI
 * powersave. The LED is left on while idle-unassociated (see below).
 */
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_channel *channel = hw->conf.channel;
	int r;

	ath9k_ps_wakeup(sc);
	cancel_delayed_work_sync(&sc->hw_pll_work);
	spin_lock_bh(&sc->sc_pcu_lock);

	ieee80211_stop_queues(hw);

	/*
	 * Keep the LED on when the radio is disabled
	 * during idle unassociated state.
	 */
	if (!sc->ps_idle) {
		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
	}

	/* Disable interrupts */
	ath9k_hw_disable_interrupts(ah);

	ath_drain_all_txq(sc, false);	/* clear pending tx frames */

	ath_stoprecv(sc);		/* turn off frame recv */
	ath_flushrecv(sc);		/* flush recv queue */

	if (!ah->curchan)
		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);

	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
	if (r) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to reset channel (%u MHz), reset status %d\n",
			channel->center_freq, r);
	}

	ath9k_hw_phy_disable(ah);

	ath9k_hw_configpcipowersave(ah, 1, 1);

	spin_unlock_bh(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
/*
 * Full chip reset: stop ANI and the queues, drain TX and RX DMA, reset
 * the hardware on the current channel, then bring RX, tx power,
 * beacons, interrupts and ANI back up. With @retry_tx, drained frames
 * are kept for retransmission and the TX queues are rescheduled.
 *
 * Returns the ath9k_hw_reset() status (0 on success). Callers hold
 * sc->sc_pcu_lock (see the reset call sites in this file).
 */
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int r;

	sc->hw_busy_count = 0;

	/* Stop ANI */
	del_timer_sync(&common->ani.timer);

	ath9k_ps_wakeup(sc);

	ieee80211_stop_queues(hw);

	ath9k_hw_disable_interrupts(ah);
	ath_drain_all_txq(sc, retry_tx);

	ath_stoprecv(sc);
	ath_flushrecv(sc);

	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
	if (r)
		ath_err(common,
			"Unable to reset hardware; reset status %d\n", r);

	if (ath_startrecv(sc) != 0)
		ath_err(common, "Unable to start recv logic\n");

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath9k_cmn_update_txpow(ah, sc->curtxpow,
			       sc->config.txpowlimit, &sc->curtxpow);

	if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
		ath_set_beacon(sc);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, ah->imask);

	if (retry_tx) {
		int i;
		/* Reschedule every configured TX queue so kept frames go out */
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->tx.txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->tx.txq[i]);
				spin_unlock_bh(&sc->tx.txq[i].axq_lock);
			}
		}
	}

	ieee80211_wake_queues(hw);

	/* Start ANI */
	if (!common->disable_ani)
		ath_start_ani(common);

	ath9k_ps_restore(sc);

	return r;
}
  846. /**********************/
  847. /* mac80211 callbacks */
  848. /**********************/
/*
 * mac80211 start callback: power the chip up, perform the initial
 * hardware reset on the configured channel, start the RX engine and
 * program the interrupt mask.  Runs under sc->mutex; sc_pcu_lock is
 * held across the reset and RX start so tasklets cannot interfere.
 * Returns 0 on success or the (negative) reset/RX error code.
 */
static int ath9k_start(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int r;

	ath_dbg(common, ATH_DBG_CONFIG,
		"Starting driver with initial channel: %d MHz\n",
		curchan->center_freq);

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	/* setup initial channel */
	sc->chan_idx = curchan->hw_value;

	init_channel = ath9k_cmn_get_curchannel(hw, ah);

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_pcu_lock);
	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
	if (r) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			r, curchan->center_freq);
		spin_unlock_bh(&sc->sc_pcu_lock);
		goto mutex_unlock;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath9k_cmn_update_txpow(ah, sc->curtxpow,
			       sc->config.txpowlimit, &sc->curtxpow);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		ath_err(common, "Unable to start recv logic\n");
		r = -EIO;
		spin_unlock_bh(&sc->sc_pcu_lock);
		goto mutex_unlock;
	}
	spin_unlock_bh(&sc->sc_pcu_lock);

	/* Setup our intr mask. */
	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
		    ATH9K_INT_RXORN | ATH9K_INT_FATAL |
		    ATH9K_INT_GLOBAL;

	/* EDMA capable chips signal RX via high/low priority queues */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ah->imask |= ATH9K_INT_RXHP |
			     ATH9K_INT_RXLP |
			     ATH9K_INT_BB_WATCHDOG;
	else
		ah->imask |= ATH9K_INT_RX;

	ah->imask |= ATH9K_INT_GTT;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		ah->imask |= ATH9K_INT_CST;

	sc->sc_flags &= ~SC_OP_INVALID;
	sc->sc_ah->is_monitoring = false;

	/* Disable BMISS interrupt when we're not associated */
	ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	ath9k_hw_set_interrupts(ah, ah->imask);

	ieee80211_wake_queues(hw);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);

	/* Enable bluetooth coexistence if the hardware supports it */
	if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
	    !ah->btcoex_hw.enabled) {
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		ath9k_hw_btcoex_enable(ah);

		if (common->bus_ops->bt_coex_prep)
			common->bus_ops->bt_coex_prep(common);
		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
			ath9k_btcoex_timer_resume(sc);
	}

	if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
		common->bus_ops->extn_synch_en(common);

mutex_unlock:
	mutex_unlock(&sc->mutex);

	ath9k_ps_restore(sc);

	return r;
}
  940. static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
  941. {
  942. struct ath_softc *sc = hw->priv;
  943. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  944. struct ath_tx_control txctl;
  945. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  946. if (sc->ps_enabled) {
  947. /*
  948. * mac80211 does not set PM field for normal data frames, so we
  949. * need to update that based on the current PS mode.
  950. */
  951. if (ieee80211_is_data(hdr->frame_control) &&
  952. !ieee80211_is_nullfunc(hdr->frame_control) &&
  953. !ieee80211_has_pm(hdr->frame_control)) {
  954. ath_dbg(common, ATH_DBG_PS,
  955. "Add PM=1 for a TX frame while in PS mode\n");
  956. hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
  957. }
  958. }
  959. if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
  960. /*
  961. * We are using PS-Poll and mac80211 can request TX while in
  962. * power save mode. Need to wake up hardware for the TX to be
  963. * completed and if needed, also for RX of buffered frames.
  964. */
  965. ath9k_ps_wakeup(sc);
  966. if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  967. ath9k_hw_setrxabort(sc->sc_ah, 0);
  968. if (ieee80211_is_pspoll(hdr->frame_control)) {
  969. ath_dbg(common, ATH_DBG_PS,
  970. "Sending PS-Poll to pick a buffered frame\n");
  971. sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
  972. } else {
  973. ath_dbg(common, ATH_DBG_PS,
  974. "Wake up to complete TX\n");
  975. sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
  976. }
  977. /*
  978. * The actual restore operation will happen only after
  979. * the sc_flags bit is cleared. We are just dropping
  980. * the ps_usecount here.
  981. */
  982. ath9k_ps_restore(sc);
  983. }
  984. memset(&txctl, 0, sizeof(struct ath_tx_control));
  985. txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
  986. ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
  987. if (ath_tx_start(hw, skb, &txctl) != 0) {
  988. ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
  989. goto exit;
  990. }
  991. return;
  992. exit:
  993. dev_kfree_skb_any(skb);
  994. }
/*
 * mac80211 stop callback: flush deferred work, disable bluetooth
 * coexistence, drain the TX/RX paths with interrupts masked, put the
 * chip to sleep and mark the device invalid.  The strict ordering
 * (work cancel -> irq disable -> drain -> disable -> irq sync ->
 * tasklet kill) prevents the tasklets from touching dead hardware.
 */
static void ath9k_stop(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	mutex_lock(&sc->mutex);

	/* Stop all deferred work before touching the hardware. */
	cancel_delayed_work_sync(&sc->tx_complete_work);
	cancel_delayed_work_sync(&sc->hw_pll_work);
	cancel_work_sync(&sc->paprd_work);
	cancel_work_sync(&sc->hw_check_work);

	if (sc->sc_flags & SC_OP_INVALID) {
		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	/* Ensure HW is awake when we try to shut it down. */
	ath9k_ps_wakeup(sc);

	if (ah->btcoex_hw.enabled) {
		ath9k_hw_btcoex_disable(ah);
		if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
			ath9k_btcoex_timer_pause(sc);
	}

	spin_lock_bh(&sc->sc_pcu_lock);

	/* prevent tasklets to enable interrupts once we disable them */
	ah->imask &= ~ATH9K_INT_GLOBAL;

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	ath9k_hw_disable_interrupts(ah);

	/* NOTE(review): SC_OP_INVALID was already checked above with an
	 * early return, so this condition is always true here and the
	 * else branch appears unreachable — confirm before removing. */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_drain_all_txq(sc, false);
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->rx.rxlink = NULL;

	/* Drop any partially reassembled fragment. */
	if (sc->rx.frag) {
		dev_kfree_skb_any(sc->rx.frag);
		sc->rx.frag = NULL;
	}

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	/* we can now sync irq and kill any running tasklets, since we already
	 * disabled interrupts and not holding a spin lock */
	synchronize_irq(sc->irq);
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	ath9k_ps_restore(sc);

	sc->ps_idle = true;
	ath_radio_disable(sc, hw);
	sc->sc_flags |= SC_OP_INVALID;

	mutex_unlock(&sc->mutex);

	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
}
  1048. bool ath9k_uses_beacons(int type)
  1049. {
  1050. switch (type) {
  1051. case NL80211_IFTYPE_AP:
  1052. case NL80211_IFTYPE_ADHOC:
  1053. case NL80211_IFTYPE_MESH_POINT:
  1054. return true;
  1055. default:
  1056. return false;
  1057. }
  1058. }
/*
 * Release the beacon slot held by @vif.  Beaconing is paused around
 * the release so the beacon tasklet does not use the slot while it is
 * being torn down, and SC_OP_BEACONS is cleared so beacon state is
 * reconfigured from scratch later.
 */
static void ath9k_reclaim_beacon(struct ath_softc *sc,
				 struct ieee80211_vif *vif)
{
	struct ath_vif *avp = (void *)vif->drv_priv;

	ath9k_set_beaconing_status(sc, false);
	ath_beacon_return(sc, avp);
	ath9k_set_beaconing_status(sc, true);
	sc->sc_flags &= ~SC_OP_BEACONS;
}
  1068. static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
  1069. {
  1070. struct ath9k_vif_iter_data *iter_data = data;
  1071. int i;
  1072. if (iter_data->hw_macaddr)
  1073. for (i = 0; i < ETH_ALEN; i++)
  1074. iter_data->mask[i] &=
  1075. ~(iter_data->hw_macaddr[i] ^ mac[i]);
  1076. switch (vif->type) {
  1077. case NL80211_IFTYPE_AP:
  1078. iter_data->naps++;
  1079. break;
  1080. case NL80211_IFTYPE_STATION:
  1081. iter_data->nstations++;
  1082. break;
  1083. case NL80211_IFTYPE_ADHOC:
  1084. iter_data->nadhocs++;
  1085. break;
  1086. case NL80211_IFTYPE_MESH_POINT:
  1087. iter_data->nmeshes++;
  1088. break;
  1089. case NL80211_IFTYPE_WDS:
  1090. iter_data->nwds++;
  1091. break;
  1092. default:
  1093. iter_data->nothers++;
  1094. break;
  1095. }
  1096. }
  1097. /* Called with sc->mutex held. */
  1098. void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
  1099. struct ieee80211_vif *vif,
  1100. struct ath9k_vif_iter_data *iter_data)
  1101. {
  1102. struct ath_softc *sc = hw->priv;
  1103. struct ath_hw *ah = sc->sc_ah;
  1104. struct ath_common *common = ath9k_hw_common(ah);
  1105. /*
  1106. * Use the hardware MAC address as reference, the hardware uses it
  1107. * together with the BSSID mask when matching addresses.
  1108. */
  1109. memset(iter_data, 0, sizeof(*iter_data));
  1110. iter_data->hw_macaddr = common->macaddr;
  1111. memset(&iter_data->mask, 0xff, ETH_ALEN);
  1112. if (vif)
  1113. ath9k_vif_iter(iter_data, vif->addr, vif);
  1114. /* Get list of all active MAC addresses */
  1115. ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
  1116. iter_data);
  1117. }
/* Called with sc->mutex held. */
/*
 * Recompute driver-wide state derived from the set of active
 * interfaces: the BSSID mask, the hardware operating mode, the
 * TSF/MIB interrupt bits, and whether ANI should be running.
 */
static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_vif_iter_data iter_data;

	ath9k_calculate_iter_data(hw, vif, &iter_data);

	/* Set BSSID mask. */
	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);

	/* Set op-mode & TSF */
	if (iter_data.naps > 0) {
		/* Any AP interface forces AP opmode with TSF adjust. */
		ath9k_hw_set_tsfadjust(ah, 1);
		sc->sc_flags |= SC_OP_TSF_RESET;
		ah->opmode = NL80211_IFTYPE_AP;
	} else {
		/* Otherwise pick the "strongest" remaining mode:
		 * mesh > WDS (as AP) > IBSS > station. */
		ath9k_hw_set_tsfadjust(ah, 0);
		sc->sc_flags &= ~SC_OP_TSF_RESET;

		if (iter_data.nmeshes)
			ah->opmode = NL80211_IFTYPE_MESH_POINT;
		else if (iter_data.nwds)
			ah->opmode = NL80211_IFTYPE_AP;
		else if (iter_data.nadhocs)
			ah->opmode = NL80211_IFTYPE_ADHOC;
		else
			ah->opmode = NL80211_IFTYPE_STATION;
	}

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 */
	if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
		if (ah->config.enable_ani)
			ah->imask |= ATH9K_INT_MIB;
		ah->imask |= ATH9K_INT_TSFOOR;
	} else {
		ah->imask &= ~ATH9K_INT_MIB;
		ah->imask &= ~ATH9K_INT_TSFOOR;
	}

	ath9k_hw_set_interrupts(ah, ah->imask);

	/* Set up ANI */
	if (iter_data.naps > 0) {
		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;

		if (!common->disable_ani) {
			sc->sc_flags |= SC_OP_ANI_RUN;
			ath_start_ani(common);
		}
	} else {
		sc->sc_flags &= ~SC_OP_ANI_RUN;
		del_timer_sync(&common->ani.timer);
	}
}
  1171. /* Called with sc->mutex held, vif counts set up properly. */
  1172. static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
  1173. struct ieee80211_vif *vif)
  1174. {
  1175. struct ath_softc *sc = hw->priv;
  1176. ath9k_calculate_summary_state(hw, vif);
  1177. if (ath9k_uses_beacons(vif->type)) {
  1178. int error;
  1179. /* This may fail because upper levels do not have beacons
  1180. * properly configured yet. That's OK, we assume it
  1181. * will be properly configured and then we will be notified
  1182. * in the info_changed method and set up beacons properly
  1183. * there.
  1184. */
  1185. ath9k_set_beaconing_status(sc, false);
  1186. error = ath_beacon_alloc(sc, vif);
  1187. if (!error)
  1188. ath_beacon_config(sc, vif);
  1189. ath9k_set_beaconing_status(sc, true);
  1190. }
  1191. }
  1192. static int ath9k_add_interface(struct ieee80211_hw *hw,
  1193. struct ieee80211_vif *vif)
  1194. {
  1195. struct ath_softc *sc = hw->priv;
  1196. struct ath_hw *ah = sc->sc_ah;
  1197. struct ath_common *common = ath9k_hw_common(ah);
  1198. int ret = 0;
  1199. ath9k_ps_wakeup(sc);
  1200. mutex_lock(&sc->mutex);
  1201. switch (vif->type) {
  1202. case NL80211_IFTYPE_STATION:
  1203. case NL80211_IFTYPE_WDS:
  1204. case NL80211_IFTYPE_ADHOC:
  1205. case NL80211_IFTYPE_AP:
  1206. case NL80211_IFTYPE_MESH_POINT:
  1207. break;
  1208. default:
  1209. ath_err(common, "Interface type %d not yet supported\n",
  1210. vif->type);
  1211. ret = -EOPNOTSUPP;
  1212. goto out;
  1213. }
  1214. if (ath9k_uses_beacons(vif->type)) {
  1215. if (sc->nbcnvifs >= ATH_BCBUF) {
  1216. ath_err(common, "Not enough beacon buffers when adding"
  1217. " new interface of type: %i\n",
  1218. vif->type);
  1219. ret = -ENOBUFS;
  1220. goto out;
  1221. }
  1222. }
  1223. if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
  1224. ((vif->type == NL80211_IFTYPE_ADHOC) &&
  1225. sc->nvifs > 0)) {
  1226. ath_err(common, "Cannot create ADHOC interface when other"
  1227. " interfaces already exist.\n");
  1228. ret = -EINVAL;
  1229. goto out;
  1230. }
  1231. ath_dbg(common, ATH_DBG_CONFIG,
  1232. "Attach a VIF of type: %d\n", vif->type);
  1233. sc->nvifs++;
  1234. ath9k_do_vif_add_setup(hw, vif);
  1235. out:
  1236. mutex_unlock(&sc->mutex);
  1237. ath9k_ps_restore(sc);
  1238. return ret;
  1239. }
  1240. static int ath9k_change_interface(struct ieee80211_hw *hw,
  1241. struct ieee80211_vif *vif,
  1242. enum nl80211_iftype new_type,
  1243. bool p2p)
  1244. {
  1245. struct ath_softc *sc = hw->priv;
  1246. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1247. int ret = 0;
  1248. ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
  1249. mutex_lock(&sc->mutex);
  1250. ath9k_ps_wakeup(sc);
  1251. /* See if new interface type is valid. */
  1252. if ((new_type == NL80211_IFTYPE_ADHOC) &&
  1253. (sc->nvifs > 1)) {
  1254. ath_err(common, "When using ADHOC, it must be the only"
  1255. " interface.\n");
  1256. ret = -EINVAL;
  1257. goto out;
  1258. }
  1259. if (ath9k_uses_beacons(new_type) &&
  1260. !ath9k_uses_beacons(vif->type)) {
  1261. if (sc->nbcnvifs >= ATH_BCBUF) {
  1262. ath_err(common, "No beacon slot available\n");
  1263. ret = -ENOBUFS;
  1264. goto out;
  1265. }
  1266. }
  1267. /* Clean up old vif stuff */
  1268. if (ath9k_uses_beacons(vif->type))
  1269. ath9k_reclaim_beacon(sc, vif);
  1270. /* Add new settings */
  1271. vif->type = new_type;
  1272. vif->p2p = p2p;
  1273. ath9k_do_vif_add_setup(hw, vif);
  1274. out:
  1275. ath9k_ps_restore(sc);
  1276. mutex_unlock(&sc->mutex);
  1277. return ret;
  1278. }
/*
 * mac80211 remove_interface callback: drop the vif from the interface
 * count, release its beacon slot if it had one, and recompute the
 * summary state for the remaining interfaces.
 */
static void ath9k_remove_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	sc->nvifs--;

	/* Reclaim beacon resources */
	if (ath9k_uses_beacons(vif->type))
		ath9k_reclaim_beacon(sc, vif);

	ath9k_calculate_summary_state(hw, NULL);

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);
}
  1295. static void ath9k_enable_ps(struct ath_softc *sc)
  1296. {
  1297. struct ath_hw *ah = sc->sc_ah;
  1298. sc->ps_enabled = true;
  1299. if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
  1300. if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
  1301. ah->imask |= ATH9K_INT_TIM_TIMER;
  1302. ath9k_hw_set_interrupts(ah, ah->imask);
  1303. }
  1304. ath9k_hw_setrxabort(ah, 1);
  1305. }
  1306. }
  1307. static void ath9k_disable_ps(struct ath_softc *sc)
  1308. {
  1309. struct ath_hw *ah = sc->sc_ah;
  1310. sc->ps_enabled = false;
  1311. ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
  1312. if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
  1313. ath9k_hw_setrxabort(ah, 0);
  1314. sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
  1315. PS_WAIT_FOR_CAB |
  1316. PS_WAIT_FOR_PSPOLL_DATA |
  1317. PS_WAIT_FOR_TX_ACK);
  1318. if (ah->imask & ATH9K_INT_TIM_TIMER) {
  1319. ah->imask &= ~ATH9K_INT_TIM_TIMER;
  1320. ath9k_hw_set_interrupts(ah, ah->imask);
  1321. }
  1322. }
  1323. }
/*
 * mac80211 config callback: apply changes to radio idle state, power
 * save, monitor mode, operating channel and TX power.  The radio is
 * enabled first and disabled last so all other changes operate on a
 * powered-up chip.  Returns 0, or -EINVAL if the channel set fails.
 */
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &hw->conf;
	bool disable_radio = false;

	mutex_lock(&sc->mutex);

	/*
	 * Leave this as the first check because we need to turn on the
	 * radio if it was disabled before prior to processing the rest
	 * of the changes. Likewise we must only disable the radio towards
	 * the end.
	 */
	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
		if (!sc->ps_idle) {
			ath_radio_enable(sc, hw);
			ath_dbg(common, ATH_DBG_CONFIG,
				"not-idle: enabling radio\n");
		} else {
			disable_radio = true;
		}
	}

	/*
	 * We just prepare to enable PS. We have to wait until our AP has
	 * ACK'd our null data frame to disable RX otherwise we'll ignore
	 * those ACKs and end up retransmitting the same null data frames.
	 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
	 */
	if (changed & IEEE80211_CONF_CHANGE_PS) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (conf->flags & IEEE80211_CONF_PS)
			ath9k_enable_ps(sc);
		else
			ath9k_disable_ps(sc);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (conf->flags & IEEE80211_CONF_MONITOR) {
			ath_dbg(common, ATH_DBG_CONFIG,
				"Monitor mode is enabled\n");
			sc->sc_ah->is_monitoring = true;
		} else {
			ath_dbg(common, ATH_DBG_CONFIG,
				"Monitor mode is disabled\n");
			sc->sc_ah->is_monitoring = false;
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		struct ieee80211_channel *curchan = hw->conf.channel;
		int pos = curchan->hw_value;
		int old_pos = -1;
		unsigned long flags;

		if (ah->curchan)
			old_pos = ah->curchan - &ah->channels[0];

		/* Track off-channel state (e.g. during scans). */
		if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
			sc->sc_flags |= SC_OP_OFFCHANNEL;
		else
			sc->sc_flags &= ~SC_OP_OFFCHANNEL;

		ath_dbg(common, ATH_DBG_CONFIG,
			"Set channel: %d MHz type: %d\n",
			curchan->center_freq, conf->channel_type);

		ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
					  curchan, conf->channel_type);

		/* update survey stats for the old channel before switching */
		spin_lock_irqsave(&common->cc_lock, flags);
		ath_update_survey_stats(sc);
		spin_unlock_irqrestore(&common->cc_lock, flags);

		/*
		 * If the operating channel changes, change the survey in-use flags
		 * along with it.
		 * Reset the survey data for the new channel, unless we're switching
		 * back to the operating channel from an off-channel operation.
		 */
		if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
		    sc->cur_survey != &sc->survey[pos]) {

			if (sc->cur_survey)
				sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;

			sc->cur_survey = &sc->survey[pos];

			memset(sc->cur_survey, 0, sizeof(struct survey_info));
			sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
		} else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
			memset(&sc->survey[pos], 0, sizeof(struct survey_info));
		}

		if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
			ath_err(common, "Unable to set channel\n");
			mutex_unlock(&sc->mutex);
			return -EINVAL;
		}

		/*
		 * The most recent snapshot of channel->noisefloor for the old
		 * channel is only available after the hardware reset. Copy it to
		 * the survey stats now.
		 */
		if (old_pos >= 0)
			ath_update_survey_nf(sc, old_pos);
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		ath_dbg(common, ATH_DBG_CONFIG,
			"Set power: %d\n", conf->power_level);
		/* txpowlimit is kept in half-dBm units */
		sc->config.txpowlimit = 2 * conf->power_level;
		ath9k_ps_wakeup(sc);
		ath9k_cmn_update_txpow(ah, sc->curtxpow,
				       sc->config.txpowlimit, &sc->curtxpow);
		ath9k_ps_restore(sc);
	}

	if (disable_radio) {
		ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
		ath_radio_disable(sc, hw);
	}

	mutex_unlock(&sc->mutex);

	return 0;
}
/* mac80211 RX filter flags that this driver can honor in hardware. */
#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	FIF_ALLMULTI |				\
	FIF_CONTROL |				\
	FIF_PSPOLL |				\
	FIF_OTHER_BSS |				\
	FIF_BCN_PRBRESP_PROMISC |		\
	FIF_PROBE_REQ |				\
	FIF_FCSFAIL)
  1448. /* FIXME: sc->sc_full_reset ? */
  1449. static void ath9k_configure_filter(struct ieee80211_hw *hw,
  1450. unsigned int changed_flags,
  1451. unsigned int *total_flags,
  1452. u64 multicast)
  1453. {
  1454. struct ath_softc *sc = hw->priv;
  1455. u32 rfilt;
  1456. changed_flags &= SUPPORTED_FILTERS;
  1457. *total_flags &= SUPPORTED_FILTERS;
  1458. sc->rx.rxfilter = *total_flags;
  1459. ath9k_ps_wakeup(sc);
  1460. rfilt = ath_calcrxfilter(sc);
  1461. ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
  1462. ath9k_ps_restore(sc);
  1463. ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
  1464. "Set HW RX filter: 0x%x\n", rfilt);
  1465. }
  1466. static int ath9k_sta_add(struct ieee80211_hw *hw,
  1467. struct ieee80211_vif *vif,
  1468. struct ieee80211_sta *sta)
  1469. {
  1470. struct ath_softc *sc = hw->priv;
  1471. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1472. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1473. struct ieee80211_key_conf ps_key = { };
  1474. ath_node_attach(sc, sta);
  1475. if (vif->type != NL80211_IFTYPE_AP &&
  1476. vif->type != NL80211_IFTYPE_AP_VLAN)
  1477. return 0;
  1478. an->ps_key = ath_key_config(common, vif, sta, &ps_key);
  1479. return 0;
  1480. }
  1481. static void ath9k_del_ps_key(struct ath_softc *sc,
  1482. struct ieee80211_vif *vif,
  1483. struct ieee80211_sta *sta)
  1484. {
  1485. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1486. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1487. struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
  1488. if (!an->ps_key)
  1489. return;
  1490. ath_key_delete(common, &ps_key);
  1491. }
/*
 * mac80211 sta_remove callback: release the powersave key slot (if
 * one was reserved in ath9k_sta_add) and detach the driver node
 * state.  Always returns 0.
 */
static int ath9k_sta_remove(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;

	ath9k_del_ps_key(sc, vif, sta);
	ath_node_detach(sc, sta);

	return 0;
}
  1501. static void ath9k_sta_notify(struct ieee80211_hw *hw,
  1502. struct ieee80211_vif *vif,
  1503. enum sta_notify_cmd cmd,
  1504. struct ieee80211_sta *sta)
  1505. {
  1506. struct ath_softc *sc = hw->priv;
  1507. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1508. switch (cmd) {
  1509. case STA_NOTIFY_SLEEP:
  1510. an->sleeping = true;
  1511. if (ath_tx_aggr_sleep(sc, an))
  1512. ieee80211_sta_set_tim(sta);
  1513. break;
  1514. case STA_NOTIFY_AWAKE:
  1515. an->sleeping = false;
  1516. ath_tx_aggr_wakeup(sc, an);
  1517. break;
  1518. }
  1519. }
  1520. static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
  1521. const struct ieee80211_tx_queue_params *params)
  1522. {
  1523. struct ath_softc *sc = hw->priv;
  1524. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1525. struct ath_txq *txq;
  1526. struct ath9k_tx_queue_info qi;
  1527. int ret = 0;
  1528. if (queue >= WME_NUM_AC)
  1529. return 0;
  1530. txq = sc->tx.txq_map[queue];
  1531. ath9k_ps_wakeup(sc);
  1532. mutex_lock(&sc->mutex);
  1533. memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
  1534. qi.tqi_aifs = params->aifs;
  1535. qi.tqi_cwmin = params->cw_min;
  1536. qi.tqi_cwmax = params->cw_max;
  1537. qi.tqi_burstTime = params->txop;
  1538. ath_dbg(common, ATH_DBG_CONFIG,
  1539. "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
  1540. queue, txq->axq_qnum, params->aifs, params->cw_min,
  1541. params->cw_max, params->txop);
  1542. ret = ath_txq_update(sc, txq->axq_qnum, &qi);
  1543. if (ret)
  1544. ath_err(common, "TXQ Update failed\n");
  1545. if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
  1546. if (queue == WME_AC_BE && !ret)
  1547. ath_beaconq_config(sc);
  1548. mutex_unlock(&sc->mutex);
  1549. ath9k_ps_restore(sc);
  1550. return ret;
  1551. }
  1552. static int ath9k_set_key(struct ieee80211_hw *hw,
  1553. enum set_key_cmd cmd,
  1554. struct ieee80211_vif *vif,
  1555. struct ieee80211_sta *sta,
  1556. struct ieee80211_key_conf *key)
  1557. {
  1558. struct ath_softc *sc = hw->priv;
  1559. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1560. int ret = 0;
  1561. if (ath9k_modparam_nohwcrypt)
  1562. return -ENOSPC;
  1563. if (vif->type == NL80211_IFTYPE_ADHOC &&
  1564. (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
  1565. key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
  1566. !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
  1567. /*
  1568. * For now, disable hw crypto for the RSN IBSS group keys. This
  1569. * could be optimized in the future to use a modified key cache
  1570. * design to support per-STA RX GTK, but until that gets
  1571. * implemented, use of software crypto for group addressed
  1572. * frames is a acceptable to allow RSN IBSS to be used.
  1573. */
  1574. return -EOPNOTSUPP;
  1575. }
  1576. mutex_lock(&sc->mutex);
  1577. ath9k_ps_wakeup(sc);
  1578. ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
  1579. switch (cmd) {
  1580. case SET_KEY:
  1581. if (sta)
  1582. ath9k_del_ps_key(sc, vif, sta);
  1583. ret = ath_key_config(common, vif, sta, key);
  1584. if (ret >= 0) {
  1585. key->hw_key_idx = ret;
  1586. /* push IV and Michael MIC generation to stack */
  1587. key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
  1588. if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
  1589. key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
  1590. if (sc->sc_ah->sw_mgmt_crypto &&
  1591. key->cipher == WLAN_CIPHER_SUITE_CCMP)
  1592. key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
  1593. ret = 0;
  1594. }
  1595. break;
  1596. case DISABLE_KEY:
  1597. ath_key_delete(common, key);
  1598. break;
  1599. default:
  1600. ret = -EINVAL;
  1601. }
  1602. ath9k_ps_restore(sc);
  1603. mutex_unlock(&sc->mutex);
  1604. return ret;
  1605. }
  1606. stat