/net/mac80211/offchannel.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Off-channel operation helpers
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 * Copyright (C) 2019 Intel Corporation
 */
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

/*
 * Tell our hardware to disable PS.
 * Optionally inform AP that we will go to sleep so that it will buffer
 * the frames while we are doing off-channel work. This is optional
 * because we *may* be doing work on-operating channel, and want our
 * hardware unconditionally awake, but still let the AP send us normal frames.
 */
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	local->offchannel_ps_enabled = false;

	/* FIXME: what to do when local->pspolling is true? */

	del_timer_sync(&local->dynamic_ps_timer);
	del_timer_sync(&ifmgd->bcn_mon_timer);
	del_timer_sync(&ifmgd->conn_mon_timer);

	cancel_work_sync(&local->dynamic_ps_enable_work);

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		local->offchannel_ps_enabled = true;
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}

	if (!local->offchannel_ps_enabled ||
	    !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
		/*
		 * If power save was enabled, no need to send a nullfunc
		 * frame because AP knows that we are sleeping. But if the
		 * hardware is creating the nullfunc frame for power save
		 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
		 * enabled) and power save was enabled, the firmware just
		 * sent a null frame with power save disabled. So we need
		 * to send a new nullfunc frame to inform the AP that we
		 * are again sleeping.
		 */
		ieee80211_send_nullfunc(local, sdata, true);
}

/* inform AP that we are awake again, unless power save is enabled */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;

	if (!local->ps_sdata)
		ieee80211_send_nullfunc(local, sdata, false);
	else if (local->offchannel_ps_enabled) {
		/*
		 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
		 * will send a nullfunc frame with the powersave bit set
		 * even though the AP already knows that we are sleeping.
		 * This could be avoided by sending a null frame with power
		 * save bit disabled before enabling the power save, but
		 * this doesn't gain anything.
		 *
		 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
		 * to send a nullfunc frame because AP already knows that
		 * we are sleeping, let's just enable power save mode in
		 * hardware.
		 */
		/* TODO: Only set hardware if CONF_PS changed?
		 * TODO: Should we set offchannel_ps_enabled to false?
		 */
		local->hw.conf.flags |= IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	} else if (local->hw.conf.dynamic_ps_timeout > 0) {
		/*
		 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
		 * had been running before leaving the operating channel,
		 * restart the timer now and send a nullfunc frame to inform
		 * the AP that we are awake.
		 */
		ieee80211_send_nullfunc(local, sdata, false);
		mod_timer(&local->dynamic_ps_timer, jiffies +
			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
	}

	ieee80211_sta_reset_beacon_monitor(sdata);
	ieee80211_sta_reset_conn_monitor(sdata);
}
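
/*
 * Prepare all interfaces for leaving the operating channel: stop and
 * flush the TX queues, mark interfaces as off-channel, pause beaconing
 * and enable off-channel powersave on associated STA interfaces.
 */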
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	if (WARN_ON(local->use_chanctx))
		return;

	/*
	 * notify the AP about us leaving the channel and stop all
	 * STA interfaces.
	 */

	/*
	 * Stop queues and transmit all frames queued by the driver
	 * before sending nullfunc to enable powersave at the AP.
	 */
	ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
					false);
	ieee80211_flush_queues(local, NULL, false);

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
		    sdata->vif.type == NL80211_IFTYPE_NAN)
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

		/* Check to see if we should disable beaconing. */
		if (sdata->vif.bss_conf.enable_beacon) {
			set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
				&sdata->state);
			sdata->vif.bss_conf.enable_beacon = false;
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);
		}

		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
		    sdata->u.mgd.associated)
			ieee80211_offchannel_ps_enable(sdata);
	}
	mutex_unlock(&local->iflist_mtx);
}
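
/*
 * Undo ieee80211_offchannel_stop_vifs(): clear the off-channel state,
 * re-enable beaconing and powersave handling on the affected interfaces
 * and wake the TX queues again.
 */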
void ieee80211_offchannel_return(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	if (WARN_ON(local->use_chanctx))
		return;

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

		if (!ieee80211_sdata_running(sdata))
			continue;

		/* Tell AP we're back */
		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
		    sdata->u.mgd.associated)
			ieee80211_offchannel_ps_disable(sdata);

		if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
				       &sdata->state)) {
			sdata->vif.bss_conf.enable_beacon = true;
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);
		}
	}
	mutex_unlock(&local->iflist_mtx);

	ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
					false);
}
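
/*
 * Notify cfg80211 that a ROC item is done (remain-on-channel expired or
 * the mgmt frame was never transmitted), then unlink and free it.
 */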
static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
	/* was never transmitted */
	if (roc->frame) {
		cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
					roc->frame->data, roc->frame->len,
					false, GFP_KERNEL);
		ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
	}

	if (!roc->mgmt_tx_cookie)
		cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
						   roc->cookie, roc->chan,
						   GFP_KERNEL);
	else
		cfg80211_tx_mgmt_expired(&roc->sdata->wdev,
					 roc->mgmt_tx_cookie,
					 roc->chan, GFP_KERNEL);

	list_del(&roc->list);
	kfree(roc);
}
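
/*
 * Destroy started ROC items that were aborted, already begun in hardware,
 * or whose time has run out; return the shortest remaining duration among
 * the rest (LONG_MAX if none remain).
 */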
static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
						 unsigned long now)
{
	struct ieee80211_roc_work *roc, *tmp;
	long remaining_dur_min = LONG_MAX;

	lockdep_assert_held(&local->mtx);

	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		long remaining;

		if (!roc->started)
			break;

		remaining = roc->start_time +
			    msecs_to_jiffies(roc->duration) -
			    now;

		/* In case of HW ROC, it is possible that the HW finished the
		 * ROC session before the actual requested time. In such a case
		 * end the ROC session (disregarding the remaining time).
		 */
		if (roc->abort || roc->hw_begun || remaining <= 0)
			ieee80211_roc_notify_destroy(roc);
		else
			remaining_dur_min = min(remaining_dur_min, remaining);
	}

	return remaining_dur_min;
}
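
/*
 * Clean up finished software ROCs and, if any are still pending,
 * re-arm the roc_work timer for the shortest remaining duration.
 * Returns true if the work was rescheduled.
 */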
static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
				     unsigned long now)
{
	long dur = ieee80211_end_finished_rocs(local, now);

	if (dur == LONG_MAX)
		return false;

	mod_delayed_work(local->workqueue, &local->roc_work, dur);
	return true;
}
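
/*
 * Mark a ROC item as started: either transmit the pending mgmt frame
 * (mgmt-TX ROC) or tell cfg80211 that we are ready on channel.
 */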
static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
					 unsigned long start_time)
{
	if (WARN_ON(roc->notified))
		return;

	roc->start_time = start_time;
	roc->started = true;

	if (roc->mgmt_tx_cookie) {
		if (!WARN_ON(!roc->frame)) {
			ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
						  roc->chan->band, 0);
			roc->frame = NULL;
		}
	} else {
		cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
					  roc->chan, roc->req_duration,
					  GFP_KERNEL);
	}

	roc->notified = true;
}
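
/* work callback: the driver reported that the HW ROC actually began */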
static void ieee80211_hw_roc_start(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_start);
	struct ieee80211_roc_work *roc;

	mutex_lock(&local->mtx);

	list_for_each_entry(roc, &local->roc_list, list) {
		if (!roc->started)
			break;

		roc->hw_begun = true;
		ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
	}

	mutex_unlock(&local->mtx);
}

void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	local->hw_roc_start_time = jiffies;

	trace_api_ready_on_channel(local);

	ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
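
/*
 * Start the first ROC item on the list, coalescing consecutive items
 * for the same interface and channel. Uses the driver's HW ROC support
 * if available, otherwise does a software off-channel switch.
 */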
static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc, *tmp;
	enum ieee80211_roc_type type;
	u32 min_dur, max_dur;

	lockdep_assert_held(&local->mtx);

	if (WARN_ON(list_empty(&local->roc_list)))
		return;

	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
			       list);

	if (WARN_ON(roc->started))
		return;

	min_dur = roc->duration;
	max_dur = roc->duration;
	type = roc->type;

	list_for_each_entry(tmp, &local->roc_list, list) {
		if (tmp == roc)
			continue;
		if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
			break;
		max_dur = max(tmp->duration, max_dur);
		min_dur = min(tmp->duration, min_dur);
		type = max(tmp->type, type);
	}

	if (local->ops->remain_on_channel) {
		int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
						max_dur, type);

		if (ret) {
			wiphy_warn(local->hw.wiphy,
				   "failed to start next HW ROC (%d)\n", ret);
			/*
			 * queue the work struct again to avoid recursion
			 * when multiple failures occur
			 */
			list_for_each_entry(tmp, &local->roc_list, list) {
				if (tmp->sdata != roc->sdata ||
				    tmp->chan != roc->chan)
					break;
				tmp->started = true;
				tmp->abort = true;
			}
			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
			return;
		}

		/* we'll notify about the start once the HW calls back */
		list_for_each_entry(tmp, &local->roc_list, list) {
			if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
				break;
			tmp->started = true;
		}
	} else {
		/* If actually operating on the desired channel (with at least
		 * 20 MHz channel width) don't stop all the operations but still
		 * treat it as though the ROC operation started properly, so
		 * other ROC operations won't interfere with this one.
		 */
		roc->on_channel = roc->chan == local->_oper_chandef.chan &&
				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;

		/* start this ROC */
		ieee80211_recalc_idle(local);

		if (!roc->on_channel) {
			ieee80211_offchannel_stop_vifs(local);

			local->tmp_channel = roc->chan;
			ieee80211_hw_config(local, 0);
		}

		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
					     msecs_to_jiffies(min_dur));

		/* tell userspace or send frame(s) */
		list_for_each_entry(tmp, &local->roc_list, list) {
			if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
				break;

			tmp->on_channel = roc->on_channel;
			ieee80211_handle_roc_started(tmp, jiffies);
		}
	}
}
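
/* start the next pending ROC, or kick off a deferred scan if none is left */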
void ieee80211_start_next_roc(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc;

	lockdep_assert_held(&local->mtx);

	if (list_empty(&local->roc_list)) {
		ieee80211_run_deferred_scan(local);
		return;
	}

	/* defer roc if driver is not started (i.e. during reconfig) */
	if (local->in_reconfig)
		return;

	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
			       list);

	if (WARN_ON_ONCE(roc->started))
		return;

	if (local->ops->remain_on_channel) {
		_ieee80211_start_next_roc(local);
	} else {
		/* delay it a bit */
		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
					     round_jiffies_relative(HZ/2));
	}
}
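
/* software ROC state machine, called with local->mtx held */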
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc;
	bool on_channel;

	lockdep_assert_held(&local->mtx);

	if (WARN_ON(local->ops->remain_on_channel))
		return;

	roc = list_first_entry_or_null(&local->roc_list,
				       struct ieee80211_roc_work, list);
	if (!roc)
		return;

	if (!roc->started) {
		WARN_ON(local->use_chanctx);
		_ieee80211_start_next_roc(local);
	} else {
		on_channel = roc->on_channel;
		if (ieee80211_recalc_sw_work(local, jiffies))
			return;

		/* careful - roc pointer became invalid during recalc */

		if (!on_channel) {
			ieee80211_flush_queues(local, NULL, false);

			local->tmp_channel = NULL;
			ieee80211_hw_config(local, 0);

			ieee80211_offchannel_return(local);
		}

		ieee80211_recalc_idle(local);
		ieee80211_start_next_roc(local);
	}
}

static void ieee80211_roc_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, roc_work.work);

	mutex_lock(&local->mtx);
	__ieee80211_roc_work(local);
	mutex_unlock(&local->mtx);
}
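
/* work callback: the driver reported that the HW ROC has finished */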
static void ieee80211_hw_roc_done(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_done);

	mutex_lock(&local->mtx);

	ieee80211_end_finished_rocs(local, jiffies);

	/* if there's another roc, start it now */
	ieee80211_start_next_roc(local);

	mutex_unlock(&local->mtx);
}

void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_api_remain_on_channel_expired(local);

	ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
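
/*
 * Try to attach a new ROC request to an already started HW ROC on the
 * same channel; returns true if the new request could be combined.
 */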
static bool
ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
				  struct ieee80211_roc_work *new_roc,
				  struct ieee80211_roc_work *cur_roc)
{
	unsigned long now = jiffies;
	unsigned long remaining;

	if (WARN_ON(!cur_roc->started))
		return false;

	/* if it was scheduled in the hardware, but not started yet,
	 * we can only combine if the older one had a longer duration
	 */
	if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
		return false;

	remaining = cur_roc->start_time +
		    msecs_to_jiffies(cur_roc->duration) -
		    now;

	/* if it doesn't fit entirely, schedule a new one */
	if (new_roc->duration > jiffies_to_msecs(remaining))
		return false;

	/* add just after the current one so we combine their finish later */
	list_add(&new_roc->list, &cur_roc->list);

	/* if the existing one has already begun then let this one also
	 * begin, otherwise they'll both be marked properly by the work
	 * struct that runs once the driver notifies us of the beginning
	 */
	if (cur_roc->hw_begun) {
		new_roc->hw_begun = true;
		ieee80211_handle_roc_started(new_roc, now);
	}

	return true;
}
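
/*
 * Common entry point for remain-on-channel and off-channel mgmt TX:
 * allocate a ROC item, start it immediately if nothing else is pending,
 * otherwise queue it and try to combine it with compatible items.
 */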
static int ieee80211_start_roc_work(struct ieee80211_local *local,
				    struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_channel *channel,
				    unsigned int duration, u64 *cookie,
				    struct sk_buff *txskb,
				    enum ieee80211_roc_type type)
{
	struct ieee80211_roc_work *roc, *tmp;
	bool queued = false, combine_started = true;
	int ret;

	lockdep_assert_held(&local->mtx);

	if (local->use_chanctx && !local->ops->remain_on_channel)
		return -EOPNOTSUPP;

	roc = kzalloc(sizeof(*roc), GFP_KERNEL);
	if (!roc)
		return -ENOMEM;

	/*
	 * If the duration is zero, then the driver
	 * wouldn't actually do anything. Set it to
	 * 10 for now.
	 *
	 * TODO: cancel the off-channel operation
	 *       when we get the SKB's TX status and
	 *       the wait time was zero before.
	 */
	if (!duration)
		duration = 10;

	roc->chan = channel;
	roc->duration = duration;
	roc->req_duration = duration;
	roc->frame = txskb;
	roc->type = type;
	roc->sdata = sdata;

	/*
	 * cookie is either the roc cookie (for normal roc)
	 * or the SKB (for mgmt TX)
	 */
	if (!txskb) {
		roc->cookie = ieee80211_mgmt_tx_cookie(local);
		*cookie = roc->cookie;
	} else {
		roc->mgmt_tx_cookie = *cookie;
	}

	/* if there's no need to queue, handle it immediately */
	if (list_empty(&local->roc_list) &&
	    !local->scanning && !ieee80211_is_radar_required(local)) {
		/* if not HW assist, just queue & schedule work */
		if (!local->ops->remain_on_channel) {
			list_add_tail(&roc->list, &local->roc_list);
			ieee80211_queue_delayed_work(&local->hw,
						     &local->roc_work, 0);
		} else {
			/* otherwise actually kick it off here
			 * (for error handling)
			 */
			ret = drv_remain_on_channel(local, sdata, channel,
						    duration, type);
			if (ret) {
				kfree(roc);
				return ret;
			}
			roc->started = true;
			list_add_tail(&roc->list, &local->roc_list);
		}

		return 0;
	}

	/* otherwise handle queueing */

	list_for_each_entry(tmp, &local->roc_list, list) {
		if (tmp->chan != channel || tmp->sdata != sdata)
			continue;

		/*
		 * Extend this ROC if possible: If it hasn't started, add
		 * just after the new one to combine.
		 */
		if (!tmp->started) {
			list_add(&roc->list, &tmp->list);
			queued = true;
			break;
		}

		if (!combine_started)
			continue;

		if (!local->ops->remain_on_channel) {
			/* If there's no hardware remain-on-channel, and
			 * doing so won't push us over the maximum r-o-c
			 * we allow, then we can just add the new one to
			 * the list and mark it as having started now.
			 * If it would push over the limit, don't try to
			 * combine with other started ones (that haven't
			 * been running as long) but potentially sort it
			 * with others that had the same fate.
			 */
			unsigned long now = jiffies;
			u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
			struct wiphy *wiphy = local->hw.wiphy;
			u32 max_roc = wiphy->max_remain_on_channel_duration;

			if (elapsed + roc->duration > max_roc) {
				combine_started = false;
				continue;
			}

			list_add(&roc->list, &tmp->list);
			queued = true;
			roc->on_channel = tmp->on_channel;
			ieee80211_handle_roc_started(roc, now);
			ieee80211_recalc_sw_work(local, now);
			break;
		}

		queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
		if (queued)
			break;

		/* if it wasn't queued, perhaps it can be combined with
		 * another that also couldn't get combined previously,
		 * but no need to check for already started ones, since
		 * that can't work.
		 */
		combine_started = false;
	}

	if (!queued)
		list_add_tail(&roc->list, &local->roc_list);

	return 0;
}

int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
				struct ieee80211_channel *chan,
				unsigned int duration, u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;
	int ret;

	mutex_lock(&local->mtx);
	ret = ieee80211_start_roc_work(local, sdata, chan,
				       duration, cookie, NULL,
				       IEEE80211_ROC_TYPE_NORMAL);
	mutex_unlock(&local->mtx);

	return ret;
}
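
/* cancel a pending or running ROC identified by its (mgmt TX) cookie */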
static int ieee80211_cancel_roc(struct ieee80211_local *local,
				u64 cookie, bool mgmt_tx)
{
	struct ieee80211_roc_work *roc, *tmp, *found = NULL;
	int ret;

	if (!cookie)
		return -ENOENT;

	flush_work(&local->hw_roc_start);

	mutex_lock(&local->mtx);
	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		if (!mgmt_tx && roc->cookie != cookie)
			continue;
		else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
			continue;

		found = roc;
		break;
	}

	if (!found) {
		mutex_unlock(&local->mtx);
		return -ENOENT;
	}

	if (!found->started) {
		ieee80211_roc_notify_destroy(found);
		goto out_unlock;
	}

	if (local->ops->remain_on_channel) {
		ret = drv_cancel_remain_on_channel(local, roc->sdata);
		if (WARN_ON_ONCE(ret)) {
			mutex_unlock(&local->mtx);
			return ret;
		}

		/* TODO:
		 * if multiple items were combined here then we really shouldn't
		 * cancel them all - we should wait for as much time as needed
		 * for the longest remaining one, and only then cancel ...
		 */
		list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
			if (!roc->started)
				break;
			if (roc == found)
				found = NULL;
			ieee80211_roc_notify_destroy(roc);
		}

		/* that really must not happen - it was started */
		WARN_ON(found);

		ieee80211_start_next_roc(local);
	} else {
		/* go through work struct to return to the operating channel */
		found->abort = true;
		mod_delayed_work(local->workqueue, &local->roc_work, 0);
	}

 out_unlock:
	mutex_unlock(&local->mtx);

	return 0;
}

int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
				       struct wireless_dev *wdev, u64 cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;

	return ieee80211_cancel_roc(local, cookie, false);
}
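
/*
 * cfg80211 mgmt_tx handler: decide whether the frame can be sent on the
 * current channel or needs an off-channel (ROC) transmission, build the
 * frame and hand it either to the normal TX path or to the ROC machinery.
 */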
int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
		      struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct sta_info *sta;
	const struct ieee80211_mgmt *mgmt = (void *)params->buf;
	bool need_offchan = false;
	u32 flags;
	int ret;
	u8 *data;

	if (params->dont_wait_for_ack)
		flags = IEEE80211_TX_CTL_NO_ACK;
	else
		flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
			IEEE80211_TX_CTL_REQ_TX_STATUS;

	if (params->no_cck)
		flags |= IEEE80211_TX_CTL_NO_CCK_RATE;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_ADHOC:
		if (!sdata->vif.bss_conf.ibss_joined)
			need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
		/* fall through */
	case NL80211_IFTYPE_MESH_POINT:
		if (ieee80211_vif_is_mesh(&sdata->vif) &&
		    !sdata->u.mesh.mesh_id_len)
			need_offchan = true;
#endif
		/* fall through */
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		    !ieee80211_vif_is_mesh(&sdata->vif) &&
		    !rcu_access_pointer(sdata->bss->beacon))
			need_offchan = true;
		if (!ieee80211_is_action(mgmt->frame_control) ||
		    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
		    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
		    mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
			break;
		rcu_read_lock();
		sta = sta_info_get_bss(sdata, mgmt->da);
		rcu_read_unlock();
		if (!sta)
			return -ENOLINK;
		break;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		sdata_lock(sdata);
		if (!sdata->u.mgd.associated ||
		    (params->offchan && params->wait &&
		     local->ops->remain_on_channel &&
		     memcmp(sdata->u.mgd.associated->bssid,
			    mgmt->bssid, ETH_ALEN)))
			need_offchan = true;
		sdata_unlock(sdata);
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		need_offchan = true;
		break;
	case NL80211_IFTYPE_NAN:
	default:
		return -EOPNOTSUPP;
	}

	/* configurations requiring offchan cannot work if no channel has been
	 * specified
	 */
	if (need_offchan && !params->chan)
		return -EINVAL;

	mutex_lock(&local->mtx);

	/* Check if the operating channel is the requested channel */
	if (!need_offchan) {
		struct ieee80211_chanctx_conf *chanctx_conf;

		rcu_read_lock();
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

		if (chanctx_conf) {
			need_offchan = params->chan &&
				       (params->chan !=
					chanctx_conf->def.chan);
		} else if (!params->chan) {
			ret = -EINVAL;
			rcu_read_unlock();
			goto out_unlock;
		} else {
			need_offchan = true;
		}
		rcu_read_unlock();
	}

	if (need_offchan && !params->offchan) {
		ret = -EBUSY;
		goto out_unlock;
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
	if (!skb) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);

	data = skb_put_data(skb, params->buf, params->len);

	/* Update CSA counters */
	if (sdata->vif.csa_active &&
	    (sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
	     sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
	    params->n_csa_offsets) {
		int i;
		struct beacon_data *beacon = NULL;

		rcu_read_lock();

		if (sdata->vif.type == NL80211_IFTYPE_AP)
			beacon = rcu_dereference(sdata->u.ap.beacon);
		else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
			beacon = rcu_dereference(sdata->u.ibss.presp);
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			beacon = rcu_dereference(sdata->u.mesh.beacon);

		if (beacon)
			for (i = 0; i < params->n_csa_offsets; i++)
				data[params->csa_offsets[i]] =
					beacon->csa_current_counter;

		rcu_read_unlock();
	}

	IEEE80211_SKB_CB(skb)->flags = flags;

	skb->dev = sdata->dev;

	if (!params->dont_wait_for_ack) {
		/* make a copy to preserve the frame contents
		 * in case of encryption.
		 */
		ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
		if (ret) {
			kfree_skb(skb);
			goto out_unlock;
		}
	} else {
		/* Assign a dummy non-zero cookie, it's not sent to
		 * userspace in this case but we rely on its value
		 * internally in the need_offchan case to distinguish
		 * mgmt-tx from remain-on-channel.
		 */
		*cookie = 0xffffffff;
	}

	if (!need_offchan) {
		ieee80211_tx_skb(sdata, skb);
		ret = 0;
		goto out_unlock;
	}

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
	if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		IEEE80211_SKB_CB(skb)->hw_queue =
			local->hw.offchannel_tx_hw_queue;

	/* This will handle all kinds of coalescing and immediate TX */
	ret = ieee80211_start_roc_work(local, sdata, params->chan,
				       params->wait, cookie, skb,
				       IEEE80211_ROC_TYPE_MGMT_TX);
	if (ret)
		ieee80211_free_txskb(&local->hw, skb);
 out_unlock:
	mutex_unlock(&local->mtx);
	return ret;
}

int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
				  struct wireless_dev *wdev, u64 cookie)
{
	struct ieee80211_local *local = wiphy_priv(wiphy);

	return ieee80211_cancel_roc(local, cookie, true);
}

void ieee80211_roc_setup(struct ieee80211_local *local)
{
	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
	INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
	INIT_LIST_HEAD(&local->roc_list);
}
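
/*
 * Remove all ROC items belonging to @sdata (or all items if @sdata is
 * NULL), aborting any that have already started.
 */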
void ieee80211_roc_purge(struct ieee80211_local *local,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_roc_work *roc, *tmp;
	bool work_to_do = false;

	mutex_lock(&local->mtx);
	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		if (sdata && roc->sdata != sdata)
			continue;

		if (roc->started) {
			if (local->ops->remain_on_channel) {
				/* can race, so ignore return value */
				drv_cancel_remain_on_channel(local, sdata);
				ieee80211_roc_notify_destroy(roc);
			} else {
				roc->abort = true;
				work_to_do = true;
			}
		} else {
			ieee80211_roc_notify_destroy(roc);
		}
	}
	if (work_to_do)
		__ieee80211_roc_work(local);
	mutex_unlock(&local->mtx);
}