
/core/dp/txrx/ol_tx_queue.c

https://gitlab.com/Codeaurora/platform_vendor_qcom-opensource_wlan_qcacld-3.0
/*
 * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
#include <cdf_atomic.h>         /* cdf_atomic_read, etc. */
#include <ol_cfg.h>             /* ol_cfg_addba_retry */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h>   /* ol_txrx_sync, ol_tx_addba_conf */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_addba_req */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1, etc. */
#include <ol_txrx_types.h>      /* pdev stats */
#include <ol_tx_desc.h>         /* ol_tx_desc, ol_tx_desc_frame_list_free */
#include <ol_tx.h>              /* ol_tx_vdev_ll_pause_queue_send */
#include <ol_tx_queue.h>
#include <ol_txrx_dbg.h>        /* ENABLE_TX_QUEUE_LOG */
#include <cdf_types.h>          /* bool */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)

void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
        /* TO DO: log the queue pause */
        /* acquire the mutex lock, since we'll be modifying the queues */
        TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
        cdf_spin_lock_bh(&vdev->ll_pause.mutex);
        vdev->ll_pause.paused_reason |= reason;
        vdev->ll_pause.q_pause_cnt++;
        vdev->ll_pause.is_q_paused = true;
        cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
        DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_PAUSE, NULL, 0));
        TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}

void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
        /* TO DO: log the queue unpause */
        /* acquire the mutex lock, since we'll be modifying the queues */
        TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
        cdf_spin_lock_bh(&vdev->ll_pause.mutex);
        if (vdev->ll_pause.paused_reason & reason) {
                vdev->ll_pause.paused_reason &= ~reason;
                if (!vdev->ll_pause.paused_reason) {
                        vdev->ll_pause.is_q_paused = false;
                        vdev->ll_pause.q_unpause_cnt++;
                        cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
                        ol_tx_vdev_ll_pause_queue_send(vdev);
                } else {
                        cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
                }
        } else {
                cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
        }
        DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_UNPAUSE, NULL, 0));
        TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}
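
/*
 * Illustrative usage (editor's sketch, not part of the driver): because
 * ll_pause.paused_reason is a bitmask, pauses from independent reasons
 * stack, and the queue only resumes once every reason bit has been
 * cleared by a matching unpause.  For example:
 *
 *     ol_txrx_vdev_pause(vdev, OL_TXQ_PAUSE_REASON_FW);
 *     ol_txrx_vdev_pause(vdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
 *     ol_txrx_vdev_unpause(vdev, OL_TXQ_PAUSE_REASON_FW);
 *     // still paused: the thermal reason bit is still set
 *     ol_txrx_vdev_unpause(vdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
 *     // now unpaused: ol_tx_vdev_ll_pause_queue_send() drains the queue
 */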

void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
{
        cdf_spin_lock_bh(&vdev->ll_pause.mutex);
        cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
        vdev->ll_pause.is_q_timer_on = false;
        while (vdev->ll_pause.txq.head) {
                cdf_nbuf_t next = cdf_nbuf_next(vdev->ll_pause.txq.head);

                cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
                cdf_nbuf_unmap(vdev->pdev->osdev, vdev->ll_pause.txq.head,
                               CDF_DMA_TO_DEVICE);
                cdf_nbuf_tx_free(vdev->ll_pause.txq.head, NBUF_PKT_ERROR);
                vdev->ll_pause.txq.head = next;
        }
        vdev->ll_pause.txq.tail = NULL;
        vdev->ll_pause.txq.depth = 0;
        cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}
#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
 * @reason: reason
 *
 * Return: netif_reason_type
 */
enum netif_reason_type
ol_txrx_map_to_netif_reason_type(uint32_t reason)
{
        switch (reason) {
        case OL_TXQ_PAUSE_REASON_FW:
                return WLAN_FW_PAUSE;
        case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
                return WLAN_PEER_UNAUTHORISED;
        case OL_TXQ_PAUSE_REASON_TX_ABORT:
                return WLAN_TX_ABORT;
        case OL_TXQ_PAUSE_REASON_VDEV_STOP:
                return WLAN_VDEV_STOP;
        case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
                return WLAN_THERMAL_MITIGATION;
        default:
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s: reason not supported %d\n",
                           __func__, reason);
                return WLAN_REASON_TYPE_MAX;
        }
}
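
/*
 * Editor's note: WLAN_REASON_TYPE_MAX doubles as the "unmappable" return
 * value, so both callers below treat it as "drop the request", e.g.:
 *
 *     netif_reason = ol_txrx_map_to_netif_reason_type(reason);
 *     if (netif_reason == WLAN_REASON_TYPE_MAX)
 *             return;   // unknown reason: leave the netif queues alone
 */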

/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @vdev: vdev handle
 * @reason: reason
 *
 * Return: none
 */
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        enum netif_reason_type netif_reason;

        if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s: invalid pdev\n", __func__);
                return;
        }

        netif_reason = ol_txrx_map_to_netif_reason_type(reason);
        if (netif_reason == WLAN_REASON_TYPE_MAX)
                return;

        pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
}

/**
 * ol_txrx_vdev_unpause() - unpause vdev network queues
 * @vdev: vdev handle
 * @reason: reason
 *
 * Return: none
 */
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        enum netif_reason_type netif_reason;

        if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s: invalid pdev\n", __func__);
                return;
        }

        netif_reason = ol_txrx_map_to_netif_reason_type(reason);
        if (netif_reason == WLAN_REASON_TYPE_MAX)
                return;

        pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
                       netif_reason);
}
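
/*
 * Editor's note (an assumption, inferred from the pause_cb usage above):
 * in the flow-control-v2 build the txrx layer does not queue frames
 * itself; it forwards pause/unpause events to a callback the OS
 * interface layer registered in pdev->pause_cb, which disables or wakes
 * the kernel netif queues for the given vdev_id.
 */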

/**
 * ol_txrx_pdev_pause() - pause network queues for each vdev
 * @pdev: pdev handle
 * @reason: reason
 *
 * Return: none
 */
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = NULL, *tmp;

        TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
                ol_txrx_vdev_pause(vdev, reason);
        }
}

/**
 * ol_txrx_pdev_unpause() - unpause network queues for each vdev
 * @pdev: pdev handle
 * @reason: reason
 *
 * Return: none
 */
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = NULL, *tmp;

        TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
                ol_txrx_vdev_unpause(vdev, reason);
        }
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

/*--- LL tx throttle queue code --------------------------------------------*/
#if defined(QCA_SUPPORT_TX_THROTTLE)

uint8_t ol_tx_pdev_is_target_empty(void)
{
        /* TM TODO */
        return 1;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
        ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
        return;
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
        ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
        return;
}
#else
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
        return;
}

/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
        ol_tx_pdev_ll_pause_queue_send_all(pdev);
        return;
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

void ol_tx_pdev_throttle_phase_timer(void *context)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
        int ms;
        enum throttle_level cur_level;
        enum throttle_phase cur_phase;

        /* update the phase */
        pdev->tx_throttle.current_throttle_phase++;

        if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
                pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;

        if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
                /* Traffic is stopped */
                TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                           "throttle phase --> OFF\n");
                ol_txrx_thermal_pause(pdev);
                cur_level = pdev->tx_throttle.current_throttle_level;
                cur_phase = pdev->tx_throttle.current_throttle_phase;
                ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
                if (pdev->tx_throttle.current_throttle_level !=
                    THROTTLE_LEVEL_0) {
                        TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                                   "start timer %d ms\n", ms);
                        cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer,
                                                ms);
                }
        } else {
                /* Traffic can go */
                TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                           "throttle phase --> ON\n");
                ol_txrx_thermal_unpause(pdev);
                cur_level = pdev->tx_throttle.current_throttle_level;
                cur_phase = pdev->tx_throttle.current_throttle_phase;
                ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
                if (pdev->tx_throttle.current_throttle_level !=
                    THROTTLE_LEVEL_0) {
                        TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "start timer %d ms\n",
                                   ms);
                        cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer,
                                                ms);
                }
        }
}
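
/*
 * Editor's sketch of the resulting duty cycle (assuming a 100 ms period
 * and throttle level 2, so on-time = 100 >> 2 = 25 ms, off-time = 75 ms):
 *
 *     phase:    ON        OFF         ON        OFF       ...
 *     time:  |-25 ms-|---75 ms---|-25 ms-|---75 ms---|
 *
 * Each timer expiry flips the phase, calls ol_txrx_thermal_pause() or
 * ol_txrx_thermal_unpause() (which pause or unpause every vdev in the
 * flow-control-v2 build), and re-arms the timer with the duration of the
 * phase just entered; at THROTTLE_LEVEL_0 the timer is not re-armed, so
 * traffic flows unthrottled.
 */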

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
void ol_tx_pdev_throttle_tx_timer(void *context)
{
        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;

        ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
{
        int ms = 0;

        if (level >= THROTTLE_LEVEL_MAX) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                           "%s invalid throttle level set %d, ignoring\n",
                           __func__, level);
                return;
        }

        TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);

        /* Set the current throttle level */
        pdev->tx_throttle.current_throttle_level = (enum throttle_level)level;

        /* Reset the phase */
        pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;

        ol_txrx_thermal_unpause(pdev);

        /* Start with the new time */
        ms = pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_OFF];

        cdf_softirq_timer_cancel(&pdev->tx_throttle.phase_timer);
        if (level != THROTTLE_LEVEL_0)
                cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer, ms);
}
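
/*
 * Illustrative call sequence (editor's sketch): thermal-mitigation code
 * would throttle tx to the level-2 duty cycle, and later restore full
 * throughput, with:
 *
 *     ol_tx_throttle_set_level(pdev, THROTTLE_LEVEL_2);
 *     ...
 *     ol_tx_throttle_set_level(pdev, THROTTLE_LEVEL_0);
 *
 * Setting THROTTLE_LEVEL_0 unpauses the queues and cancels the phase
 * timer without re-arming it, so traffic stays unthrottled.
 */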

/*
 * This table stores the duty cycle for each level.
 * Example "on" time for level 2 with duty period 100 ms is:
 * "on" time = duty_period_ms >> throttle_duty_cycle_table[2]
 * "on" time = 100 ms >> 2 = 25 ms
 */
static uint8_t g_throttle_duty_cycle_table[THROTTLE_LEVEL_MAX] = { 0, 1, 2, 4 };

void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period)
{
        int i;

        /* Set the throttle period */
        pdev->tx_throttle.throttle_period_ms = period;

        TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level OFF ON\n");
        for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
                pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
                        pdev->tx_throttle.throttle_period_ms >>
                        g_throttle_duty_cycle_table[i];
                pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
                        pdev->tx_throttle.throttle_period_ms -
                        pdev->tx_throttle.throttle_time_ms[
                                i][THROTTLE_PHASE_ON];
                TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d %d %d\n", i,
                           pdev->tx_throttle.
                                throttle_time_ms[i][THROTTLE_PHASE_OFF],
                           pdev->tx_throttle.
                                throttle_time_ms[i][THROTTLE_PHASE_ON]);
        }
}
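
/*
 * Editor's worked example, reusing the 100 ms period from the table
 * comment above; the loop fills throttle_time_ms as:
 *
 *     level  OFF (ms)  ON (ms)
 *       0        0       100    (100 >> 0: no throttling)
 *       1       50        50    (100 >> 1)
 *       2       75        25    (100 >> 2)
 *       3       94         6    (100 >> 4: integer shift rounds down)
 */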

void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
{
        uint32_t throttle_period;

        pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
        pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
        cdf_spinlock_init(&pdev->tx_throttle.mutex);

        throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
        ol_tx_throttle_init_period(pdev, throttle_period);

        cdf_softirq_timer_init(pdev->osdev,
                               &pdev->tx_throttle.phase_timer,
                               ol_tx_pdev_throttle_phase_timer, pdev,
                               CDF_TIMER_TYPE_SW);

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
        cdf_softirq_timer_init(pdev->osdev,
                               &pdev->tx_throttle.tx_timer,
                               ol_tx_pdev_throttle_tx_timer, pdev,
                               CDF_TIMER_TYPE_SW);
#endif

        pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
}
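
/*
 * Editor's note (an assumption, not confirmed by this file alone):
 * ol_tx_throttle_init() is expected to run once during pdev attach,
 * before any caller invokes ol_tx_throttle_set_level(); until then the
 * device sits at THROTTLE_LEVEL_0 with throttling effectively disabled.
 */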
#endif /* QCA_SUPPORT_TX_THROTTLE */
/*--- End of LL tx throttle queue code ---------------------------------------*/