/drivers/staging/ath6kl/reorder/rcv_aggr.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2 · C · 661 lines · 434 code · 103 blank · 124 comment · 84 complexity · 13529efa990b782c11a2545636d1b226 MD5 · raw file

  1. /*
  2. *
  3. * Copyright (c) 2010 Atheros Communications Inc.
  4. * All rights reserved.
  5. *
  6. *
  7. //
  8. // Permission to use, copy, modify, and/or distribute this software for any
  9. // purpose with or without fee is hereby granted, provided that the above
  10. // copyright notice and this permission notice appear in all copies.
  11. //
  12. // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  13. // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  14. // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  15. // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  16. // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  17. // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  18. // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  19. //
  20. //
  21. *
  22. */
  23. #include <a_config.h>
  24. #include <athdefs.h>
  25. #include <a_osapi.h>
  26. #include <a_debug.h>
  27. #include "pkt_log.h"
  28. #include "aggr_recv_api.h"
  29. #include "aggr_rx_internal.h"
  30. #include "wmi.h"
  31. extern int
  32. wmi_dot3_2_dix(void *osbuf);
  33. static void
  34. aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf);
  35. static void
  36. aggr_timeout(unsigned long arg);
  37. static void
  38. aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order);
  39. static void
  40. aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q);
  41. static void *
  42. aggr_get_osbuf(struct aggr_info *p_aggr);
  43. void *
  44. aggr_init(ALLOC_NETBUFS netbuf_allocator)
  45. {
  46. struct aggr_info *p_aggr = NULL;
  47. struct rxtid *rxtid;
  48. u8 i;
  49. int status = 0;
  50. A_PRINTF("In aggr_init..\n");
  51. do {
  52. p_aggr = A_MALLOC(sizeof(struct aggr_info));
  53. if(!p_aggr) {
  54. A_PRINTF("Failed to allocate memory for aggr_node\n");
  55. status = A_ERROR;
  56. break;
  57. }
  58. /* Init timer and data structures */
  59. A_MEMZERO(p_aggr, sizeof(struct aggr_info));
  60. p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
  61. A_INIT_TIMER(&p_aggr->timer, aggr_timeout, p_aggr);
  62. p_aggr->timerScheduled = false;
  63. A_NETBUF_QUEUE_INIT(&p_aggr->freeQ);
  64. p_aggr->netbuf_allocator = netbuf_allocator;
  65. p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
  66. for(i = 0; i < NUM_OF_TIDS; i++) {
  67. rxtid = AGGR_GET_RXTID(p_aggr, i);
  68. rxtid->aggr = false;
  69. rxtid->progress = false;
  70. rxtid->timerMon = false;
  71. A_NETBUF_QUEUE_INIT(&rxtid->q);
  72. A_MUTEX_INIT(&rxtid->lock);
  73. }
  74. }while(false);
  75. A_PRINTF("going out of aggr_init..status %s\n",
  76. (status == 0) ? "OK":"Error");
  77. if (status) {
  78. /* Cleanup */
  79. aggr_module_destroy(p_aggr);
  80. }
  81. return ((status == 0) ? p_aggr : NULL);
  82. }
  83. /* utility function to clear rx hold_q for a tid */
  84. static void
  85. aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
  86. {
  87. struct rxtid *rxtid;
  88. struct rxtid_stats *stats;
  89. A_ASSERT(tid < NUM_OF_TIDS && p_aggr);
  90. rxtid = AGGR_GET_RXTID(p_aggr, tid);
  91. stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
  92. if(rxtid->aggr) {
  93. aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
  94. }
  95. rxtid->aggr = false;
  96. rxtid->progress = false;
  97. rxtid->timerMon = false;
  98. rxtid->win_sz = 0;
  99. rxtid->seq_next = 0;
  100. rxtid->hold_q_sz = 0;
  101. if(rxtid->hold_q) {
  102. kfree(rxtid->hold_q);
  103. rxtid->hold_q = NULL;
  104. }
  105. A_MEMZERO(stats, sizeof(struct rxtid_stats));
  106. }
  107. void
  108. aggr_module_destroy(void *cntxt)
  109. {
  110. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  111. struct rxtid *rxtid;
  112. u8 i, k;
  113. A_PRINTF("%s(): aggr = %p\n",_A_FUNCNAME_, p_aggr);
  114. A_ASSERT(p_aggr);
  115. if(p_aggr) {
  116. if(p_aggr->timerScheduled) {
  117. A_UNTIMEOUT(&p_aggr->timer);
  118. p_aggr->timerScheduled = false;
  119. }
  120. for(i = 0; i < NUM_OF_TIDS; i++) {
  121. rxtid = AGGR_GET_RXTID(p_aggr, i);
  122. /* Free the hold q contents and hold_q*/
  123. if(rxtid->hold_q) {
  124. for(k = 0; k< rxtid->hold_q_sz; k++) {
  125. if(rxtid->hold_q[k].osbuf) {
  126. A_NETBUF_FREE(rxtid->hold_q[k].osbuf);
  127. }
  128. }
  129. kfree(rxtid->hold_q);
  130. }
  131. /* Free the dispatch q contents*/
  132. while(A_NETBUF_QUEUE_SIZE(&rxtid->q)) {
  133. A_NETBUF_FREE(A_NETBUF_DEQUEUE(&rxtid->q));
  134. }
  135. if (A_IS_MUTEX_VALID(&rxtid->lock)) {
  136. A_MUTEX_DELETE(&rxtid->lock);
  137. }
  138. }
  139. /* free the freeQ and its contents*/
  140. while(A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
  141. A_NETBUF_FREE(A_NETBUF_DEQUEUE(&p_aggr->freeQ));
  142. }
  143. kfree(p_aggr);
  144. }
  145. A_PRINTF("out aggr_module_destroy\n");
  146. }
  147. void
  148. aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn)
  149. {
  150. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  151. A_ASSERT(p_aggr && fn && dev);
  152. p_aggr->rx_fn = fn;
  153. p_aggr->dev = dev;
  154. }
  155. void
  156. aggr_process_bar(void *cntxt, u8 tid, u16 seq_no)
  157. {
  158. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  159. struct rxtid_stats *stats;
  160. A_ASSERT(p_aggr);
  161. stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
  162. stats->num_bar++;
  163. aggr_deque_frms(p_aggr, tid, seq_no, ALL_SEQNO);
  164. }
  165. void
  166. aggr_recv_addba_req_evt(void *cntxt, u8 tid, u16 seq_no, u8 win_sz)
  167. {
  168. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  169. struct rxtid *rxtid;
  170. struct rxtid_stats *stats;
  171. A_ASSERT(p_aggr);
  172. rxtid = AGGR_GET_RXTID(p_aggr, tid);
  173. stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
  174. A_PRINTF("%s(): win_sz = %d aggr %d\n", _A_FUNCNAME_, win_sz, rxtid->aggr);
  175. if(win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) {
  176. A_PRINTF("win_sz %d, tid %d\n", win_sz, tid);
  177. }
  178. if(rxtid->aggr) {
  179. /* Just go and deliver all the frames up from this
  180. * queue, as if we got DELBA and re-initialize the queue
  181. */
  182. aggr_delete_tid_state(p_aggr, tid);
  183. }
  184. rxtid->seq_next = seq_no;
  185. /* create these queues, only upon receiving of ADDBA for a
  186. * tid, reducing memory requirement
  187. */
  188. rxtid->hold_q = A_MALLOC(HOLD_Q_SZ(win_sz));
  189. if((rxtid->hold_q == NULL)) {
  190. A_PRINTF("Failed to allocate memory, tid = %d\n", tid);
  191. A_ASSERT(0);
  192. }
  193. A_MEMZERO(rxtid->hold_q, HOLD_Q_SZ(win_sz));
  194. /* Update rxtid for the window sz */
  195. rxtid->win_sz = win_sz;
  196. /* hold_q_sz inicates the depth of holding q - which is
  197. * a factor of win_sz. Compute once, as it will be used often
  198. */
  199. rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
  200. /* There should be no frames on q - even when second ADDBA comes in.
  201. * If aggr was previously ON on this tid, we would have cleaned up
  202. * the q
  203. */
  204. if(A_NETBUF_QUEUE_SIZE(&rxtid->q) != 0) {
  205. A_PRINTF("ERROR: Frames still on queue ?\n");
  206. A_ASSERT(0);
  207. }
  208. rxtid->aggr = true;
  209. }
  210. void
  211. aggr_recv_delba_req_evt(void *cntxt, u8 tid)
  212. {
  213. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  214. struct rxtid *rxtid;
  215. A_ASSERT(p_aggr);
  216. A_PRINTF("%s(): tid %d\n", _A_FUNCNAME_, tid);
  217. rxtid = AGGR_GET_RXTID(p_aggr, tid);
  218. if(rxtid->aggr) {
  219. aggr_delete_tid_state(p_aggr, tid);
  220. }
  221. }
  222. static void
  223. aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order)
  224. {
  225. struct rxtid *rxtid;
  226. struct osbuf_hold_q *node;
  227. u16 idx, idx_end, seq_end;
  228. struct rxtid_stats *stats;
  229. A_ASSERT(p_aggr);
  230. rxtid = AGGR_GET_RXTID(p_aggr, tid);
  231. stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
  232. /* idx is absolute location for first frame */
  233. idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
  234. /* idx_end is typically the last possible frame in the window,
  235. * but changes to 'the' seq_no, when BAR comes. If seq_no
  236. * is non-zero, we will go up to that and stop.
  237. * Note: last seq no in current window will occupy the same
  238. * index position as index that is just previous to start.
  239. * An imp point : if win_sz is 7, for seq_no space of 4095,
  240. * then, there would be holes when sequence wrap around occurs.
  241. * Target should judiciously choose the win_sz, based on
  242. * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
  243. * 2, 4, 8, 16 win_sz works fine).
  244. * We must deque from "idx" to "idx_end", including both.
  245. */
  246. seq_end = (seq_no) ? seq_no : rxtid->seq_next;
  247. idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
  248. /* Critical section begins */
  249. A_MUTEX_LOCK(&rxtid->lock);
  250. do {
  251. node = &rxtid->hold_q[idx];
  252. if((order == CONTIGUOUS_SEQNO) && (!node->osbuf))
  253. break;
  254. /* chain frames and deliver frames bcos:
  255. * 1. either the frames are in order and window is contiguous, OR
  256. * 2. we need to deque frames, irrespective of holes
  257. */
  258. if(node->osbuf) {
  259. if(node->is_amsdu) {
  260. aggr_slice_amsdu(p_aggr, rxtid, &node->osbuf);
  261. } else {
  262. A_NETBUF_ENQUEUE(&rxtid->q, node->osbuf);
  263. }
  264. node->osbuf = NULL;
  265. } else {
  266. stats->num_hole++;
  267. }
  268. /* window is moving */
  269. rxtid->seq_next = IEEE80211_NEXT_SEQ_NO(rxtid->seq_next);
  270. idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
  271. } while(idx != idx_end);
  272. /* Critical section ends */
  273. A_MUTEX_UNLOCK(&rxtid->lock);
  274. stats->num_delivered += A_NETBUF_QUEUE_SIZE(&rxtid->q);
  275. aggr_dispatch_frames(p_aggr, &rxtid->q);
  276. }
  277. static void *
  278. aggr_get_osbuf(struct aggr_info *p_aggr)
  279. {
  280. void *buf = NULL;
  281. /* Starving for buffers? get more from OS
  282. * check for low netbuffers( < 1/4 AGGR_NUM_OF_FREE_NETBUFS) :
  283. * re-allocate bufs if so
  284. * allocate a free buf from freeQ
  285. */
  286. if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ) < (AGGR_NUM_OF_FREE_NETBUFS >> 2)) {
  287. p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS);
  288. }
  289. if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) {
  290. buf = A_NETBUF_DEQUEUE(&p_aggr->freeQ);
  291. }
  292. return buf;
  293. }
  294. static void
  295. aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf)
  296. {
  297. void *new_buf;
  298. u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
  299. u8 *framep;
  300. /* Frame format at this point:
  301. * [DIX hdr | 802.3 | 802.3 | ... | 802.3]
  302. *
  303. * Strip the DIX header.
  304. * Iterate through the osbuf and do:
  305. * grab a free netbuf from freeQ
  306. * find the start and end of a frame
  307. * copy it to netbuf(Vista can do better here)
  308. * convert all msdu's(802.3) frames to upper layer format - os routine
  309. * -for now lets convert from 802.3 to dix
  310. * enque this to dispatch q of tid
  311. * repeat
  312. * free the osbuf - to OS. It's been sliced.
  313. */
  314. mac_hdr_len = sizeof(ATH_MAC_HDR);
  315. framep = A_NETBUF_DATA(*osbuf) + mac_hdr_len;
  316. amsdu_len = A_NETBUF_LEN(*osbuf) - mac_hdr_len;
  317. while(amsdu_len > mac_hdr_len) {
  318. /* Begin of a 802.3 frame */
  319. payload_8023_len = A_BE2CPU16(((ATH_MAC_HDR *)framep)->typeOrLen);
  320. #define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
  321. #define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
  322. if(payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
  323. A_PRINTF("802.3 AMSDU frame bound check failed. len %d\n", payload_8023_len);
  324. break;
  325. }
  326. frame_8023_len = payload_8023_len + mac_hdr_len;
  327. new_buf = aggr_get_osbuf(p_aggr);
  328. if(new_buf == NULL) {
  329. A_PRINTF("No buffer available \n");
  330. break;
  331. }
  332. memcpy(A_NETBUF_DATA(new_buf), framep, frame_8023_len);
  333. A_NETBUF_PUT(new_buf, frame_8023_len);
  334. if (wmi_dot3_2_dix(new_buf) != 0) {
  335. A_PRINTF("dot3_2_dix err..\n");
  336. A_NETBUF_FREE(new_buf);
  337. break;
  338. }
  339. A_NETBUF_ENQUEUE(&rxtid->q, new_buf);
  340. /* Is this the last subframe within this aggregate ? */
  341. if ((amsdu_len - frame_8023_len) == 0) {
  342. break;
  343. }
  344. /* Add the length of A-MSDU subframe padding bytes -
  345. * Round to nearest word.
  346. */
  347. frame_8023_len = ((frame_8023_len + 3) & ~3);
  348. framep += frame_8023_len;
  349. amsdu_len -= frame_8023_len;
  350. }
  351. A_NETBUF_FREE(*osbuf);
  352. *osbuf = NULL;
  353. }
/*
 * Entry point for every received data frame on this connection.
 *
 * If the tid is not aggregating, A-MSDUs are sliced and dispatched
 * immediately; plain MPDUs are left in *osbuf for the caller. Otherwise
 * the frame is parked at its window slot in hold_q (taking ownership:
 * *osbuf is set to NULL), any in-order run at the window start is
 * delivered, and the flush timer is armed if frames remain parked.
 *
 * cntxt    - aggregation context from aggr_init()
 * tid      - traffic id, must be < NUM_OF_TIDS
 * seq_no   - 802.11 sequence number of this frame
 * is_amsdu - true if the frame is an aggregated A-MSDU
 * osbuf    - in/out: frame buffer; NULLed when ownership is taken
 */
void
aggr_process_recv_frm(void *cntxt, u8 tid, u16 seq_no, bool is_amsdu, void **osbuf)
{
    struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
    struct rxtid *rxtid;
    struct rxtid_stats *stats;
    u16 idx, st, cur, end;
    u16 *log_idx;
    struct osbuf_hold_q *node;
    PACKET_LOG *log;

    A_ASSERT(p_aggr);
    A_ASSERT(tid < NUM_OF_TIDS);

    rxtid = AGGR_GET_RXTID(p_aggr, tid);
    stats = AGGR_GET_RXTID_STATS(p_aggr, tid);
    stats->num_into_aggr++;

    /* No reorder state on this tid: slice A-MSDUs straight through;
     * a plain MPDU stays in *osbuf for the caller to deliver. */
    if (!rxtid->aggr) {
        if (is_amsdu) {
            aggr_slice_amsdu(p_aggr, rxtid, osbuf);
            stats->num_amsdu++;
            aggr_dispatch_frames(p_aggr, &rxtid->q);
        }
        return;
    }

    /* Check whether the incoming sequence number falls in the current
     * window [st, end] (mod IEEE80211_MAX_SEQ_NO+1) */
    st = rxtid->seq_next;
    cur = seq_no;
    end = (st + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;

    /* Log the pkt info for future analysis */
    log = &p_aggr->pkt_log;
    log_idx = &log->last_idx;
    log->info[*log_idx].cur = cur;
    log->info[*log_idx].st = st;
    log->info[*log_idx].end = end;
    *log_idx = IEEE80211_NEXT_SEQ_NO(*log_idx);

    /* First disjunct: window does not wrap; second: it wraps past the
     * max sequence number. Either way, cur lies outside [st, end]. */
    if(((st < end) && (cur < st || cur > end)) ||
       ((st > end) && (cur > end) && (cur < st))) {
        /* The cur frame is outside the window. Since we know our
         * target would not do this without reason, assume the window
         * has legitimately moved and re-anchor it on cur. */
        u16 extended_end;

        /* extended_end is one further window-length past end; frames
         * in (end, extended_end] can be kept by only shifting the
         * window, frames beyond it force a full flush. */
        extended_end = (end + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO;

        if(((end < extended_end) && (cur < end || cur > extended_end)) ||
           ((end > extended_end) && (cur > extended_end) && (cur < end))) {
            /* cur is beyond even the extended window: dequeue all
             * frames in the queue and shift the window so the new
             * frame becomes the last frame in it */
            aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO);
            if(cur >= rxtid->hold_q_sz-1) {
                rxtid->seq_next = cur - (rxtid->hold_q_sz-1);
            }else{
                /* window start wraps below 0 in seq-no space */
                rxtid->seq_next = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
            }
        } else {
            /* cur is within one window-length of end: dequeue only the
             * frames that fall outside the new shifted window */
            if(cur >= rxtid->hold_q_sz-1) {
                st = cur - (rxtid->hold_q_sz-1);
            }else{
                st = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur);
            }
            aggr_deque_frms(p_aggr, tid, st, ALL_SEQNO);
        }

        stats->num_oow++;
    }

    idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

    /* Park the frame in its hold_q slot (under the tid lock, shared
     * with aggr_deque_frms) */
    node = &rxtid->hold_q[idx];

    A_MUTEX_LOCK(&rxtid->lock);
    if(node->osbuf) {
        /* Is the cur frame duplicate or something beyond our
         * window(hold_q -> which is 2x, already)?
         * 1. Duplicate is easy - drop incoming frame.
         * 2. Not falling in current sliding window.
         *    2a. is the frame_seq_no preceding current tid_seq_no?
         *        -> drop the frame. perhaps sender did not get our ACK.
         *           this is taken care of above.
         *    2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
         *        -> taken care of above, by moving window forward. */
        A_NETBUF_FREE(node->osbuf);
        stats->num_dups++;
    }

    node->osbuf = *osbuf;
    node->is_amsdu = is_amsdu;
    node->seq_no = seq_no;
    if(node->is_amsdu) {
        stats->num_amsdu++;
    } else {
        stats->num_mpdu++;
    }
    A_MUTEX_UNLOCK(&rxtid->lock);

    /* Ownership transferred to hold_q */
    *osbuf = NULL;

    /* Deliver any in-order run now sitting at the window start */
    aggr_deque_frms(p_aggr, tid, 0, CONTIGUOUS_SEQNO);

    if(p_aggr->timerScheduled) {
        rxtid->progress = true;
    }else{
        for(idx=0 ; idx<rxtid->hold_q_sz ; idx++) {
            if(rxtid->hold_q[idx].osbuf) {
                /* there is a frame in the queue and no timer, so start
                 * a timer to ensure that the frame doesn't remain
                 * stuck forever. */
                p_aggr->timerScheduled = true;
                A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
                rxtid->progress = false;
                rxtid->timerMon = true;
                break;
            }
        }
    }
}
  465. /*
  466. * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate
  467. * hold Q state. Examples include when a Connect event or disconnect event is
  468. * received.
  469. */
  470. void
  471. aggr_reset_state(void *cntxt)
  472. {
  473. u8 tid;
  474. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  475. A_ASSERT(p_aggr);
  476. for(tid=0 ; tid<NUM_OF_TIDS ; tid++) {
  477. aggr_delete_tid_state(p_aggr, tid);
  478. }
  479. }
  480. static void
  481. aggr_timeout(unsigned long arg)
  482. {
  483. u8 i,j;
  484. struct aggr_info *p_aggr = (struct aggr_info *)arg;
  485. struct rxtid *rxtid;
  486. struct rxtid_stats *stats;
  487. /*
  488. * If the q for which the timer was originally started has
  489. * not progressed then it is necessary to dequeue all the
  490. * contained frames so that they are not held forever.
  491. */
  492. for(i = 0; i < NUM_OF_TIDS; i++) {
  493. rxtid = AGGR_GET_RXTID(p_aggr, i);
  494. stats = AGGR_GET_RXTID_STATS(p_aggr, i);
  495. if(rxtid->aggr == false ||
  496. rxtid->timerMon == false ||
  497. rxtid->progress == true) {
  498. continue;
  499. }
  500. // dequeue all frames in for this tid
  501. stats->num_timeouts++;
  502. A_PRINTF("TO: st %d end %d\n", rxtid->seq_next, ((rxtid->seq_next + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO));
  503. aggr_deque_frms(p_aggr, i, 0, ALL_SEQNO);
  504. }
  505. p_aggr->timerScheduled = false;
  506. // determine whether a new timer should be started.
  507. for(i = 0; i < NUM_OF_TIDS; i++) {
  508. rxtid = AGGR_GET_RXTID(p_aggr, i);
  509. if(rxtid->aggr == true && rxtid->hold_q) {
  510. for(j = 0 ; j < rxtid->hold_q_sz ; j++)
  511. {
  512. if(rxtid->hold_q[j].osbuf)
  513. {
  514. p_aggr->timerScheduled = true;
  515. rxtid->timerMon = true;
  516. rxtid->progress = false;
  517. break;
  518. }
  519. }
  520. if(j >= rxtid->hold_q_sz) {
  521. rxtid->timerMon = false;
  522. }
  523. }
  524. }
  525. if(p_aggr->timerScheduled) {
  526. /* Rearm the timer*/
  527. A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0);
  528. }
  529. }
  530. static void
  531. aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q)
  532. {
  533. void *osbuf;
  534. while((osbuf = A_NETBUF_DEQUEUE(q))) {
  535. p_aggr->rx_fn(p_aggr->dev, osbuf);
  536. }
  537. }
  538. void
  539. aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf)
  540. {
  541. struct aggr_info *p_aggr = (struct aggr_info *)cntxt;
  542. struct rxtid *rxtid;
  543. struct rxtid_stats *stats;
  544. u8 i;
  545. *log_buf = &p_aggr->pkt_log;
  546. A_PRINTF("\n\n================================================\n");
  547. A_PRINTF("tid: num_into_aggr, dups, oow, mpdu, amsdu, delivered, timeouts, holes, bar, seq_next\n");
  548. for(i = 0; i < NUM_OF_TIDS; i++) {
  549. stats = AGGR_GET_RXTID_STATS(p_aggr, i);
  550. rxtid = AGGR_GET_RXTID(p_aggr, i);
  551. A_PRINTF("%d: %d %d %d %d %d %d %d %d %d : %d\n", i, stats->num_into_aggr, stats->num_dups,
  552. stats->num_oow, stats->num_mpdu,
  553. stats->num_amsdu, stats->num_delivered, stats->num_timeouts,
  554. stats->num_hole, stats->num_bar,
  555. rxtid->seq_next);
  556. }
  557. A_PRINTF("================================================\n\n");
  558. }