/drivers/net/wireless/ath/ath6kl/wmi.c

http://github.com/mirrors/linux · C · 4173 lines · 3177 code · 764 blank · 232 comment · 489 complexity · 5cecb7efd1534bb7667e9e3a2b41f31d MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*
  2. * Copyright (c) 2004-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include <linux/ip.h>
  18. #include <linux/in.h>
  19. #include "core.h"
  20. #include "debug.h"
  21. #include "testmode.h"
  22. #include "trace.h"
  23. #include "../regd.h"
  24. #include "../regd_common.h"
  25. static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
/*
 * Firmware rate-index -> bitrate lookup table (chips supporting up to
 * HT MCS 7).  Each entry is {rate without SGI, rate with SGI}; values
 * appear to be in kbit/s (1000 == 1 Mbit/s CCK) -- TODO confirm against
 * the consumer of this table.  Terminated by a {0, 0} sentinel.
 */
static const s32 wmi_rate_tbl[][2] = {
	/* {W/O SGI, with SGI} */
	{1000, 1000},		/* legacy CCK rates */
	{2000, 2000},
	{5500, 5500},
	{11000, 11000},
	{6000, 6000},		/* legacy OFDM rates */
	{9000, 9000},
	{12000, 12000},
	{18000, 18000},
	{24000, 24000},
	{36000, 36000},
	{48000, 48000},
	{54000, 54000},
	{6500, 7200},		/* HT 20, MCS 0 */
	{13000, 14400},
	{19500, 21700},
	{26000, 28900},
	{39000, 43300},
	{52000, 57800},
	{58500, 65000},
	{65000, 72200},		/* HT 20, MCS 7 */
	{13500, 15000},		/* HT 40, MCS 0 */
	{27000, 30000},
	{40500, 45000},
	{54000, 60000},
	{81000, 90000},
	{108000, 120000},
	{121500, 135000},
	{135000, 150000},	/* HT 40, MCS 7 */
	{0, 0}			/* sentinel */
};
/*
 * Firmware rate-index -> bitrate lookup table for chips supporting up
 * to HT MCS 15 (two spatial streams).  Same layout and units as
 * wmi_rate_tbl: {rate without SGI, rate with SGI}, {0, 0} terminated.
 */
static const s32 wmi_rate_tbl_mcs15[][2] = {
	/* {W/O SGI, with SGI} */
	{1000, 1000},		/* legacy CCK rates */
	{2000, 2000},
	{5500, 5500},
	{11000, 11000},
	{6000, 6000},		/* legacy OFDM rates */
	{9000, 9000},
	{12000, 12000},
	{18000, 18000},
	{24000, 24000},
	{36000, 36000},
	{48000, 48000},
	{54000, 54000},
	{6500, 7200},		/* HT 20, MCS 0 */
	{13000, 14400},
	{19500, 21700},
	{26000, 28900},
	{39000, 43300},
	{52000, 57800},
	{58500, 65000},
	{65000, 72200},
	{13000, 14400},		/* HT 20, MCS 8 */
	{26000, 28900},
	{39000, 43300},
	{52000, 57800},
	{78000, 86700},
	{104000, 115600},
	{117000, 130000},
	{130000, 144400},	/* HT 20, MCS 15 */
	{13500, 15000},		/* HT 40, MCS 0 */
	{27000, 30000},
	{40500, 45000},
	{54000, 60000},
	{81000, 90000},
	{108000, 120000},
	{121500, 135000},
	{135000, 150000},
	{27000, 30000},		/* HT 40, MCS 8 */
	{54000, 60000},
	{81000, 90000},
	{108000, 120000},
	{162000, 180000},
	{216000, 240000},
	{243000, 270000},
	{270000, 300000},	/* HT 40, MCS 15 */
	{0, 0}			/* sentinel */
};
/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
static const u8 up_to_ac[] = {
	WMM_AC_BE,	/* UP 0 */
	WMM_AC_BK,	/* UP 1 */
	WMM_AC_BK,	/* UP 2 */
	WMM_AC_BE,	/* UP 3 */
	WMM_AC_VI,	/* UP 4 */
	WMM_AC_VI,	/* UP 5 */
	WMM_AC_VO,	/* UP 6 */
	WMM_AC_VO,	/* UP 7 */
};
  117. void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
  118. {
  119. if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
  120. return;
  121. wmi->ep_id = ep_id;
  122. }
  123. enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
  124. {
  125. return wmi->ep_id;
  126. }
  127. struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
  128. {
  129. struct ath6kl_vif *vif, *found = NULL;
  130. if (WARN_ON(if_idx > (ar->vif_max - 1)))
  131. return NULL;
  132. /* FIXME: Locking */
  133. spin_lock_bh(&ar->list_lock);
  134. list_for_each_entry(vif, &ar->vif_list, list) {
  135. if (vif->fw_vif_idx == if_idx) {
  136. found = vif;
  137. break;
  138. }
  139. }
  140. spin_unlock_bh(&ar->list_lock);
  141. return found;
  142. }
/* Performs DIX to 802.3 encapsulation for transmit packets.
 * Assumes the entire DIX header is contiguous and that there is
 * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
 */
int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
{
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr *eth_hdr;
	size_t new_len;
	__be16 type;
	u8 *datap;
	u16 size;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	/* Headroom must also cover the WMI data header that a later
	 * stage pushes, even though only the LLC/SNAP header is pushed
	 * here. */
	size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
	if (skb_headroom(skb) < size)
		return -ENOMEM;

	eth_hdr = (struct ethhdr *) skb->data;

	/* Save the original ethertype; it is re-emitted in the SNAP
	 * header below. */
	type = eth_hdr->h_proto;

	if (!is_ethertype(be16_to_cpu(type))) {
		/* h_proto below 0x0600 is already an 802.3 length field */
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "%s: pkt is already in 802.3 format\n", __func__);
		return 0;
	}

	/* 802.3 length field: payload size = old frame minus the MAC
	 * header, plus the LLC/SNAP header being inserted. */
	new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);

	skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
	datap = skb->data;

	/* NOTE: eth_hdr still points at the original (pre-push) header
	 * location inside the buffer; patch its length in place, then
	 * copy it down to the new start of the frame. */
	eth_hdr->h_proto = cpu_to_be16(new_len);

	memcpy(datap, eth_hdr, sizeof(*eth_hdr));

	/* Build the LLC/SNAP header carrying the saved ethertype. */
	llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
	llc_hdr->dsap = 0xAA;
	llc_hdr->ssap = 0xAA;
	llc_hdr->cntl = 0x03;
	llc_hdr->org_code[0] = 0x0;
	llc_hdr->org_code[1] = 0x0;
	llc_hdr->org_code[2] = 0x0;
	llc_hdr->eth_type = type;

	return 0;
}
  182. static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
  183. u8 *version, void *tx_meta_info)
  184. {
  185. struct wmi_tx_meta_v1 *v1;
  186. struct wmi_tx_meta_v2 *v2;
  187. if (WARN_ON(skb == NULL || version == NULL))
  188. return -EINVAL;
  189. switch (*version) {
  190. case WMI_META_VERSION_1:
  191. skb_push(skb, WMI_MAX_TX_META_SZ);
  192. v1 = (struct wmi_tx_meta_v1 *) skb->data;
  193. v1->pkt_id = 0;
  194. v1->rate_plcy_id = 0;
  195. *version = WMI_META_VERSION_1;
  196. break;
  197. case WMI_META_VERSION_2:
  198. skb_push(skb, WMI_MAX_TX_META_SZ);
  199. v2 = (struct wmi_tx_meta_v2 *) skb->data;
  200. memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
  201. sizeof(struct wmi_tx_meta_v2));
  202. break;
  203. }
  204. return 0;
  205. }
/*
 * Prepend the WMI data header (and optional meta info) to a TX skb.
 *
 * @msg_type/@data_type are packed into the info byte; @flags selects
 * the MORE/EOSP bits; @meta_ver and @if_idx are encoded into the
 * little-endian info2/info3 fields.  Returns 0 or a negative errno
 * (invalid args, or meta-add failure).
 */
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
			    u8 msg_type, u32 flags,
			    enum wmi_data_hdr_data_type data_type,
			    u8 meta_ver, void *tx_meta_info, u8 if_idx)
{
	struct wmi_data_hdr *data_hdr;
	int ret;

	if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
		return -EINVAL;

	/* Meta info must be pushed first so that the data header ends
	 * up in front of it. */
	if (tx_meta_info) {
		ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
		if (ret)
			return ret;
	}

	skb_push(skb, sizeof(struct wmi_data_hdr));

	data_hdr = (struct wmi_data_hdr *)skb->data;
	memset(data_hdr, 0, sizeof(struct wmi_data_hdr));

	/* info is a plain byte: message type + data type bitfields */
	data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
	data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;

	if (flags & WMI_DATA_HDR_FLAGS_MORE)
		data_hdr->info |= WMI_DATA_HDR_MORE;

	/* info2/info3 are little-endian on the wire */
	if (flags & WMI_DATA_HDR_FLAGS_EOSP)
		data_hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);

	data_hdr->info2 |= cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
	data_hdr->info3 |= cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);

	return 0;
}
  233. u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
  234. {
  235. struct iphdr *ip_hdr = (struct iphdr *) pkt;
  236. u8 ip_pri;
  237. /*
  238. * Determine IPTOS priority
  239. *
  240. * IP-TOS - 8bits
  241. * : DSCP(6-bits) ECN(2-bits)
  242. * : DSCP - P2 P1 P0 X X X
  243. * where (P2 P1 P0) form 802.1D
  244. */
  245. ip_pri = ip_hdr->tos >> 5;
  246. ip_pri &= 0x7;
  247. if ((layer2_pri & 0x7) > ip_pri)
  248. return (u8) layer2_pri & 0x7;
  249. else
  250. return ip_pri;
  251. }
  252. u8 ath6kl_wmi_get_traffic_class(u8 user_priority)
  253. {
  254. return up_to_ac[user_priority & 0x7];
  255. }
/*
 * Classify a TX frame into a WMM access category and, if no fat pipe
 * (priority stream) exists yet for that AC, implicitly create one via
 * WMI_CREATE_PSTREAM.  The chosen AC is returned through @ac and the
 * user priority is stamped into the frame's WMI data header.
 *
 * The skb is expected to already carry: wmi_data_hdr [+ meta info]
 * + 802.3 header + LLC/SNAP header.
 */
int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
				       struct sk_buff *skb,
				       u32 layer2_priority, bool wmm_enabled,
				       u8 *ac)
{
	struct wmi_data_hdr *data_hdr;
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct wmi_create_pstream_cmd cmd;
	u32 meta_size, hdr_size;
	u16 ip_type = IP_ETHERTYPE;
	u8 stream_exist, usr_pri;
	u8 traffic_class = WMM_AC_BE;
	u8 *datap;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;
	data_hdr = (struct wmi_data_hdr *) datap;

	/* Meta info, when flagged in info2, sits between the WMI data
	 * header and the 802.3 header; account for it when locating the
	 * LLC/SNAP header below. */
	meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
		     WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;

	if (!wmm_enabled) {
		/* If WMM is disabled all traffic goes as BE traffic */
		usr_pri = 0;
	} else {
		hdr_size = sizeof(struct ethhdr);

		llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
							 sizeof(struct
								wmi_data_hdr) +
							 meta_size + hdr_size);

		if (llc_hdr->eth_type == htons(ip_type)) {
			/*
			 * Extract the endpoint info from the TOS field
			 * in the IP header.
			 */
			usr_pri =
			   ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
					sizeof(struct ath6kl_llc_snap_hdr),
					layer2_priority);
		} else {
			usr_pri = layer2_priority & 0x7;
		}

		/*
		 * Queue the EAPOL frames in the same WMM_AC_VO queue
		 * as that of management frames.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			usr_pri = WMI_VOICE_USER_PRIORITY;
	}

	/*
	 * workaround for WMM S5
	 *
	 * FIXME: wmi->traffic_class is always 100 so this test doesn't
	 * make sense
	 */
	if ((wmi->traffic_class == WMM_AC_VI) &&
	    ((usr_pri == 5) || (usr_pri == 4)))
		usr_pri = 1;

	/* Convert user priority to traffic class */
	traffic_class = up_to_ac[usr_pri & 0x7];

	wmi_data_hdr_set_up(data_hdr, usr_pri);

	/* Snapshot the fat-pipe bitmap under the lock; the command send
	 * below must not run with the spinlock held. */
	spin_lock_bh(&wmi->lock);
	stream_exist = wmi->fat_pipe_exist;
	spin_unlock_bh(&wmi->lock);

	if (!(stream_exist & (1 << traffic_class))) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.traffic_class = traffic_class;
		cmd.user_pri = usr_pri;
		cmd.inactivity_int =
			cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
		/* Implicit streams are created with TSID 0xFF */
		cmd.tsid = WMI_IMPLICIT_PSTREAM;
		ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
	}

	*ac = traffic_class;

	return 0;
}
/*
 * Strip the 802.11 (3-address) header and LLC/SNAP header from an RX
 * frame and replace them with an Ethernet header whose addresses are
 * chosen according to the frame's ToDS/FromDS bits.
 */
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
{
	struct ieee80211_hdr_3addr *pwh, wh;
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr eth_hdr;
	u32 hdr_size;
	u8 *datap;
	__le16 sub_type;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;
	pwh = (struct ieee80211_hdr_3addr *) datap;

	sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);

	/* Copy the 802.11 header aside before skb_pull() invalidates
	 * pwh. */
	memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));

	/* Strip off the 802.11 header */
	if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		/* QoS header is rounded up to a 4-byte boundary here --
		 * NOTE(review): presumably matches firmware padding;
		 * confirm against the RX path docs. */
		hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
				   sizeof(u32));
		skb_pull(skb, hdr_size);
	} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
		skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
	}

	datap = skb->data;
	llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);

	memset(&eth_hdr, 0, sizeof(eth_hdr));
	eth_hdr.h_proto = llc_hdr->eth_type;

	/* Select src/dst MAC addresses based on the DS bits (IEEE
	 * 802.11 address-field rules for 3-address frames). */
	switch ((le16_to_cpu(wh.frame_control)) &
		(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
	case IEEE80211_FCTL_TODS:
		memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS:
		memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
		/* WDS 4-address frames: addresses left zeroed */
		break;
	default:
		memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
		break;
	}

	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
	skb_push(skb, sizeof(eth_hdr));

	datap = skb->data;

	memcpy(datap, &eth_hdr, sizeof(eth_hdr));

	return 0;
}
/*
 * Performs 802.3 to DIX encapsulation for received packets.
 * Assumes the entire 802.3 header is contiguous.
 */
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
{
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr eth_hdr;
	u8 *datap;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;

	/* Copy the MAC header aside; it gets written back after the
	 * LLC/SNAP header is removed. */
	memcpy(&eth_hdr, datap, sizeof(eth_hdr));

	/* Replace the 802.3 length field with the real ethertype taken
	 * from the SNAP header. */
	llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
	eth_hdr.h_proto = llc_hdr->eth_type;

	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
	datap = skb->data;

	memcpy(datap, &eth_hdr, sizeof(eth_hdr));

	return 0;
}
  400. static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
  401. {
  402. struct tx_complete_msg_v1 *msg_v1;
  403. struct wmi_tx_complete_event *evt;
  404. int index;
  405. u16 size;
  406. evt = (struct wmi_tx_complete_event *) datap;
  407. ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
  408. evt->num_msg, evt->msg_len, evt->msg_type);
  409. for (index = 0; index < evt->num_msg; index++) {
  410. size = sizeof(struct wmi_tx_complete_event) +
  411. (index * sizeof(struct tx_complete_msg_v1));
  412. msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);
  413. ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
  414. msg_v1->status, msg_v1->pkt_id,
  415. msg_v1->rate_idx, msg_v1->ack_failures);
  416. }
  417. return 0;
  418. }
/*
 * Handle the firmware's remain-on-channel start event: resolve the
 * reported frequency to a cfg80211 channel and notify cfg80211 that
 * the r-o-c request identified by vif->last_roc_id is now active.
 */
static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
					      int len, struct ath6kl_vif *vif)
{
	struct wmi_remain_on_chnl_event *ev;
	u32 freq;
	u32 dur;
	struct ieee80211_channel *chan;
	struct ath6kl *ar = wmi->parent_dev;
	u32 id;

	if (len < sizeof(*ev))
		return -EINVAL;

	ev = (struct wmi_remain_on_chnl_event *) datap;
	freq = le32_to_cpu(ev->freq);
	dur = le32_to_cpu(ev->duration);
	ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
		   freq, dur);
	chan = ieee80211_get_channel(ar->wiphy, freq);
	if (!chan) {
		/* Firmware reported a frequency this wiphy doesn't know */
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "remain_on_chnl: Unknown channel (freq=%u)\n",
			   freq);
		return -EINVAL;
	}
	/* Cookie of the request this event answers */
	id = vif->last_roc_id;
	cfg80211_ready_on_channel(&vif->wdev, id, chan,
				  dur, GFP_ATOMIC);

	return 0;
}
/*
 * Handle the firmware's remain-on-channel end event, raised both for
 * an explicit cancel and for a natural timeout.  Picks the cookie of
 * the cancel command when one matches the last r-o-c, otherwise the
 * r-o-c cookie itself, then tells cfg80211 the r-o-c expired.
 */
static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
						     u8 *datap, int len,
						     struct ath6kl_vif *vif)
{
	struct wmi_cancel_remain_on_chnl_event *ev;
	u32 freq;
	u32 dur;
	struct ieee80211_channel *chan;
	struct ath6kl *ar = wmi->parent_dev;
	u32 id;

	if (len < sizeof(*ev))
		return -EINVAL;

	ev = (struct wmi_cancel_remain_on_chnl_event *) datap;
	freq = le32_to_cpu(ev->freq);
	dur = le32_to_cpu(ev->duration);
	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "cancel_remain_on_chnl: freq=%u dur=%u status=%u\n",
		   freq, dur, ev->status);
	chan = ieee80211_get_channel(ar->wiphy, freq);
	if (!chan) {
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "cancel_remain_on_chnl: Unknown channel (freq=%u)\n",
			   freq);
		return -EINVAL;
	}
	/* last_cancel_roc_id + 1 == last_roc_id means the cancel was
	 * issued for the most recent r-o-c request. */
	if (vif->last_cancel_roc_id &&
	    vif->last_cancel_roc_id + 1 == vif->last_roc_id)
		id = vif->last_cancel_roc_id; /* event for cancel command */
	else
		id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
	vif->last_cancel_roc_id = 0;
	cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC);

	return 0;
}
  481. static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
  482. struct ath6kl_vif *vif)
  483. {
  484. struct wmi_tx_status_event *ev;
  485. u32 id;
  486. if (len < sizeof(*ev))
  487. return -EINVAL;
  488. ev = (struct wmi_tx_status_event *) datap;
  489. id = le32_to_cpu(ev->id);
  490. ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
  491. id, ev->ack_status);
  492. if (wmi->last_mgmt_tx_frame) {
  493. cfg80211_mgmt_tx_status(&vif->wdev, id,
  494. wmi->last_mgmt_tx_frame,
  495. wmi->last_mgmt_tx_frame_len,
  496. !!ev->ack_status, GFP_ATOMIC);
  497. kfree(wmi->last_mgmt_tx_frame);
  498. wmi->last_mgmt_tx_frame = NULL;
  499. wmi->last_mgmt_tx_frame_len = 0;
  500. }
  501. return 0;
  502. }
/*
 * Handle a firmware-forwarded probe request.  Validates that the
 * trailing frame data fits inside the received event, then passes it
 * up to cfg80211 when probe-request reporting is enabled or the vif is
 * in AP mode.
 */
static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
					    struct ath6kl_vif *vif)
{
	struct wmi_p2p_rx_probe_req_event *ev;
	u32 freq;
	u16 dlen;

	if (len < sizeof(*ev))
		return -EINVAL;

	ev = (struct wmi_p2p_rx_probe_req_event *) datap;
	freq = le32_to_cpu(ev->freq);
	dlen = le16_to_cpu(ev->len);
	/* ev->data + dlen must not run past the end of the event */
	if (datap + len < ev->data + dlen) {
		ath6kl_err("invalid wmi_p2p_rx_probe_req_event: len=%d dlen=%u\n",
			   len, dlen);
		return -EINVAL;
	}
	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "rx_probe_req: len=%u freq=%u probe_req_report=%d\n",
		   dlen, freq, vif->probe_req_report);

	if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
		cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0);

	return 0;
}
  526. static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
  527. {
  528. struct wmi_p2p_capabilities_event *ev;
  529. u16 dlen;
  530. if (len < sizeof(*ev))
  531. return -EINVAL;
  532. ev = (struct wmi_p2p_capabilities_event *) datap;
  533. dlen = le16_to_cpu(ev->len);
  534. ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_capab: len=%u\n", dlen);
  535. return 0;
  536. }
/*
 * Handle a firmware-forwarded action frame: validate the embedded
 * frame length against the event size and deliver it to cfg80211.
 */
static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
					 struct ath6kl_vif *vif)
{
	struct wmi_rx_action_event *ev;
	u32 freq;
	u16 dlen;

	if (len < sizeof(*ev))
		return -EINVAL;

	ev = (struct wmi_rx_action_event *) datap;
	freq = le32_to_cpu(ev->freq);
	dlen = le16_to_cpu(ev->len);
	/* ev->data + dlen must not run past the end of the event */
	if (datap + len < ev->data + dlen) {
		ath6kl_err("invalid wmi_rx_action_event: len=%d dlen=%u\n",
			   len, dlen);
		return -EINVAL;
	}
	ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
	cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0);

	return 0;
}
/*
 * Handle a P2P info event.  The payload in ev->data holds one of
 * several structures selected by the flag bits in info_req_flags;
 * each is length-checked against dlen and logged.
 */
static int ath6kl_wmi_p2p_info_event_rx(u8 *datap, int len)
{
	struct wmi_p2p_info_event *ev;
	u32 flags;
	u16 dlen;

	if (len < sizeof(*ev))
		return -EINVAL;

	ev = (struct wmi_p2p_info_event *) datap;
	flags = le32_to_cpu(ev->info_req_flags);
	dlen = le16_to_cpu(ev->len);
	ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: flags=%x len=%d\n", flags, dlen);

	if (flags & P2P_FLAG_CAPABILITIES_REQ) {
		struct wmi_p2p_capabilities *cap;
		if (dlen < sizeof(*cap))
			return -EINVAL;
		cap = (struct wmi_p2p_capabilities *) ev->data;
		ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: GO Power Save = %d\n",
			   cap->go_power_save);
	}

	if (flags & P2P_FLAG_MACADDR_REQ) {
		struct wmi_p2p_macaddr *mac;
		if (dlen < sizeof(*mac))
			return -EINVAL;
		mac = (struct wmi_p2p_macaddr *) ev->data;
		ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: MAC Address = %pM\n",
			   mac->mac_addr);
	}

	if (flags & P2P_FLAG_HMODEL_REQ) {
		struct wmi_p2p_hmodel *mod;
		if (dlen < sizeof(*mod))
			return -EINVAL;
		mod = (struct wmi_p2p_hmodel *) ev->data;
		ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: P2P Model = %d (%s)\n",
			   mod->p2p_model,
			   mod->p2p_model ? "host" : "firmware");
	}
	return 0;
}
  595. static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
  596. {
  597. struct sk_buff *skb;
  598. skb = ath6kl_buf_alloc(size);
  599. if (!skb)
  600. return NULL;
  601. skb_put(skb, size);
  602. if (size)
  603. memset(skb->data, 0, size);
  604. return skb;
  605. }
  606. /* Send a "simple" wmi command -- one with no arguments */
  607. static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
  608. enum wmi_cmd_id cmd_id)
  609. {
  610. struct sk_buff *skb;
  611. int ret;
  612. skb = ath6kl_wmi_get_new_buf(0);
  613. if (!skb)
  614. return -ENOMEM;
  615. ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
  616. return ret;
  617. }
  618. static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
  619. {
  620. struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
  621. if (len < sizeof(struct wmi_ready_event_2))
  622. return -EINVAL;
  623. ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
  624. le32_to_cpu(ev->sw_version),
  625. le32_to_cpu(ev->abi_version), ev->phy_cap);
  626. return 0;
  627. }
/*
 * Mechanism to modify the roaming behavior in the firmware. The lower rssi
 * at which the station has to roam can be passed with
 * WMI_SET_LRSSI_SCAN_PARAMS. Subtract 96 from RSSI to get the signal level
 * in dBm.
 */
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
{
	struct sk_buff *skb;
	struct roam_ctrl_cmd *cmd;

	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct roam_ctrl_cmd *) skb->data;

	cmd->info.params.lrssi_scan_period = cpu_to_le16(DEF_LRSSI_SCAN_PERIOD);
	/* Start scanning for candidates DEF_SCAN_FOR_ROAM_INTVL above
	 * the roam threshold itself. */
	cmd->info.params.lrssi_scan_threshold = a_cpu_to_sle16(lrssi +
						       DEF_SCAN_FOR_ROAM_INTVL);
	cmd->info.params.lrssi_roam_threshold = a_cpu_to_sle16(lrssi);
	cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
	cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;

	return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
				   NO_SYNC_WMIFLAG);
}
  651. int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
  652. {
  653. struct sk_buff *skb;
  654. struct roam_ctrl_cmd *cmd;
  655. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  656. if (!skb)
  657. return -ENOMEM;
  658. cmd = (struct roam_ctrl_cmd *) skb->data;
  659. memcpy(cmd->info.bssid, bssid, ETH_ALEN);
  660. cmd->roam_ctrl = WMI_FORCE_ROAM;
  661. ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
  662. return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
  663. NO_SYNC_WMIFLAG);
  664. }
  665. int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
  666. u32 beacon_intvl)
  667. {
  668. struct sk_buff *skb;
  669. struct set_beacon_int_cmd *cmd;
  670. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  671. if (!skb)
  672. return -ENOMEM;
  673. cmd = (struct set_beacon_int_cmd *) skb->data;
  674. cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
  675. return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
  676. WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
  677. }
  678. int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
  679. {
  680. struct sk_buff *skb;
  681. struct set_dtim_cmd *cmd;
  682. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  683. if (!skb)
  684. return -ENOMEM;
  685. cmd = (struct set_dtim_cmd *) skb->data;
  686. cmd->dtim_period = cpu_to_le32(dtim_period);
  687. return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
  688. WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
  689. }
  690. int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
  691. {
  692. struct sk_buff *skb;
  693. struct roam_ctrl_cmd *cmd;
  694. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  695. if (!skb)
  696. return -ENOMEM;
  697. cmd = (struct roam_ctrl_cmd *) skb->data;
  698. cmd->info.roam_mode = mode;
  699. cmd->roam_ctrl = WMI_SET_ROAM_MODE;
  700. ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
  701. return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
  702. NO_SYNC_WMIFLAG);
  703. }
/*
 * Handle the WMI CONNECT event.  In AP mode the event signals either
 * "AP started" (bssid matches our own address) or "STA connected";
 * in STA/IBSS mode it reports our own association, in which case the
 * association response IEs are scanned for a WMM parameter element
 * before the core connect handler is invoked.
 */
static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
				       struct ath6kl_vif *vif)
{
	struct wmi_connect_event *ev;
	u8 *pie, *peie;

	if (len < sizeof(struct wmi_connect_event))
		return -EINVAL;

	ev = (struct wmi_connect_event *) datap;

	if (vif->nw_type == AP_NETWORK) {
		/* AP mode start/STA connected event */
		struct net_device *dev = vif->ndev;
		if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "%s: freq %d bssid %pM (AP started)\n",
				   __func__, le16_to_cpu(ev->u.ap_bss.ch),
				   ev->u.ap_bss.bssid);
			ath6kl_connect_ap_mode_bss(
				vif, le16_to_cpu(ev->u.ap_bss.ch));
		} else {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "%s: aid %u mac_addr %pM auth=%u keymgmt=%u cipher=%u apsd_info=%u (STA connected)\n",
				   __func__, ev->u.ap_sta.aid,
				   ev->u.ap_sta.mac_addr,
				   ev->u.ap_sta.auth,
				   ev->u.ap_sta.keymgmt,
				   le16_to_cpu(ev->u.ap_sta.cipher),
				   ev->u.ap_sta.apsd_info);
			ath6kl_connect_ap_mode_sta(
				vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
				ev->u.ap_sta.keymgmt,
				le16_to_cpu(ev->u.ap_sta.cipher),
				ev->u.ap_sta.auth, ev->assoc_req_len,
				ev->assoc_info + ev->beacon_ie_len,
				ev->u.ap_sta.apsd_info);
		}
		return 0;
	}

	/* STA/IBSS mode connection event */

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "wmi event connect freq %d bssid %pM listen_intvl %d beacon_intvl %d type %d\n",
		   le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid,
		   le16_to_cpu(ev->u.sta.listen_intvl),
		   le16_to_cpu(ev->u.sta.beacon_intvl),
		   le32_to_cpu(ev->u.sta.nw_type));

	/* assoc_info layout: [beacon IEs][assoc req IEs][assoc rsp],
	 * where the rsp starts with three u16 fixed fields. */

	/* Start of assoc rsp IEs */
	pie = ev->assoc_info + ev->beacon_ie_len +
	      ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */

	/* End of assoc rsp IEs */
	peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
	       ev->assoc_resp_len;

	/* Walk the rsp IEs looking for the WMM parameter element
	 * (vendor IE, OUI 00:50:F2, type WMM, subtype PARAM). */
	while (pie < peie) {
		switch (*pie) {
		case WLAN_EID_VENDOR_SPECIFIC:
			if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
			    pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
				/* WMM OUT (00:50:F2) */
				if (pie[1] > 5 &&
				    pie[6] == WMM_PARAM_OUI_SUBTYPE)
					wmi->is_wmm_enabled = true;
			}
			break;
		}

		if (wmi->is_wmm_enabled)
			break;

		/* pie[1] is the IE length; +2 for the id/len bytes */
		pie += pie[1] + 2;
	}

	ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
			     ev->u.sta.bssid,
			     le16_to_cpu(ev->u.sta.listen_intvl),
			     le16_to_cpu(ev->u.sta.beacon_intvl),
			     le32_to_cpu(ev->u.sta.nw_type),
			     ev->beacon_ie_len, ev->assoc_req_len,
			     ev->assoc_resp_len, ev->assoc_info);

	return 0;
}
  779. static struct country_code_to_enum_rd *
  780. ath6kl_regd_find_country(u16 countryCode)
  781. {
  782. int i;
  783. for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
  784. if (allCountries[i].countryCode == countryCode)
  785. return &allCountries[i];
  786. }
  787. return NULL;
  788. }
  789. static struct reg_dmn_pair_mapping *
  790. ath6kl_get_regpair(u16 regdmn)
  791. {
  792. int i;
  793. if (regdmn == NO_ENUMRD)
  794. return NULL;
  795. for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
  796. if (regDomainPairs[i].reg_domain == regdmn)
  797. return &regDomainPairs[i];
  798. }
  799. return NULL;
  800. }
  801. static struct country_code_to_enum_rd *
  802. ath6kl_regd_find_country_by_rd(u16 regdmn)
  803. {
  804. int i;
  805. for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
  806. if (allCountries[i].regDmnEnum == regdmn)
  807. return &allCountries[i];
  808. }
  809. return NULL;
  810. }
  811. static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
  812. {
  813. struct ath6kl_wmi_regdomain *ev;
  814. struct country_code_to_enum_rd *country = NULL;
  815. struct reg_dmn_pair_mapping *regpair = NULL;
  816. char alpha2[2];
  817. u32 reg_code;
  818. ev = (struct ath6kl_wmi_regdomain *) datap;
  819. reg_code = le32_to_cpu(ev->reg_code);
  820. if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
  821. country = ath6kl_regd_find_country((u16) reg_code);
  822. } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
  823. regpair = ath6kl_get_regpair((u16) reg_code);
  824. country = ath6kl_regd_find_country_by_rd((u16) reg_code);
  825. if (regpair)
  826. ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
  827. regpair->reg_domain);
  828. else
  829. ath6kl_warn("Regpair not found reg_code 0x%0x\n",
  830. reg_code);
  831. }
  832. if (country && wmi->parent_dev->wiphy_registered) {
  833. alpha2[0] = country->isoName[0];
  834. alpha2[1] = country->isoName[1];
  835. regulatory_hint(wmi->parent_dev->wiphy, alpha2);
  836. ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
  837. alpha2[0], alpha2[1]);
  838. }
  839. }
/*
 * Handle the WMI DISCONNECT event: reset WMM/traffic-class state and
 * forward the disconnect details to the core layer.
 */
static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
					  struct ath6kl_vif *vif)
{
	struct wmi_disconnect_event *ev;

	/* NOTE(review): traffic_class is reset even when the event is
	 * too short to parse -- intentional-looking, but worth
	 * confirming. */
	wmi->traffic_class = 100;

	if (len < sizeof(struct wmi_disconnect_event))
		return -EINVAL;

	ev = (struct wmi_disconnect_event *) datap;

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "wmi event disconnect proto_reason %d bssid %pM wmi_reason %d assoc_resp_len %d\n",
		   le16_to_cpu(ev->proto_reason_status), ev->bssid,
		   ev->disconn_reason, ev->assoc_resp_len);

	wmi->is_wmm_enabled = false;

	ath6kl_disconnect_event(vif, ev->disconn_reason,
				ev->bssid, ev->assoc_resp_len, ev->assoc_info,
				le16_to_cpu(ev->proto_reason_status));

	return 0;
}
  858. static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
  859. {
  860. struct wmi_peer_node_event *ev;
  861. if (len < sizeof(struct wmi_peer_node_event))
  862. return -EINVAL;
  863. ev = (struct wmi_peer_node_event *) datap;
  864. if (ev->event_code == PEER_NODE_JOIN_EVENT)
  865. ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
  866. ev->peer_mac_addr);
  867. else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
  868. ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
  869. ev->peer_mac_addr);
  870. return 0;
  871. }
  872. static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
  873. struct ath6kl_vif *vif)
  874. {
  875. struct wmi_tkip_micerr_event *ev;
  876. if (len < sizeof(struct wmi_tkip_micerr_event))
  877. return -EINVAL;
  878. ev = (struct wmi_tkip_micerr_event *) datap;
  879. ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
  880. return 0;
  881. }
/*
 * Scheduled-scan timer callback: firmware sends no "sched scan done"
 * event, so this timer periodically reports accumulated results to
 * cfg80211 (see the comment at the sched-scan arming site).
 */
void ath6kl_wmi_sscan_timer(struct timer_list *t)
{
	struct ath6kl_vif *vif = from_timer(vif, t, sched_scan_timer);

	cfg80211_sched_scan_results(vif->ar->wiphy, 0);
}
/*
 * Handle a WMI BSS info event: feed beacon/probe-response frames into the
 * cfg80211 scan results, and cache the DTIM period from our own AP's
 * beacon while connected.
 *
 * Returns 0 on success or for ignored frame types, -EINVAL on a malformed
 * event, -ENOMEM if cfg80211 cannot store the BSS entry.
 */
static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
				       struct ath6kl_vif *vif)
{
	struct wmi_bss_info_hdr2 *bih;
	u8 *buf;
	struct ieee80211_channel *channel;
	struct ath6kl *ar = wmi->parent_dev;
	struct cfg80211_bss *bss;

	if (len <= sizeof(struct wmi_bss_info_hdr2))
		return -EINVAL;

	bih = (struct wmi_bss_info_hdr2 *) datap;
	buf = datap + sizeof(struct wmi_bss_info_hdr2);
	len -= sizeof(struct wmi_bss_info_hdr2);

	/* rssi printed here is snr relative to a -95 dBm noise floor */
	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "bss info evt - ch %u, snr %d, rssi %d, bssid \"%pM\" "
		   "frame_type=%d\n",
		   bih->ch, bih->snr, bih->snr - 95, bih->bssid,
		   bih->frame_type);

	if (bih->frame_type != BEACON_FTYPE &&
	    bih->frame_type != PROBERESP_FTYPE)
		return 0; /* Only update BSS table for now */

	/* A single beacon was requested; now that it has arrived, restore
	 * the "no filtering" mode in firmware. */
	if (bih->frame_type == BEACON_FTYPE &&
	    test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					 NONE_BSS_FILTER, 0);
	}

	channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
	if (channel == NULL)
		return -EINVAL;

	/* Frame body must hold timestamp (8), beacon interval (2) and
	 * capability info (2) before the IEs start. */
	if (len < 8 + 2 + 2)
		return -EINVAL;

	/* Beacon from the AP we are associated with: cache its DTIM
	 * period for power-save bookkeeping. */
	if (bih->frame_type == BEACON_FTYPE &&
	    test_bit(CONNECTED, &vif->flags) &&
	    memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
		const u8 *tim;
		tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
				       len - 8 - 2 - 2);
		if (tim && tim[1] >= 2) {
			vif->assoc_bss_dtim_period = tim[3];
			set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
		}
	}

	/* tsf, beacon interval and capability are read unaligned from the
	 * start of the frame body; signal is in mBm (snr - 95 dBm). */
	bss = cfg80211_inform_bss(ar->wiphy, channel,
				  bih->frame_type == BEACON_FTYPE ?
				  CFG80211_BSS_FTYPE_BEACON :
				  CFG80211_BSS_FTYPE_PRESP,
				  bih->bssid, get_unaligned_le64((__le64 *)buf),
				  get_unaligned_le16(((__le16 *)buf) + 5),
				  get_unaligned_le16(((__le16 *)buf) + 4),
				  buf + 8 + 2 + 2, len - 8 - 2 - 2,
				  (bih->snr - 95) * 100, GFP_ATOMIC);
	if (bss == NULL)
		return -ENOMEM;
	cfg80211_put_bss(ar->wiphy, bss);

	/*
	 * Firmware doesn't return any event when scheduled scan has
	 * finished, so we need to use a timer to find out when there are
	 * no more results.
	 *
	 * The timer is started from the first bss info received, otherwise
	 * the timer would not ever fire if the scan interval is short
	 * enough.
	 */
	if (test_bit(SCHED_SCANNING, &vif->flags) &&
	    !timer_pending(&vif->sched_scan_timer)) {
		mod_timer(&vif->sched_scan_timer, jiffies +
			  msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
	}

	return 0;
}
  958. /* Inactivity timeout of a fatpipe(pstream) at the target */
  959. static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
  960. int len)
  961. {
  962. struct wmi_pstream_timeout_event *ev;
  963. if (len < sizeof(struct wmi_pstream_timeout_event))
  964. return -EINVAL;
  965. ev = (struct wmi_pstream_timeout_event *) datap;
  966. if (ev->traffic_class >= WMM_NUM_AC) {
  967. ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
  968. return -EINVAL;
  969. }
  970. /*
  971. * When the pstream (fat pipe == AC) timesout, it means there were
  972. * no thinStreams within this pstream & it got implicitly created
  973. * due to data flow on this AC. We start the inactivity timer only
  974. * for implicitly created pstream. Just reset the host state.
  975. */
  976. spin_lock_bh(&wmi->lock);
  977. wmi->stream_exist_for_ac[ev->traffic_class] = 0;
  978. wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
  979. spin_unlock_bh(&wmi->lock);
  980. /* Indicate inactivity to driver layer for this fatpipe (pstream) */
  981. ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);
  982. return 0;
  983. }
  984. static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
  985. {
  986. struct wmi_bit_rate_reply *reply;
  987. s32 rate;
  988. u32 sgi, index;
  989. if (len < sizeof(struct wmi_bit_rate_reply))
  990. return -EINVAL;
  991. reply = (struct wmi_bit_rate_reply *) datap;
  992. ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
  993. if (reply->rate_index == (s8) RATE_AUTO) {
  994. rate = RATE_AUTO;
  995. } else {
  996. index = reply->rate_index & 0x7f;
  997. if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
  998. return -EINVAL;
  999. sgi = (reply->rate_index & 0x80) ? 1 : 0;
  1000. rate = wmi_rate_tbl[index][sgi];
  1001. }
  1002. ath6kl_wakeup_event(wmi->parent_dev);
  1003. return 0;
  1004. }
  1005. static int ath6kl_wmi_test_rx(struct wmi *wmi, u8 *datap, int len)
  1006. {
  1007. ath6kl_tm_rx_event(wmi->parent_dev, datap, len);
  1008. return 0;
  1009. }
  1010. static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
  1011. {
  1012. if (len < sizeof(struct wmi_fix_rates_reply))
  1013. return -EINVAL;
  1014. ath6kl_wakeup_event(wmi->parent_dev);
  1015. return 0;
  1016. }
  1017. static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
  1018. {
  1019. if (len < sizeof(struct wmi_channel_list_reply))
  1020. return -EINVAL;
  1021. ath6kl_wakeup_event(wmi->parent_dev);
  1022. return 0;
  1023. }
  1024. static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
  1025. {
  1026. struct wmi_tx_pwr_reply *reply;
  1027. if (len < sizeof(struct wmi_tx_pwr_reply))
  1028. return -EINVAL;
  1029. reply = (struct wmi_tx_pwr_reply *) datap;
  1030. ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);
  1031. return 0;
  1032. }
  1033. static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
  1034. {
  1035. if (len < sizeof(struct wmi_get_keepalive_cmd))
  1036. return -EINVAL;
  1037. ath6kl_wakeup_event(wmi->parent_dev);
  1038. return 0;
  1039. }
  1040. static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
  1041. struct ath6kl_vif *vif)
  1042. {
  1043. struct wmi_scan_complete_event *ev;
  1044. ev = (struct wmi_scan_complete_event *) datap;
  1045. ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
  1046. wmi->is_probe_ssid = false;
  1047. return 0;
  1048. }
/*
 * Handle a neighbor report from firmware: advertise each listed BSS to
 * cfg80211 as a PMKSA (pre-authentication) candidate.
 */
static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
					       int len, struct ath6kl_vif *vif)
{
	struct wmi_neighbor_report_event *ev;
	u8 i;

	if (len < sizeof(*ev))
		return -EINVAL;
	ev = (struct wmi_neighbor_report_event *) datap;
	/* num_neighbors comes from firmware; verify the claimed flexible
	 * array actually fits inside the received buffer. */
	if (struct_size(ev, neighbor, ev->num_neighbors) > len) {
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "truncated neighbor event (num=%d len=%d)\n",
			   ev->num_neighbors, len);
		return -EINVAL;
	}
	for (i = 0; i < ev->num_neighbors; i++) {
		ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
			   i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
			   ev->neighbor[i].bss_flags);
		cfg80211_pmksa_candidate_notify(vif->ndev, i,
						ev->neighbor[i].bssid,
						!!(ev->neighbor[i].bss_flags &
						   WMI_PREAUTH_CAPABLE_BSS),
						GFP_ATOMIC);
	}

	return 0;
}
  1075. /*
  1076. * Target is reporting a programming error. This is for
  1077. * developer aid only. Target only checks a few common violations
  1078. * and it is responsibility of host to do all error checking.
  1079. * Behavior of target after wmi error event is undefined.
  1080. * A reset is recommended.
  1081. */
  1082. static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
  1083. {
  1084. const char *type = "unknown error";
  1085. struct wmi_cmd_error_event *ev;
  1086. ev = (struct wmi_cmd_error_event *) datap;
  1087. switch (ev->err_code) {
  1088. case INVALID_PARAM:
  1089. type = "invalid parameter";
  1090. break;
  1091. case ILLEGAL_STATE:
  1092. type = "invalid state";
  1093. break;
  1094. case INTERNAL_ERROR:
  1095. type = "internal error";
  1096. break;
  1097. }
  1098. ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
  1099. ev->cmd_id, type);
  1100. return 0;
  1101. }
  1102. static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
  1103. struct ath6kl_vif *vif)
  1104. {
  1105. ath6kl_tgt_stats_event(vif, datap, len);
  1106. return 0;
  1107. }
  1108. static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
  1109. struct sq_threshold_params *sq_thresh,
  1110. u32 size)
  1111. {
  1112. u32 index;
  1113. u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
  1114. /* The list is already in sorted order. Get the next lower value */
  1115. for (index = 0; index < size; index++) {
  1116. if (rssi < sq_thresh->upper_threshold[index]) {
  1117. threshold = (u8) sq_thresh->upper_threshold[index];
  1118. break;
  1119. }
  1120. }
  1121. return threshold;
  1122. }
  1123. static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
  1124. struct sq_threshold_params *sq_thresh,
  1125. u32 size)
  1126. {
  1127. u32 index;
  1128. u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
  1129. /* The list is already in sorted order. Get the next lower value */
  1130. for (index = 0; index < size; index++) {
  1131. if (rssi > sq_thresh->lower_threshold[index]) {
  1132. threshold = (u8) sq_thresh->lower_threshold[index];
  1133. break;
  1134. }
  1135. }
  1136. return threshold;
  1137. }
  1138. static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
  1139. struct wmi_rssi_threshold_params_cmd *rssi_cmd)
  1140. {
  1141. struct sk_buff *skb;
  1142. struct wmi_rssi_threshold_params_cmd *cmd;
  1143. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  1144. if (!skb)
  1145. return -ENOMEM;
  1146. cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
  1147. memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
  1148. return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
  1149. NO_SYNC_WMIFLAG);
  1150. }
/*
 * Handle an RSSI threshold crossing event: map the reported RSSI to the
 * specific threshold band that was crossed (reply->range selects the
 * upper or lower ladder), then install a fresh upper/lower threshold
 * pair bracketing the current value.
 */
static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
					      int len)
{
	struct wmi_rssi_threshold_event *reply;
	struct wmi_rssi_threshold_params_cmd cmd;
	struct sq_threshold_params *sq_thresh;
	enum wmi_rssi_threshold_val new_threshold;
	u8 upper_rssi_threshold, lower_rssi_threshold;
	s16 rssi;
	int ret;

	if (len < sizeof(struct wmi_rssi_threshold_event))
		return -EINVAL;

	reply = (struct wmi_rssi_threshold_event *) datap;
	new_threshold = (enum wmi_rssi_threshold_val) reply->range;
	rssi = a_sle16_to_cpu(reply->rssi);

	sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];

	/*
	 * Identify the threshold breached and communicate that to the app.
	 * After that install a new set of thresholds based on the signal
	 * quality reported by the target
	 */
	if (new_threshold) {
		/* Upper threshold breached */
		if (rssi < sq_thresh->upper_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "spurious upper rssi threshold event: %d\n",
				   rssi);
		} else if ((rssi < sq_thresh->upper_threshold[1]) &&
			   (rssi >= sq_thresh->upper_threshold[0])) {
			new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[2]) &&
			   (rssi >= sq_thresh->upper_threshold[1])) {
			new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[3]) &&
			   (rssi >= sq_thresh->upper_threshold[2])) {
			new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[4]) &&
			   (rssi >= sq_thresh->upper_threshold[3])) {
			new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[5]) &&
			   (rssi >= sq_thresh->upper_threshold[4])) {
			new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
		} else if (rssi >= sq_thresh->upper_threshold[5]) {
			new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
		}
	} else {
		/* Lower threshold breached */
		if (rssi > sq_thresh->lower_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "spurious lower rssi threshold event: %d %d\n",
				   rssi, sq_thresh->lower_threshold[0]);
		} else if ((rssi > sq_thresh->lower_threshold[1]) &&
			   (rssi <= sq_thresh->lower_threshold[0])) {
			new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[2]) &&
			   (rssi <= sq_thresh->lower_threshold[1])) {
			new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[3]) &&
			   (rssi <= sq_thresh->lower_threshold[2])) {
			new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[4]) &&
			   (rssi <= sq_thresh->lower_threshold[3])) {
			new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[5]) &&
			   (rssi <= sq_thresh->lower_threshold[4])) {
			new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
		} else if (rssi <= sq_thresh->lower_threshold[5]) {
			new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
		}
	}

	/* Calculate and install the next set of thresholds */
	lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
				       sq_thresh->lower_threshold_valid_count);
	upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
				       sq_thresh->upper_threshold_valid_count);

	/* Issue a wmi command to install the thresholds */
	cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
	cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
	cmd.weight = sq_thresh->weight;
	cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);

	ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
	if (ret) {
		ath6kl_err("unable to configure rssi thresholds\n");
		return -EIO;
	}

	return 0;
}
/*
 * Handle a Channel Admission Control (WMM TSPEC) event from the target.
 * Tears down host pstream state when an ADDTS is rejected or unanswered,
 * and when the AP deletes a traffic stream.
 */
static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
				   struct ath6kl_vif *vif)
{
	struct wmi_cac_event *reply;
	struct ieee80211_tspec_ie *ts;
	u16 active_tsids, tsinfo;
	u8 tsid, index;
	u8 ts_id;

	if (len < sizeof(struct wmi_cac_event))
		return -EINVAL;

	reply = (struct wmi_cac_event *) datap;
	if (reply->ac >= WMM_NUM_AC) {
		ath6kl_err("invalid AC: %d\n", reply->ac);
		return -EINVAL;
	}

	/* Admission refused: tear down the pstream for the rejected TSID */
	if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
	    (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
		ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
		tsinfo = le16_to_cpu(ts->tsinfo);
		tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
			IEEE80211_WMM_IE_TSPEC_TID_MASK;

		ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
					      reply->ac, tsid);
	} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
		/*
		 * Following assumes that there is only one outstanding
		 * ADDTS request when this event is received
		 */
		spin_lock_bh(&wmi->lock);
		active_tsids = wmi->stream_exist_for_ac[reply->ac];
		spin_unlock_bh(&wmi->lock);

		/* Find the lowest-numbered active TSID on this AC */
		for (index = 0; index < sizeof(active_tsids) * 8; index++) {
			if ((active_tsids >> index) & 1)
				break;
		}
		if (index < (sizeof(active_tsids) * 8))
			ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
						      reply->ac, index);
	}

	/*
	 * Clear active tsids and Add missing handling
	 * for delete qos stream from AP
	 */
	else if (reply->cac_indication == CAC_INDICATION_DELETE) {
		ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
		tsinfo = le16_to_cpu(ts->tsinfo);
		ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
			 IEEE80211_WMM_IE_TSPEC_TID_MASK);

		spin_lock_bh(&wmi->lock);
		wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
		active_tsids = wmi->stream_exist_for_ac[reply->ac];
		spin_unlock_bh(&wmi->lock);

		/* Indicate stream inactivity to driver layer only if all tsids
		 * within this AC are deleted.
		 */
		if (!active_tsids) {
			ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
						    false);
			wmi->fat_pipe_exist &= ~(1 << reply->ac);
		}
	}

	return 0;
}
  1301. static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
  1302. struct ath6kl_vif *vif)
  1303. {
  1304. struct wmi_txe_notify_event *ev;
  1305. u32 rate, pkts;
  1306. if (len < sizeof(*ev))
  1307. return -EINVAL;
  1308. if (vif->nw_type != INFRA_NETWORK ||
  1309. !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
  1310. vif->ar->fw_capabilities))
  1311. return -EOPNOTSUPP;
  1312. if (vif->sme_state != SME_CONNECTED)
  1313. return -ENOTCONN;
  1314. ev = (struct wmi_txe_notify_event *) datap;
  1315. rate = le32_to_cpu(ev->rate);
  1316. pkts = le32_to_cpu(ev->pkts);
  1317. ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d%% pkts %d intvl %ds\n",
  1318. vif->bssid, rate, pkts, vif->txe_intvl);
  1319. cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
  1320. rate, vif->txe_intvl, GFP_KERNEL);
  1321. return 0;
  1322. }
  1323. int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
  1324. u32 rate, u32 pkts, u32 intvl)
  1325. {
  1326. struct sk_buff *skb;
  1327. struct wmi_txe_notify_cmd *cmd;
  1328. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  1329. if (!skb)
  1330. return -ENOMEM;
  1331. cmd = (struct wmi_txe_notify_cmd *) skb->data;
  1332. cmd->rate = cpu_to_le32(rate);
  1333. cmd->pkts = cpu_to_le32(pkts);
  1334. cmd->intvl = cpu_to_le32(intvl);
  1335. return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
  1336. NO_SYNC_WMIFLAG);
  1337. }
  1338. int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi)
  1339. {
  1340. struct sk_buff *skb;
  1341. struct wmi_set_rssi_filter_cmd *cmd;
  1342. int ret;
  1343. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  1344. if (!skb)
  1345. return -ENOMEM;
  1346. cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
  1347. cmd->rssi = rssi;
  1348. ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
  1349. NO_SYNC_WMIFLAG);
  1350. return ret;
  1351. }
  1352. static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
  1353. struct wmi_snr_threshold_params_cmd *snr_cmd)
  1354. {
  1355. struct sk_buff *skb;
  1356. struct wmi_snr_threshold_params_cmd *cmd;
  1357. skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
  1358. if (!skb)
  1359. return -ENOMEM;
  1360. cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
  1361. memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
  1362. return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
  1363. NO_SYNC_WMIFLAG);
  1364. }
/*
 * Handle an SNR threshold crossing event: map the reported SNR to the
 * specific threshold band that was crossed (reply->range selects the
 * upper or lower ladder), then install a fresh upper/lower threshold
 * pair bracketing the current value.
 */
static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
					     int len)
{
	struct wmi_snr_threshold_event *reply;
	struct sq_threshold_params *sq_thresh;
	struct wmi_snr_threshold_params_cmd cmd;
	enum wmi_snr_threshold_val new_threshold;
	u8 upper_snr_threshold, lower_snr_threshold;
	s16 snr;
	int ret;

	if (len < sizeof(struct wmi_snr_threshold_event))
		return -EINVAL;

	reply = (struct wmi_snr_threshold_event *) datap;

	new_threshold = (enum wmi_snr_threshold_val) reply->range;
	snr = reply->snr;

	sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];

	/*
	 * Identify the threshold breached and communicate that to the app.
	 * After that install a new set of thresholds based on the signal
	 * quality reported by the target.
	 */
	if (new_threshold) {
		/* Upper threshold breached */
		if (snr < sq_thresh->upper_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "spurious upper snr threshold event: %d\n",
				   snr);
		} else if ((snr < sq_thresh->upper_threshold[1]) &&
			   (snr >= sq_thresh->upper_threshold[0])) {
			new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
		} else if ((snr < sq_thresh->upper_threshold[2]) &&
			   (snr >= sq_thresh->upper_threshold[1])) {
			new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
		} else if ((snr < sq_thresh->upper_threshold[3]) &&
			   (snr >= sq_thresh->upper_threshold[2])) {
			new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
		} else if (snr >= sq_thresh->upper_threshold[3]) {
			new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
		}
	} else {
		/* Lower threshold breached */
		if (snr > sq_thresh->lower_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				   "spurious lower snr threshold event: %d\n",
				   sq_thresh->lower_threshold[0]);
		} else if ((snr > sq_thresh->lower_threshold[1]) &&
			   (snr <= sq_thresh->lower_threshold[0])) {
			new_threshold = WMI_SNR_THRESHOLD4_BELOW;
		} else if ((snr > sq_thresh->lower_threshold[2]) &&
			   (snr <= sq_thresh->lower_threshold[1])) {
			new_threshold = WMI_SNR_THRESHOLD3_BELOW;
		} else if ((snr > sq_thresh->lower_threshold[3]) &&
			   (snr <= sq_thresh->lower_threshold[2])) {
			new_threshold = WMI_SNR_THRESHOLD2_BELOW;
		} else if (snr <= sq_thresh->lower_threshold[3]) {
			new_threshold = WMI_SNR_THRESHOLD1_BELOW;
		}
	}

	/* Calculate and install the next set of thresholds */
	lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
				       sq_thresh->lower_threshold_valid_count);
	upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
				       sq_thresh->upper_threshold_valid_count);

	/* Issue a wmi command to install the thresholds */
	cmd.thresh_above1_val = upper_snr_threshold;
	cmd.thresh_below1_val = lower_snr_threshold;
	cmd.weight = sq_thresh->weight;
	cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "snr: %d, threshold: %d, lower: %d, upper: %d\n",
		   snr, new_threshold,
		   lower_snr_threshold, upper_snr_threshold);

	ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
	if (ret) {
		ath6kl_err("unable to configure snr threshold\n");
		return -EIO;
	}

	return 0;
}
/* Dump the version-1 AP list reported by firmware to the debug log. */
static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
{
	u16 ap_info_entry_size;
	struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
	struct wmi_ap_info_v1 *ap_info_v1;
	u8 index;

	if (len < sizeof(struct wmi_aplist_event) ||
	    ev->ap_list_ver != APLIST_VER1)
		return -EINVAL;

	ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
	ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "number of APs in aplist event: %d\n", ev->num_ap);

	/* The event struct appears to already contain one entry, hence
	 * num_ap - 1 extra entries.  NOTE(review): for num_ap == 0 the
	 * size_t arithmetic wraps before the (int) cast; the loop below
	 * is still a no-op in that case — confirm against struct layout. */
	if (len < (int) (sizeof(struct wmi_aplist_event) +
			 (ev->num_ap - 1) * ap_info_entry_size))
		return -EINVAL;

	/* AP list version 1 contents */
	for (index = 0; index < ev->num_ap; index++) {
		ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
			   index, ap_info_v1->bssid, ap_info_v1->channel);
		ap_info_v1++;
	}

	return 0;
}
/*
 * Send a WMI control command to the target.
 *
 * Consumes @skb on every path (success or failure).  @if_idx selects the
 * firmware virtual interface.  @sync_flag may force data-path
 * synchronization points before and/or after the command.
 */
int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
			enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum htc_endpoint_id ep_id = wmi->ep_id;
	int ret;
	u16 info1;

	if (WARN_ON(skb == NULL ||
		    (if_idx > (wmi->parent_dev->vif_max - 1)))) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
		   cmd_id, skb->len, sync_flag);
	ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi tx ",
			skb->data, skb->len);

	if (sync_flag >= END_WMIFLAG) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
	    (sync_flag == SYNC_BOTH_WMIFLAG)) {
		/*
		 * Make sure all data currently queued is transmitted before
		 * the cmd execution. Establish a new sync point.
		 */
		ath6kl_wmi_sync_point(wmi, if_idx);
	}

	/* Prepend the WMI command header carrying the id and interface */
	skb_push(skb, sizeof(struct wmi_cmd_hdr));

	cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
	info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
	cmd_hdr->info1 = cpu_to_le16(info1);

	/* Only for OPT_TX_CMD, use BE endpoint. */
	if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
		ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
					      false, false, 0, NULL, if_idx);
		if (ret) {
			dev_kfree_skb(skb);
			return ret;
		}
		ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
	}

	ath6kl_control_tx(wmi->parent_dev, skb, ep_id);

	if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
	    (sync_flag == SYNC_BOTH_WMIFLAG)) {
		/*
		 * Make sure all new data queued waits for the command to
		 * execute. Establish a new sync point.
		 */
		ath6kl_wmi_sync_point(wmi, if_idx);
	}

	return 0;
}
  1522. int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
  1523. enum network_type nw_type,
  1524. enum dot11_auth_mode dot11_auth_mode,
  1525. enum auth_mode auth_mode,
  1526. enum ath6kl_crypto_type pairwise_crypto,
  1527. u8 pairwise_crypto_len,
  1528. enum ath6kl_crypto_type group_crypto,
  1529. u8 group_crypto_len, int ssid_len, u8 *ssid,
  1530. u8 *bssid, u16 channel, u32 ctrl_flags,
  1531. u8 nw_subtype)
  1532. {
  1533. struct sk_buff *skb;
  1534. struct wmi_connect_cmd *cc;
  1535. int ret;
  1536. ath6kl_dbg(ATH6KL_DBG_WMI,
  1537. "wmi connect bssid %pM freq %d flags 0x%x ssid_len %d "
  1538. "type %d dot11_auth %d auth %d pairwise %d group %d\n",
  1539. bssid, channel, ctrl_