/drivers/net/wireless/wl12xx/main.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2 · C · 4095 lines · 3042 code · 698 blank · 355 comment · 458 complexity · f019852059d6ce130b8d50f428d81163 MD5 · raw file

Large files are truncated click here to view the full file

  1. /*
  2. * This file is part of wl1271
  3. *
  4. * Copyright (C) 2008-2010 Nokia Corporation
  5. *
  6. * Contact: Luciano Coelho <luciano.coelho@nokia.com>
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * version 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  20. * 02110-1301 USA
  21. *
  22. */
  23. #include <linux/module.h>
  24. #include <linux/firmware.h>
  25. #include <linux/delay.h>
  26. #include <linux/spi/spi.h>
  27. #include <linux/crc32.h>
  28. #include <linux/etherdevice.h>
  29. #include <linux/vmalloc.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/slab.h>
  32. #include <linux/wl12xx.h>
  33. #include "wl12xx.h"
  34. #include "wl12xx_80211.h"
  35. #include "reg.h"
  36. #include "io.h"
  37. #include "event.h"
  38. #include "tx.h"
  39. #include "rx.h"
  40. #include "ps.h"
  41. #include "init.h"
  42. #include "debugfs.h"
  43. #include "cmd.h"
  44. #include "boot.h"
  45. #include "testmode.h"
  46. #include "scan.h"
  47. #define WL1271_BOOT_RETRIES 3
/*
 * Driver default configuration, copied into each wl1271 instance at probe
 * time by wl1271_conf_init().  Values may be tuned at run time through the
 * copy in wl->conf; this table itself is never modified.
 */
static struct conf_drv_settings default_conf = {
	/* BT/WLAN soft-gemini coexistence parameters */
	.sg = {
		/* coexistence tuning used while operating as a station */
		.sta_params = {
			[CONF_SG_BT_PER_THRESHOLD] = 7500,
			[CONF_SG_HV3_MAX_OVERRIDE] = 0,
			[CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
			[CONF_SG_BT_LOAD_RATIO] = 200,
			[CONF_SG_AUTO_PS_MODE] = 1,
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_RATE_ADAPT_THRESH] = 12,
			[CONF_SG_RATE_ADAPT_SNR] = 0,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
			/* Note: with UPSD, this should be 4 */
			[CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
			/* Note: with UPDS, this should be 15 */
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
			/* Note: with UPDS, this should be 50 */
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
			/* Note: with UPDS, this should be 10 */
			[CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
			[CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
			[CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
		},
		/* coexistence tuning used while operating as an AP */
		.ap_params = {
			[CONF_SG_BT_PER_THRESHOLD] = 7500,
			[CONF_SG_HV3_MAX_OVERRIDE] = 0,
			[CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
			[CONF_SG_BT_LOAD_RATIO] = 50,
			[CONF_SG_AUTO_PS_MODE] = 1,
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_RATE_ADAPT_THRESH] = 64,
			[CONF_SG_RATE_ADAPT_SNR] = 1,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 25,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 25,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 25,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 25,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
			[CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 25,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
			[CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 25,
			[CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 25,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
			[CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
			[CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
			[CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
			[CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			[CONF_SG_TEMP_PARAM_1] = 0,
			[CONF_SG_TEMP_PARAM_2] = 0,
			[CONF_SG_TEMP_PARAM_3] = 0,
			[CONF_SG_TEMP_PARAM_4] = 0,
			[CONF_SG_TEMP_PARAM_5] = 0,
			[CONF_SG_AP_BEACON_MISS_TX] = 3,
			[CONF_SG_RX_WINDOW_LENGTH] = 6,
			[CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 50,
			[CONF_SG_TEMP_PARAM_6] = 1,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	/* RX path settings (timeouts, thresholds, interrupt pacing) */
	.rx = {
		.rx_msdu_life_time = 512000,
		.packet_detection_threshold = 0,
		.ps_poll_timeout = 15,
		.upsd_timeout = 15,
		.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold = 0,
		.irq_blk_threshold = 0xFFFF,
		.irq_pkt_threshold = 0,
		.irq_timeout = 600,
		.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	/* TX path settings: rate control, per-AC EDCA, per-TID config */
	.tx = {
		.tx_energy_detection = 0,
		.sta_rc_conf = {
			.enabled_rates = 0,
			.short_retry_limit = 10,
			.long_retry_limit = 10,
			.aflags = 0,
		},
		.ac_conf_count = 4,
		.ac_conf = {
			[CONF_TX_AC_BE] = {
				.ac = CONF_TX_AC_BE,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac = CONF_TX_AC_BK,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac = CONF_TX_AC_VI,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac = CONF_TX_AC_VO,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.ap_max_tx_retries = 100,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BE,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BK,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VI,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VO,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
		},
		.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout = 700,
		.tx_compl_threshold = 4,
		.basic_rate = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit = 10,
		.tmpl_long_retry_limit = 10,
	},
	/* connection management: beacon filtering, PSM, keep-alive */
	.conn = {
		.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval = 1,
		.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count = 2,
		.bcn_filt_ie = {
			[0] = {
				.ie = WLAN_EID_CHANNEL_SWITCH,
				.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie = WLAN_EID_HT_INFORMATION,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold = 10,
		.bss_lose_timeout = 100,
		.beacon_rx_timeout = 10000,
		.broadcast_timeout = 20000,
		.rx_broadcast_in_ps = 1,
		.ps_poll_threshold = 10,
		.ps_poll_recovery_period = 700,
		.bet_enable = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive = 50,
		.psm_entry_retries = 5,
		.psm_exit_retries = 16,
		.psm_entry_nullfunc_retries = 3,
		.psm_entry_hangover_period = 1,
		.keep_alive_interval = 55000,
		.max_listen_interval = 20,
	},
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false
	},
	/* weights for roaming trigger averaging */
	.roam_trigger = {
		.trigger_pacing = 1,
		.avg_weight_rssi_beacon = 20,
		.avg_weight_rssi_data = 10,
		.avg_weight_snr_beacon = 20,
		.avg_weight_snr_data = 10,
	},
	/* one-shot scan dwell times, in TU/1000 */
	.scan = {
		.min_dwell_time_active = 7500,
		.max_dwell_time_active = 30000,
		.min_dwell_time_passive = 100000,
		.max_dwell_time_passive = 100000,
		.num_probe_reqs = 2,
	},
	.sched_scan = {
		/* sched_scan requires dwell times in TU instead of TU/1000 */
		.min_dwell_time_active = 8,
		.max_dwell_time_active = 30,
		.dwell_time_passive = 100,
		.dwell_time_dfs = 150,
		.num_probe_reqs = 2,
		.rssi_threshold = -90,
		.snr_threshold = 0,
	},
	/* per-channel TX power compensation (all zero = no compensation) */
	.rf = {
		.tx_per_channel_power_compensation_2 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
		.tx_per_channel_power_compensation_5 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
	},
	.ht = {
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
	},
	/* firmware memory layout for wl127x chips */
	.mem_wl127x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 70,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 100,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* firmware memory layout for wl128x chips */
	.mem_wl128x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* FM/WLAN coexistence; 0xff.. values mean "use firmware default" */
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff, /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 148,
		.m_divider_fref_set_2 = 0xffff, /* default */
		.coex_pll_stabilization_time = 0xffffffff, /* default */
		.ldo_stabilization_time = 0xffff, /* default */
		.fm_disturbed_band_margin = 0xff, /* default */
		.swallow_clk_diff = 0xff, /* default */
	},
	.hci_io_ds = HCI_IO_DS_6MA,
};
/* Forward declarations for helpers defined later in this file. */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 bool reset_tx_queues);
static void wl1271_free_ap_keys(struct wl1271 *wl);

/*
 * Intentionally empty release callback: wl1271_device below is statically
 * allocated, so there is nothing to free, but the driver core requires a
 * release function for every device.
 */
static void wl1271_device_release(struct device *dev)
{
}
/* Statically allocated platform device instance for the wl1271. */
static struct platform_device wl1271_device = {
	.name = "wl1271",
	.id = -1,

	/* device model insists to have a release function */
	.dev = {
		.release = wl1271_device_release,
	},
};
/* List of all live wl1271 instances, protected by wl_list_mutex. */
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
/*
 * Netdevice notifier callback.  On NETDEV_CHANGE, if the net_device belongs
 * to one of our wl1271 instances and the interface has gone operationally
 * up while associated, tell the firmware the station is connected (sent at
 * most once, guarded by WL1271_FLAG_STA_STATE_SENT).
 */
static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
			     void *arg)
{
	struct net_device *dev = arg;
	struct wireless_dev *wdev;
	struct wiphy *wiphy;
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	struct wl1271 *wl_temp;
	int ret = 0;

	/* Check that this notification is for us. */
	if (what != NETDEV_CHANGE)
		return NOTIFY_DONE;

	wdev = dev->ieee80211_ptr;
	if (wdev == NULL)
		return NOTIFY_DONE;

	wiphy = wdev->wiphy;
	if (wiphy == NULL)
		return NOTIFY_DONE;

	hw = wiphy_priv(wiphy);
	if (hw == NULL)
		return NOTIFY_DONE;

	/* Verify the candidate instance is actually on our global list. */
	wl_temp = hw->priv;
	mutex_lock(&wl_list_mutex);
	list_for_each_entry(wl, &wl_list, list) {
		if (wl == wl_temp)
			break;
	}
	mutex_unlock(&wl_list_mutex);
	/* Not found (loop ran off the end): notification is not for us. */
	if (wl != wl_temp)
		return NOTIFY_DONE;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		goto out;

	/* Chip may be in ELP sleep; wake it before issuing the command. */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if ((dev->operstate == IF_OPER_UP) &&
	    !test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) {
		wl1271_cmd_set_sta_state(wl);
		wl1271_info("Association completed.");
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return NOTIFY_OK;
}
  428. static int wl1271_reg_notify(struct wiphy *wiphy,
  429. struct regulatory_request *request)
  430. {
  431. struct ieee80211_supported_band *band;
  432. struct ieee80211_channel *ch;
  433. int i;
  434. band = wiphy->bands[IEEE80211_BAND_5GHZ];
  435. for (i = 0; i < band->n_channels; i++) {
  436. ch = &band->channels[i];
  437. if (ch->flags & IEEE80211_CHAN_DISABLED)
  438. continue;
  439. if (ch->flags & IEEE80211_CHAN_RADAR)
  440. ch->flags |= IEEE80211_CHAN_NO_IBSS |
  441. IEEE80211_CHAN_PASSIVE_SCAN;
  442. }
  443. return 0;
  444. }
  445. static void wl1271_conf_init(struct wl1271 *wl)
  446. {
  447. /*
  448. * This function applies the default configuration to the driver. This
  449. * function is invoked upon driver load (spi probe.)
  450. *
  451. * The configuration is stored in a run-time structure in order to
  452. * facilitate for run-time adjustment of any of the parameters. Making
  453. * changes to the configuration structure will apply the new values on
  454. * the next interface up (wl1271_op_start.)
  455. */
  456. /* apply driver default configuration */
  457. memcpy(&wl->conf, &default_conf, sizeof(default_conf));
  458. }
  459. static int wl1271_plt_init(struct wl1271 *wl)
  460. {
  461. struct conf_tx_ac_category *conf_ac;
  462. struct conf_tx_tid *conf_tid;
  463. int ret, i;
  464. if (wl->chip.id == CHIP_ID_1283_PG20)
  465. ret = wl128x_cmd_general_parms(wl);
  466. else
  467. ret = wl1271_cmd_general_parms(wl);
  468. if (ret < 0)
  469. return ret;
  470. if (wl->chip.id == CHIP_ID_1283_PG20)
  471. ret = wl128x_cmd_radio_parms(wl);
  472. else
  473. ret = wl1271_cmd_radio_parms(wl);
  474. if (ret < 0)
  475. return ret;
  476. if (wl->chip.id != CHIP_ID_1283_PG20) {
  477. ret = wl1271_cmd_ext_radio_parms(wl);
  478. if (ret < 0)
  479. return ret;
  480. }
  481. if (ret < 0)
  482. return ret;
  483. /* Chip-specific initializations */
  484. ret = wl1271_chip_specific_init(wl);
  485. if (ret < 0)
  486. return ret;
  487. ret = wl1271_sta_init_templates_config(wl);
  488. if (ret < 0)
  489. return ret;
  490. ret = wl1271_acx_init_mem_config(wl);
  491. if (ret < 0)
  492. return ret;
  493. /* PHY layer config */
  494. ret = wl1271_init_phy_config(wl);
  495. if (ret < 0)
  496. goto out_free_memmap;
  497. ret = wl1271_acx_dco_itrim_params(wl);
  498. if (ret < 0)
  499. goto out_free_memmap;
  500. /* Initialize connection monitoring thresholds */
  501. ret = wl1271_acx_conn_monit_params(wl, false);
  502. if (ret < 0)
  503. goto out_free_memmap;
  504. /* Bluetooth WLAN coexistence */
  505. ret = wl1271_init_pta(wl);
  506. if (ret < 0)
  507. goto out_free_memmap;
  508. /* FM WLAN coexistence */
  509. ret = wl1271_acx_fm_coex(wl);
  510. if (ret < 0)
  511. goto out_free_memmap;
  512. /* Energy detection */
  513. ret = wl1271_init_energy_detection(wl);
  514. if (ret < 0)
  515. goto out_free_memmap;
  516. ret = wl1271_acx_sta_mem_cfg(wl);
  517. if (ret < 0)
  518. goto out_free_memmap;
  519. /* Default fragmentation threshold */
  520. ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
  521. if (ret < 0)
  522. goto out_free_memmap;
  523. /* Default TID/AC configuration */
  524. BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
  525. for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
  526. conf_ac = &wl->conf.tx.ac_conf[i];
  527. ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
  528. conf_ac->cw_max, conf_ac->aifsn,
  529. conf_ac->tx_op_limit);
  530. if (ret < 0)
  531. goto out_free_memmap;
  532. conf_tid = &wl->conf.tx.tid_conf[i];
  533. ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
  534. conf_tid->channel_type,
  535. conf_tid->tsid,
  536. conf_tid->ps_scheme,
  537. conf_tid->ack_policy,
  538. conf_tid->apsd_conf[0],
  539. conf_tid->apsd_conf[1]);
  540. if (ret < 0)
  541. goto out_free_memmap;
  542. }
  543. /* Enable data path */
  544. ret = wl1271_cmd_data_path(wl, 1);
  545. if (ret < 0)
  546. goto out_free_memmap;
  547. /* Configure for CAM power saving (ie. always active) */
  548. ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
  549. if (ret < 0)
  550. goto out_free_memmap;
  551. /* configure PM */
  552. ret = wl1271_acx_pm_config(wl);
  553. if (ret < 0)
  554. goto out_free_memmap;
  555. return 0;
  556. out_free_memmap:
  557. kfree(wl->target_mem_map);
  558. wl->target_mem_map = NULL;
  559. return ret;
  560. }
  561. static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
  562. {
  563. bool fw_ps;
  564. /* only regulate station links */
  565. if (hlid < WL1271_AP_STA_HLID_START)
  566. return;
  567. fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
  568. /*
  569. * Wake up from high level PS if the STA is asleep with too little
  570. * blocks in FW or if the STA is awake.
  571. */
  572. if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
  573. wl1271_ps_link_end(wl, hlid);
  574. /* Start high-level PS if the STA is asleep with enough blocks in FW */
  575. else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
  576. wl1271_ps_link_start(wl, hlid, true);
  577. }
/*
 * Update per-link accounting from an AP-mode firmware status block:
 * refresh the link power-save bitmap, recompute the number of TX blocks
 * still allocated per station link, and (de)activate high-level PS
 * accordingly.
 */
static void wl1271_irq_update_links_status(struct wl1271 *wl,
					   struct wl1271_fw_ap_status *status)
{
	u32 cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
		/*
		 * Blocks the FW freed for this link since the last status
		 * read (u8 arithmetic, so counter wrap-around is handled).
		 */
		u8 cnt = status->tx_lnk_free_blks[hlid] -
			wl->links[hlid].prev_freed_blks;

		wl->links[hlid].prev_freed_blks =
			status->tx_lnk_free_blks[hlid];
		wl->links[hlid].allocated_blks -= cnt;

		wl1271_irq_ps_regulate_link(wl, hlid,
					    wl->links[hlid].allocated_blks);
	}
}
/*
 * Read the firmware status block from the chip and update host-side TX
 * block accounting, link PS state (AP mode), the FW-busy flag, and the
 * host/chipset time offset.
 */
static void wl1271_fw_status(struct wl1271 *wl,
			     struct wl1271_fw_full_status *full_status)
{
	struct wl1271_fw_common_status *status = &full_status->common;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	u32 freed_blocks = 0;
	int i;

	/* read only the status variant matching the current role */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_raw_read(wl, FW_STATUS_ADDR, status,
				sizeof(struct wl1271_fw_ap_status), false);
	} else {
		wl1271_raw_read(wl, FW_STATUS_ADDR, status,
				sizeof(struct wl1271_fw_sta_status), false);
	}

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	/* update number of available TX blocks */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		freed_blocks += le32_to_cpu(status->tx_released_blks[i]) -
				wl->tx_blocks_freed[i];

		wl->tx_blocks_freed[i] =
			le32_to_cpu(status->tx_released_blks[i]);
	}

	wl->tx_allocated_blocks -= freed_blocks;

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		/* Update num of allocated TX blocks per link and ps status */
		wl1271_irq_update_links_status(wl, &full_status->ap);
		wl->tx_blocks_available += freed_blocks;
	} else {
		int avail = full_status->sta.tx_total - wl->tx_allocated_blocks;

		/*
		 * The FW might change the total number of TX memblocks before
		 * we get a notification about blocks being released. Thus, the
		 * available blocks calculation might yield a temporary result
		 * which is lower than the actual available blocks. Keeping in
		 * mind that only blocks that were allocated can be moved from
		 * TX to RX, tx_blocks_available should never decrease here.
		 */
		wl->tx_blocks_available = max((int)wl->tx_blocks_available,
					      avail);
	}

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status->fw_localtime);
}
  655. static void wl1271_flush_deferred_work(struct wl1271 *wl)
  656. {
  657. struct sk_buff *skb;
  658. /* Pass all received frames to the network stack */
  659. while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
  660. ieee80211_rx_ni(wl->hw, skb);
  661. /* Return sent skbs to the network stack */
  662. while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
  663. ieee80211_tx_status(wl->hw, skb);
  664. }
  665. static void wl1271_netstack_work(struct work_struct *work)
  666. {
  667. struct wl1271 *wl =
  668. container_of(work, struct wl1271, netstack_work);
  669. do {
  670. wl1271_flush_deferred_work(wl);
  671. } while (skb_queue_len(&wl->deferred_rx_queue));
  672. }
/* Upper bound on status-read iterations per IRQ, to avoid livelock. */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Threaded IRQ handler: wakes the chip, then repeatedly reads and
 * dispatches pending interrupt causes (RX/TX data, event mailboxes A/B,
 * watchdog) until no causes remain or the loop budget is exhausted.
 * Runs under wl->mutex; interacts with the TX path via WL1271_FLAG_*
 * bits and queues deferred work back to mac80211 as needed.
 */
irqreturn_t wl1271_irq(int irq, void *cookie)
{
	int ret;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		wl1271_fw_status(wl, wl->fw_status);
		intr = le32_to_cpu(wl->fw_status->common.intr);
		intr &= WL1271_INTR_MASK;
		if (!intr) {
			/* no pending causes left: stop iterating */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
			ieee80211_queue_work(wl->hw, &wl->recovery_work);

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			wl1271_rx(wl, &wl->fw_status->common);

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl->tx_queue_count) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				wl1271_tx_work_locked(wl);
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			if (wl->fw_status->common.tx_results_counter !=
			    (wl->tx_results_count & 0xff))
				wl1271_tx_complete(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			wl1271_event_handle(wl, 0);
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			wl1271_event_handle(wl, 1);
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl->tx_queue_count)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wl1271_irq);
  774. static int wl1271_fetch_firmware(struct wl1271 *wl)
  775. {
  776. const struct firmware *fw;
  777. const char *fw_name;
  778. int ret;
  779. switch (wl->bss_type) {
  780. case BSS_TYPE_AP_BSS:
  781. if (wl->chip.id == CHIP_ID_1283_PG20)
  782. fw_name = WL128X_AP_FW_NAME;
  783. else
  784. fw_name = WL127X_AP_FW_NAME;
  785. break;
  786. case BSS_TYPE_IBSS:
  787. case BSS_TYPE_STA_BSS:
  788. if (wl->chip.id == CHIP_ID_1283_PG20)
  789. fw_name = WL128X_FW_NAME;
  790. else
  791. fw_name = WL1271_FW_NAME;
  792. break;
  793. default:
  794. wl1271_error("no compatible firmware for bss_type %d",
  795. wl->bss_type);
  796. return -EINVAL;
  797. }
  798. wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
  799. ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
  800. if (ret < 0) {
  801. wl1271_error("could not get firmware: %d", ret);
  802. return ret;
  803. }
  804. if (fw->size % 4) {
  805. wl1271_error("firmware size is not multiple of 32 bits: %zu",
  806. fw->size);
  807. ret = -EILSEQ;
  808. goto out;
  809. }
  810. vfree(wl->fw);
  811. wl->fw_len = fw->size;
  812. wl->fw = vmalloc(wl->fw_len);
  813. if (!wl->fw) {
  814. wl1271_error("could not allocate memory for the firmware");
  815. ret = -ENOMEM;
  816. goto out;
  817. }
  818. memcpy(wl->fw, fw->data, wl->fw_len);
  819. wl->fw_bss_type = wl->bss_type;
  820. ret = 0;
  821. out:
  822. release_firmware(fw);
  823. return ret;
  824. }
  825. static int wl1271_fetch_nvs(struct wl1271 *wl)
  826. {
  827. const struct firmware *fw;
  828. int ret;
  829. ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
  830. if (ret < 0) {
  831. wl1271_error("could not get nvs file: %d", ret);
  832. return ret;
  833. }
  834. wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
  835. if (!wl->nvs) {
  836. wl1271_error("could not allocate memory for the nvs file");
  837. ret = -ENOMEM;
  838. goto out;
  839. }
  840. wl->nvs_len = fw->size;
  841. out:
  842. release_firmware(fw);
  843. return ret;
  844. }
/*
 * Hardware recovery work: after a watchdog (or similar fatal) event,
 * report connection loss, stop TX, tear down the interface and ask
 * mac80211 to restart the hardware from scratch.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);

	mutex_lock(&wl->mutex);

	if (wl->state != WL1271_STATE_ON)
		goto out;

	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
		    wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));

	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		ieee80211_connection_loss(wl->vif);

	/* Prevent spurious TX during FW restart */
	ieee80211_stop_queues(wl->hw);

	/* a scheduled scan will not survive the restart; tell mac80211 */
	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

	/* reboot the chipset */
	__wl1271_op_remove_interface(wl, false);
	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	ieee80211_wake_queues(wl->hw);

out:
	mutex_unlock(&wl->mutex);
}
  873. static void wl1271_fw_wakeup(struct wl1271 *wl)
  874. {
  875. u32 elp_reg;
  876. elp_reg = ELPCTRL_WAKE_UP;
  877. wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
  878. }
  879. static int wl1271_setup(struct wl1271 *wl)
  880. {
  881. wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
  882. if (!wl->fw_status)
  883. return -ENOMEM;
  884. wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
  885. if (!wl->tx_res_if) {
  886. kfree(wl->fw_status);
  887. return -ENOMEM;
  888. }
  889. return 0;
  890. }
/*
 * Power on and identify the chip: reset/init the IO layer, map the
 * register area, wake the ELP module, read and validate the chip id,
 * run per-chip setup, and make sure matching firmware and NVS images
 * are loaded.  Returns 0 on success or a negative error code.
 */
static int wl1271_chip_wakeup(struct wl1271 *wl)
{
	struct wl1271_partition_set partition;
	int ret = 0;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	/* We don't need a real memory partition here, because we only want
	 * to use the registers at this point. */
	memset(&partition, 0, sizeof(partition));
	partition.reg.start = REGISTERS_BASE;
	partition.reg.size = REGISTERS_DOWN_SIZE;
	wl1271_set_partition(wl, &partition);

	/* ELP module wake up */
	wl1271_fw_wakeup(wl);

	/* whal_FwCtrl_BootSm() */

	/* 0. read chip id from CHIP_ID */
	wl->chip.id = wl1271_read32(wl, CHIP_ID_B);

	/* 1. check if chip id is valid */
	switch (wl->chip.id) {
	case CHIP_ID_1271_PG10:
		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
			       wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		break;

	case CHIP_ID_1271_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
			     wl->chip.id);

		/* end-of-transaction flag should be set in wl127x AP mode */
		if (wl->bss_type == BSS_TYPE_AP_BSS)
			wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		break;

	case CHIP_ID_1283_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
			     wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;

		if (wl1271_set_block_size(wl))
			wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
		break;

	case CHIP_ID_1283_PG10:
	default:
		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
		ret = -ENODEV;
		goto out;
	}

	/* Make sure the firmware type matches the BSS type */
	if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
		ret = wl1271_fetch_firmware(wl);
		if (ret < 0)
			goto out;
	}

	/* No NVS from netlink, try to get it from the filesystem */
	if (wl->nvs == NULL) {
		ret = wl1271_fetch_nvs(wl);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
  962. static unsigned int wl1271_get_fw_ver_quirks(struct wl1271 *wl)
  963. {
  964. unsigned int quirks = 0;
  965. unsigned int *fw_ver = wl->chip.fw_ver;
  966. /* Only for wl127x */
  967. if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
  968. /* Check STA version */
  969. (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
  970. (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
  971. /* Check AP version */
  972. ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
  973. (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
  974. quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
  975. return quirks;
  976. }
/*
 * Boot the chip into PLT (Production Line Testing) mode.
 *
 * Only valid from WL1271_STATE_OFF.  The full wakeup/boot/init sequence
 * is retried up to WL1271_BOOT_RETRIES times, powering the chip off
 * between attempts.  Returns 0 once the firmware is running in PLT
 * mode, or a negative error code after all retries are exhausted.
 */
int wl1271_plt_start(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	int ret;

	mutex_lock(&wl->mutex);

	wl1271_notice("power up");

	if (wl->state != WL1271_STATE_OFF) {
		wl1271_error("cannot go into PLT state because not "
			     "in off state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	/* use the STA firmware image for PLT (see wl1271_chip_wakeup) */
	wl->bss_type = BSS_TYPE_STA_BSS;

	while (retries) {
		retries--;
		ret = wl1271_chip_wakeup(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_plt_init(wl);
		if (ret < 0)
			goto irq_disable;

		wl->state = WL1271_STATE_PLT;
		wl1271_notice("firmware booted in PLT mode (%s)",
			      wl->chip.fw_ver_str);

		/* Check if any quirks are needed with older fw versions */
		wl->quirks |= wl1271_get_fw_ver_quirks(wl);
		goto out;

irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WL1271_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wl1271_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
	}

	wl1271_error("firmware boot in PLT mode failed despite %d retries",
		     WL1271_BOOT_RETRIES);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * Take the chip out of PLT mode and power it down.
 *
 * Must be called with wl->mutex held.  The mutex is dropped temporarily
 * while pending work items are cancelled — work functions such as the
 * recovery work take wl->mutex themselves, so cancelling synchronously
 * while holding it would deadlock — and re-acquired before returning.
 * Returns 0 on success or -EBUSY when not in PLT state.
 */
static int __wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	if (wl->state != WL1271_STATE_PLT) {
		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	wl1271_power_off(wl);

	wl->state = WL1271_STATE_OFF;
	wl->rx_counter = 0;

	/*
	 * state is WL1271_STATE_OFF now, so concurrent operations fail
	 * harmlessly while the mutex is dropped for the work flush below.
	 */
	mutex_unlock(&wl->mutex);
	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	mutex_lock(&wl->mutex);
out:
	return ret;
}
  1051. int wl1271_plt_stop(struct wl1271 *wl)
  1052. {
  1053. int ret;
  1054. mutex_lock(&wl->mutex);
  1055. ret = __wl1271_plt_stop(wl);
  1056. mutex_unlock(&wl->mutex);
  1057. return ret;
  1058. }
/*
 * mac80211 TX entry point: enqueue an skb for the TX work to transmit.
 *
 * May be called in atomic context (hence the spinlock rather than the
 * mutex).  In AP mode the frame goes on the per-link queue selected by
 * its hlid; otherwise on the global per-AC queue.  mac80211's queues
 * are stopped once the backlog reaches WL1271_TX_QUEUE_HIGH_WATERMARK,
 * and the TX work is scheduled unless the FW TX path is busy or TX is
 * already pending.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	unsigned long flags;
	int q;
	u8 hlid = 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl1271_tx_get_hlid(skb);

	spin_lock_irqsave(&wl->wl_lock, flags);

	wl->tx_queue_count++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
		ieee80211_stop_queues(wl->hw);
		set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
	}

	/* queue the packet */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
	} else {
		skb_queue_tail(&wl->tx_queue[q], skb);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/*
 * Request transmission of a dummy packet to the firmware.
 *
 * Marks the dummy packet as pending and bumps the TX queue count under
 * wl_lock so the TX machinery will pick it up.  If the FW TX path is
 * not busy the TX work runs immediately; otherwise the threaded IRQ
 * handler will schedule it later.  Always returns 0.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		wl1271_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
  1111. /*
  1112. * The size of the dummy packet should be at least 1400 bytes. However, in
  1113. * order to minimize the number of bus transactions, aligning it to 512 bytes
  1114. * boundaries could be beneficial, performance wise
  1115. */
  1116. #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
  1117. static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
  1118. {
  1119. struct sk_buff *skb;
  1120. struct ieee80211_hdr_3addr *hdr;
  1121. unsigned int dummy_packet_size;
  1122. dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
  1123. sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
  1124. skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
  1125. if (!skb) {
  1126. wl1271_warning("Failed to allocate a dummy packet skb");
  1127. return NULL;
  1128. }
  1129. skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
  1130. hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
  1131. memset(hdr, 0, sizeof(*hdr));
  1132. hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
  1133. IEEE80211_STYPE_NULLFUNC |
  1134. IEEE80211_FCTL_TODS);
  1135. memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
  1136. /* Dummy packets require the TID to be management */
  1137. skb->priority = WL1271_TID_MGMT;
  1138. /* Initialize all fields that might be used */
  1139. skb_set_queue_mapping(skb, 0);
  1140. memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
  1141. return skb;
  1142. }
/* Netdevice event notifier; handler presumably registered elsewhere in
 * this file — confirm against the registration site. */
static struct notifier_block wl1271_dev_notifier = {
	.notifier_call = wl1271_dev_notify,
};
  1146. #ifdef CONFIG_PM
/*
 * Prepare a STA interface for system suspend by forcing it into PSM.
 *
 * Wakes the ELP and, if power-save is not already active, requests PSM
 * and waits up to WL1271_PS_COMPLETE_TIMEOUT ms on wl->ps_compl
 * (presumably completed by the PS event path — verify against the
 * event handler).  wl->mutex is dropped while waiting so events can be
 * processed.  Returns 0 on success, -EBUSY if PSM was not reached in
 * time, or a negative error from the wakeup/PS-mode calls.  Non-STA
 * interfaces need no preparation and return 0 immediately.
 */
static int wl1271_configure_suspend(struct wl1271 *wl)
{
	int ret;

	if (wl->bss_type != BSS_TYPE_STA_BSS)
		return 0;

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/* enter psm if needed*/
	if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
		DECLARE_COMPLETION_ONSTACK(compl);

		wl->ps_compl = &compl;
		ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
					 wl->basic_rate, true);
		if (ret < 0)
			goto out_sleep;

		/* we must unlock here so we will be able to get events */
		wl1271_ps_elp_sleep(wl);
		mutex_unlock(&wl->mutex);

		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
		if (ret <= 0) {
			wl1271_warning("couldn't enter ps mode!");
			ret = -EBUSY;
			goto out;
		}

		/* take mutex again, and wakeup */
		mutex_lock(&wl->mutex);

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out_unlock;
	}
out_sleep:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);
out:
	return ret;
}
/*
 * Undo the suspend-time power-save configuration for a STA interface.
 *
 * If PSM was not explicitly requested before suspend (i.e. it was only
 * entered for the sake of suspending), take the chip back to active
 * mode.  No-op for non-STA interfaces.
 */
static void wl1271_configure_resume(struct wl1271 *wl)
{
	int ret;

	if (wl->bss_type != BSS_TYPE_STA_BSS)
		return;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* exit psm if it wasn't configured */
	if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
		wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
				   wl->basic_rate, true);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 suspend hook.
 *
 * When WoWLAN is requested, put the chip into power-save (see
 * wl1271_configure_suspend), flush outstanding work, and set
 * WL1271_FLAG_SUSPENDED so the threaded IRQ defers its work until
 * resume.  Without wow nothing is done beyond recording the flag.
 * Returns 0 on success or the error from the suspend preparation.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	wl->wow_enabled = !!wow;
	if (wl->wow_enabled) {
		int ret;
		ret = wl1271_configure_suspend(wl);
		if (ret < 0) {
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
		/* flush any remaining work */
		wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
		flush_delayed_work(&wl->scan_complete_work);

		/*
		 * disable and re-enable interrupts in order to flush
		 * the threaded_irq
		 */
		wl1271_disable_interrupts(wl);

		/*
		 * set suspended flag to avoid triggering a new threaded_irq
		 * work. no need for spinlock as interrupts are disabled.
		 */
		set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

		wl1271_enable_interrupts(wl);
		flush_work(&wl->tx_work);
		flush_delayed_work(&wl->pspoll_work);
		flush_delayed_work(&wl->elp_work);
	}
	return 0;
}
  1237. static int wl1271_op_resume(struct ieee80211_hw *hw)
  1238. {
  1239. struct wl1271 *wl = hw->priv;
  1240. wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
  1241. wl->wow_enabled);
  1242. /*
  1243. * re-enable irq_work enqueuing, and call irq_work directly if
  1244. * there is a pending work.
  1245. */
  1246. if (wl->wow_enabled) {
  1247. struct wl1271 *wl = hw->priv;
  1248. unsigned long flags;
  1249. bool run_irq_work = false;
  1250. spin_lock_irqsave(&wl->wl_lock, flags);
  1251. clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
  1252. if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
  1253. run_irq_work = true;
  1254. spin_unlock_irqrestore(&wl->wl_lock, flags);
  1255. if (run_irq_work) {
  1256. wl1271_debug(DEBUG_MAC80211,
  1257. "run postponed irq_work directly");
  1258. wl1271_irq(0, wl);
  1259. wl1271_enable_interrupts(wl);
  1260. }
  1261. wl1271_configure_resume(wl);
  1262. }
  1263. return 0;
  1264. }
  1265. #endif
  1266. static int wl1271_op_start(struct ieee80211_hw *hw)
  1267. {
  1268. wl1271_debug(DEBUG_MAC80211, "mac80211 start");
  1269. /*
  1270. * We have to delay the booting of the hardware because
  1271. * we need to know the local MAC address before downloading and
  1272. * initializing the firmware. The MAC address cannot be changed
  1273. * after boot, and without the proper MAC address, the firmware
  1274. * will not function properly.
  1275. *
  1276. * The MAC address is first known when the corresponding interface
  1277. * is added. That is where we will initialize the hardware.
  1278. *
  1279. * In addition, we currently have different firmwares for AP and managed
  1280. * operation. We will know which to boot according to interface type.
  1281. */
  1282. return 0;
  1283. }
  1284. static void wl1271_op_stop(struct ieee80211_hw *hw)
  1285. {
  1286. wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
  1287. }
  1288. static int wl1271_op_add_interface(struct ieee80211_hw *hw,
  1289. struct ieee80211_vif *vif)
  1290. {
  1291. struct wl1271 *wl = hw->priv;
  1292. struct wiphy *wiphy = hw->wiphy;
  1293. int retries = WL1271_BOOT_RETRIES;
  1294. int ret = 0;
  1295. bool booted = false;
  1296. wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
  1297. vif->type, vif->addr);
  1298. mutex_lock(&wl->mutex);
  1299. if (wl->vif) {
  1300. wl1271_debug(DEBUG_MAC80211,
  1301. "multiple vifs are not supported yet");
  1302. ret = -EBUSY;
  1303. goto out;
  1304. }
  1305. /*
  1306. * in some very corner case HW recovery scenarios its possible to
  1307. * get here before __wl1271_op_remove_interface is complete, so
  1308. * opt out if that is the case.
  1309. */
  1310. if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
  1311. ret = -EBUSY;
  1312. goto out;
  1313. }
  1314. switch (vif->type) {
  1315. case NL80211_IFTYPE_STATION:
  1316. wl->bss_type = BSS_TYPE_STA_BSS;
  1317. wl->set_bss_type = BSS_TYPE_STA_BSS;
  1318. break;
  1319. case NL80211_IFTYPE_ADHOC:
  1320. wl->bss_type = BSS_TYPE_IBSS;
  1321. wl->set_bss_type = BSS_TYPE_STA_BSS;
  1322. break;
  1323. case NL80211_IFTYPE_AP:
  1324. wl->bss_type = BSS_TYPE_AP_BSS;
  1325. break;
  1326. default:
  1327. ret = -EOPNOTSUPP;
  1328. goto out;
  1329. }
  1330. memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
  1331. if (wl->state != WL1271_STATE_OFF) {
  1332. wl1271_error("cannot start because not in off state: %d",
  1333. wl->state);
  1334. ret = -EBUSY;
  1335. goto out;
  1336. }
  1337. while (retries) {
  1338. retries--;
  1339. ret = wl1271_chip_wakeup(wl);
  1340. if (ret < 0)
  1341. goto power_off;
  1342. ret = wl1271_boot(wl);
  1343. if (ret < 0)
  1344. goto power_off;
  1345. ret = wl1271_hw_init(wl);
  1346. if (ret < 0)
  1347. goto irq_disable;
  1348. booted = true;
  1349. break;
  1350. irq_disable:
  1351. mutex_unlock(&wl->mutex);
  1352. /* Unlocking the mutex in the middle of handling is
  1353. inherently unsafe. In this case we deem it safe to do,
  1354. because we need to let any possibly pending IRQ out of
  1355. the system (and while we are WL1271_STATE_OFF the IRQ
  1356. work function will not do anything.) Also, any other
  1357. possible concurrent operations will fail due to the
  1358. current state, hence the wl1271 struct should be safe. */
  1359. wl1271_disable_interrupts(wl);
  1360. wl1271_flush_deferred_work(wl);
  1361. cancel_work_sync(&wl->netstack_work);
  1362. mutex_lock(&wl->mutex);
  1363. power_off:
  1364. wl1271_power_off(wl);
  1365. }
  1366. if (!booted) {
  1367. wl1271_error("firmware boot failed despite %d retries",
  1368. WL1271_BOOT_RETRIES);
  1369. goto out;
  1370. }
  1371. wl->vif = vif;
  1372. wl->state = WL1271_STATE_ON;
  1373. set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
  1374. wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
  1375. /* update hw/fw version info in wiphy struct */
  1376. wiphy->hw_version = wl->chip.id;
  1377. strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
  1378. sizeof(wiphy->fw_version));
  1379. /* Check if any quirks are needed with older fw versions */
  1380. wl->quirks |= wl1271_get_fw_ver_quirks(wl);
  1381. /*
  1382. * Now we know if 11a is supported (info from the NVS), so disable
  1383. * 11a channels if not supported
  1384. */
  1385. if (!wl->enable_11a)
  1386. wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
  1387. wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
  1388. wl->enable_11a ? "" : "not ");
  1389. out:
  1390. mutex_unlock(&wl->mutex);
  1391. mutex_lock(&wl_list_mutex);
  1392. if (!ret)
  1393. list_add(&wl->list, &wl_list);
  1394. mutex_unlock(&wl_list_mutex);
  1395. return ret;
  1396. }
/*
 * Tear down the active interface and power the chip off.
 *
 * Must be called with wl->mutex held; the mutex is dropped while
 * pending work is flushed/cancelled and re-acquired afterwards.  Safe
 * to call redundantly (hardware recovery may race with a user-space
 * ifdown): it bails out unless the state is WL1271_STATE_ON.  All
 * per-association driver state is reset; @reset_tx_queues is forwarded
 * to wl1271_tx_reset() to control how queued frames are reported back
 * to mac80211.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 bool reset_tx_queues)
{
	int i;

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	/* because of hardware recovery, we may get here twice */
	if (wl->state != WL1271_STATE_ON)
		return;

	wl1271_info("down");

	mutex_lock(&wl_list_mutex);
	list_del(&wl->list);
	mutex_unlock(&wl_list_mutex);

	/* enable dyn ps just in case (if left on due to fw crash etc) */
	if (wl->bss_type == BSS_TYPE_STA_BSS)
		ieee80211_enable_dyn_ps(wl->vif);

	/* terminate any scan still in flight and tell mac80211 it aborted */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WL1271_STATE_OFF;

	mutex_unlock(&wl->mutex);

	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->pspoll_work);
	cancel_delayed_work_sync(&wl->elp_work);

	mutex_lock(&wl->mutex);

	/* let's notify MAC80211 about the remaining pending TX frames */
	wl1271_tx_reset(wl, reset_tx_queues);
	wl1271_power_off(wl);

	/* reset all per-association/per-boot driver state */
	memset(wl->bssid, 0, ETH_ALEN);
	memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
	wl->ssid_len = 0;
	wl->bss_type = MAX_BSS_TYPE;
	wl->set_bss_type = MAX_BSS_TYPE;
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->psm_entry_retry = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->tx_security_last_seq = 0;
	wl->tx_security_seq = 0;
	wl->time_offset = 0;
	wl->session_counter = 0;
	wl->rate_set = CONF_TX_RATE_MASK_BASIC;
	wl->vif = NULL;
	wl->filters = 0;
	wl1271_free_ap_keys(wl);
	memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sched_scanning = false;

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_blocks_freed[i] = 0;

	wl1271_debugfs_reset(wl);

	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;
}
  1476. static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
  1477. struct ieee80211_vif *vif)
  1478. {
  1479. struct wl1271 *wl = hw->priv;
  1480. mutex_lock(&wl->mutex);
  1481. /*
  1482. * wl->vif can be null here if someone shuts down the interface
  1483. * just when hardware recovery has been started.
  1484. */
  1485. if (wl->vif) {
  1486. WARN_ON(wl->vif != vif);
  1487. __wl1271_op_remove_interface(wl, true);
  1488. }
  1489. mutex_unlock(&wl->mutex);
  1490. cancel_work_sync(&wl->recovery_work);
  1491. }
  1492. void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
  1493. {
  1494. wl1271_set_default_filters(wl);
  1495. /* combine requested filters with current filter config */
  1496. filters = wl->filters | filters;
  1497. wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
  1498. if (filters & FIF_PROMISC_IN_BSS) {
  1499. wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
  1500. wl->rx_config &= ~CFG_UNI_FILTER_EN;
  1501. wl->rx_config |= CFG_BSSID_FILTER_EN;
  1502. }
  1503. if (filters & FIF_BCN_PRBRESP_PROMISC) {
  1504. wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
  1505. wl->rx_config &= ~CFG_BSSID_FILTER_EN;
  1506. wl->rx_config &= ~CFG_SSID_FILTER_EN;
  1507. }
  1508. if (filters & FIF_OTHER_BSS) {
  1509. wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
  1510. wl->rx_config &= ~CFG_BSSID_FILTER_EN;
  1511. }
  1512. if (filters & FIF_CONTROL) {
  1513. wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
  1514. wl->rx_filter |= CFG_RX_CTL_EN;
  1515. }
  1516. if (filters & FIF_FCSFAIL) {
  1517. wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
  1518. wl->rx_filter |= CFG_RX_FCS_ERROR;
  1519. }
  1520. }
  1521. static int wl1271_dummy_join(struct wl1271 *wl)
  1522. {
  1523. int ret = 0;
  1524. /* we need to use a dummy BSSID for now */
  1525. static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
  1526. 0xad, 0xbe, 0xef };
  1527. memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
  1528. /* pass through frames from all BSS */
  1529. wl1271_configure_filters(wl, FIF_OTHER_BSS);
  1530. ret = wl1271_cmd_join(wl, wl->set_bss_type);
  1531. if (ret < 0)
  1532. goto out;
  1533. set_bit(WL1271_FLAG_JOINED, &wl->flags);
  1534. out:
  1535. return ret;
  1536. }
  1537. static int wl1271_join(struct wl1271 *wl, bool set_assoc)
  1538. {
  1539. int ret;
  1540. /*
  1541. * One of the side effects of the JOIN command is that is clears
  1542. * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
  1543. * to a WPA/WPA2 access point will therefore kill the data-path.
  1544. * Currently the only valid scenario for JOIN during association
  1545. * is on roaming, in which case we will also be given new keys.
  1546. * Keep the below message for now, unless it starts bothering
  1547. * users who really like to roam a lot :)
  1548. */
  1549. if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
  1550. wl1271_info("JOIN while associated.");
  1551. if (set_assoc)
  1552. set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
  1553. ret = wl1271_cmd_join(wl, wl->set_bss_type);
  1554. if (ret < 0)
  1555. goto out;
  1556. set_bit(WL1271_FLAG_JOINED, &wl->flags);
  1557. if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
  1558. goto out;
  1559. /*
  1560. * The join command disable the keep-alive mode, shut down its process,
  1561. * and also clear the template config, so we need to reset it all after
  1562. * the join. The acx_aid starts the keep-alive process, and the order
  1563. * of the commands below is relevant.
  1564. */
  1565. ret = wl1271_acx_keep_alive_mode(wl, true);
  1566. if (ret < 0)
  1567. goto out;
  1568. ret = wl1271_acx_aid(wl, wl->a