/mac80211/compat_wl12xx/drivers/net/wireless/wl12xx/main.c
C | 6146 lines | 4596 code | 1056 blank | 494 comment | 766 complexity | 30a110685dd6d5734e7145f902b23def MD5 | raw file
Possible License(s): GPL-2.0, BSD-3-Clause
Large files are truncated, but you can click here to view the full file
- /*
- * This file is part of wl1271
- *
- * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
- #include <linux/module.h>
- #include <linux/firmware.h>
- #include <linux/delay.h>
- #include <linux/spi/spi.h>
- #include <linux/crc32.h>
- #include <linux/etherdevice.h>
- #include <linux/vmalloc.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
- #include <linux/wl12xx.h>
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include "wl12xx.h"
- #include "debug.h"
- #include "wl12xx_80211.h"
- #include "reg.h"
- #include "io.h"
- #include "event.h"
- #include "tx.h"
- #include "rx.h"
- #include "ps.h"
- #include "init.h"
- #include "debugfs.h"
- #include "cmd.h"
- #include "boot.h"
- #include "testmode.h"
- #include "scan.h"
- #include "version.h"
- #define WL1271_BOOT_RETRIES 3
/*
 * Default driver configuration. Copied into wl->conf at probe time by
 * wl1271_conf_init() and then optionally adjusted by module parameters;
 * changes take effect on the next interface up.
 */
static struct conf_drv_settings default_conf = {
	/* Soft-gemini (BT/WLAN coexistence) parameters */
	.sg = {
		.params = {
			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
			/* active scan params */
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			/* passive scan params */
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			/* passive scan in dual antenna params */
			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
			/* general params */
			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
			/* AP params */
			[CONF_AP_BEACON_MISS_TX] = 3,
			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
			/* CTS Diluting params */
			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	/* RX path configuration */
	.rx = {
		.rx_msdu_life_time = 512000,
		.packet_detection_threshold = 0,
		.ps_poll_timeout = 15,
		.upsd_timeout = 15,
		.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold = 0,
		.irq_blk_threshold = 0xFFFF,
		.irq_pkt_threshold = 0,
		.irq_timeout = 600,
		.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	/* TX path configuration: per-AC EDCA and per-TID queue settings */
	.tx = {
		.tx_energy_detection = 0,
		.sta_rc_conf = {
			.enabled_rates = 0,
			.short_retry_limit = 10,
			.long_retry_limit = 10,
			.aflags = 0,
		},
		.ac_conf_count = 4,
		.ac_conf = {
			[CONF_TX_AC_BE] = {
				.ac = CONF_TX_AC_BE,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac = CONF_TX_AC_BK,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac = CONF_TX_AC_VI,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac = CONF_TX_AC_VO,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.max_tx_retries = 100,
		.ap_aging_period = 300,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BE,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BK,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VI,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VO,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
		},
		.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout = 700,
		.tx_compl_threshold = 4,
		.basic_rate = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit = 10,
		.tmpl_long_retry_limit = 10,
	},
	/* Connection management: PSM, beacon filtering/loss, keep-alive */
	.conn = {
		.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval = 1,
		.suspend_wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.suspend_listen_interval = 1,
		.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count = 3,
		.bcn_filt_ie = {
			[0] = {
				.ie = WLAN_EID_CHANNEL_SWITCH,
				.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie = WLAN_EID_HT_INFORMATION,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
			[2] = {
				.ie = WLAN_EID_ERP_INFO,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold = 12,
		.bss_lose_timeout = 400,
		.cons_bcn_loss_time = 5000,
		.max_bcn_loss_time = 10000,
		.beacon_rx_timeout = 10000,
		.broadcast_timeout = 20000,
		.rx_broadcast_in_ps = 1,
		.ps_poll_threshold = 10,
		.bet_enable = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive = 50,
		.psm_entry_retries = 8,
		.psm_exit_retries = 16,
		.psm_entry_nullfunc_retries = 3,
		.dynamic_ps_timeout = 1500,
		.forced_ps = false,
		.keep_alive_interval = 55000,
		.max_listen_interval = 20,
	},
	/* Current-consumption trimming (disabled by default) */
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false
	},
	/* Roaming trigger RSSI/SNR averaging weights */
	.roam_trigger = {
		.trigger_pacing = 1,
		.avg_weight_rssi_beacon = 20,
		.avg_weight_rssi_data = 10,
		.avg_weight_snr_beacon = 20,
		.avg_weight_snr_data = 10,
	},
	/* One-shot scan dwell times (in TU/1000) */
	.scan = {
		.min_dwell_time_active = 7500,
		.max_dwell_time_active = 30000,
		.min_dwell_time_passive = 100000,
		.max_dwell_time_passive = 100000,
		.num_probe_reqs = 2,
		.split_scan_timeout = 50000,
	},
	.sched_scan = {
		/*
		 * Values are in TU/1000 but since sched scan FW command
		 * params are in TUs rounding up may occur.
		 */
		.base_dwell_time = 7500,
		.max_dwell_time_delta = 22500,
		/* based on 250bits per probe @1Mbps */
		.dwell_time_delta_per_probe = 2000,
		/* based on 250bits per probe @6Mbps (plus a bit more) */
		.dwell_time_delta_per_probe_5 = 350,
		.dwell_time_passive = 100000,
		.dwell_time_dfs = 150000,
		.num_probe_reqs = 2,
		.rssi_threshold = -90,
		.snr_threshold = 0,
	},
	/* Per-channel TX power compensation (none by default) */
	.rf = {
		.tx_per_channel_power_compensation_2 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
		.tx_per_channel_power_compensation_5 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
	},
	/* 802.11n block-ack session settings */
	.ht = {
		.rx_ba_win_size = 8,
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
	},
	/* Firmware memory layout for wl127x chips */
	.mem_wl127x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 70,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 100,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* Firmware memory layout for wl128x chips */
	.mem_wl128x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* FM radio coexistence (0xff.. values mean "use firmware default") */
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff, /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 0xffff,
		.m_divider_fref_set_2 = 148, /* default */
		.coex_pll_stabilization_time = 0xffffffff, /* default */
		.ldo_stabilization_time = 0xffff, /* default */
		.fm_disturbed_band_margin = 0xff, /* default */
		.swallow_clk_diff = 0xff, /* default */
	},
	/* RX streaming (interrupt coalescing helper) defaults */
	.rx_streaming = {
		.duration = 150,
		.queues = 0x1,
		.interval = 20,
		.always = 0,
	},
	/* Firmware logger defaults; overridable via the fwlog module param */
	.fwlog = {
		.mode = WL12XX_FWLOG_ON_DEMAND,
		.mem_blocks = 2,
		.severity = 0,
		.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
		.output = WL12XX_FWLOG_OUTPUT_HOST,
		.threshold = 0,
	},
	.hci_io_ds = HCI_IO_DS_6MA,
	/* Rate adaptation algorithm tuning */
	.rate = {
		.rate_retry_score = 32000,
		.per_add = 8192,
		.per_th1 = 2048,
		.per_th2 = 4096,
		.max_per = 8100,
		.inverse_curiosity_factor = 5,
		.tx_fail_low_th = 4,
		.tx_fail_high_th = 10,
		.per_alpha_shift = 4,
		.per_add_shift = 13,
		.per_beta1_shift = 10,
		.per_beta2_shift = 8,
		.rate_check_up = 2,
		.rate_check_down = 12,
		.rate_retry_policy = {
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00,
		},
	},
	/* PS "hangover" (delayed sleep after traffic) tuning */
	.hangover = {
		.recover_time = 0,
		.hangover_period = 20,
		.dynamic_mode = 1,
		.early_termination_mode = 1,
		.max_period = 20,
		.min_period = 1,
		.increase_delta = 1,
		.decrease_delta = 2,
		.quiet_time = 4,
		.increase_time = 1,
		.window_size = 16,
	},
};
/* Module parameters, consumed by wl1271_conf_init() at probe time */
static char *fwlog_param;
static bool bug_on_recovery;
static char *fref_param;
static char *tcxo_param;

/* Forward declarations */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wl1271_op_stop(struct ieee80211_hw *hw);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);

/* List of all registered wl1271 instances, protected by wl_list_mutex */
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
- static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- unsigned char operstate)
- {
- int ret;
- if (operstate != IF_OPER_UP)
- return 0;
- if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
- return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
- if (ret < 0)
- return ret;
- wl12xx_croc(wl, wlvif->role_id);
- wl1271_info("Association completed.");
- return 0;
- }
/*
 * Netdevice notifier callback: on NETDEV_CHANGE, propagate the interface's
 * operstate to the firmware for every associated STA vif of this device.
 *
 * Walks from the net_device up to the wl1271 instance and verifies that the
 * instance really belongs to this driver by searching wl_list.
 */
static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
			     void *arg)
{
	struct net_device *dev = arg;
	struct wireless_dev *wdev;
	struct wiphy *wiphy;
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	struct wl1271 *wl_temp;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	/* Check that this notification is for us. */
	if (what != NETDEV_CHANGE)
		return NOTIFY_DONE;

	wdev = dev->ieee80211_ptr;
	if (wdev == NULL)
		return NOTIFY_DONE;

	wiphy = wdev->wiphy;
	if (wiphy == NULL)
		return NOTIFY_DONE;

	hw = wiphy_priv(wiphy);
	if (hw == NULL)
		return NOTIFY_DONE;

	/*
	 * hw->priv is only a wl1271 if this wiphy is ours; confirm by
	 * looking the pointer up in the driver's global instance list.
	 */
	wl_temp = hw->priv;
	mutex_lock(&wl_list_mutex);
	list_for_each_entry(wl, &wl_list, list) {
		if (wl == wl_temp)
			break;
	}
	mutex_unlock(&wl_list_mutex);
	/* if the loop ran to completion, wl points at the list head, not us */
	if (wl != wl_temp)
		return NOTIFY_DONE;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	if (dev->operstate != IF_OPER_UP)
		goto out;
	/*
	 * The correct behavior should be just getting the appropriate wlvif
	 * from the given dev, but currently we don't have a mac80211
	 * interface for it.
	 */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
			continue;

		/* chip must be awake before sending the peer-state command */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_check_operstate(wl, wlvif,
				       ieee80211_get_operstate(vif));

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return NOTIFY_OK;
}
- static int wl1271_reg_notify(struct wiphy *wiphy,
- struct regulatory_request *request)
- {
- struct ieee80211_supported_band *band;
- struct ieee80211_channel *ch;
- int i;
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- for (i = 0; i < band->n_channels; i++) {
- ch = &band->channels[i];
- if (ch->flags & IEEE80211_CHAN_DISABLED)
- continue;
- if (ch->flags & IEEE80211_CHAN_RADAR)
- ch->flags |= IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
- }
- return 0;
- }
- static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- bool enable)
- {
- int ret = 0;
- /* we should hold wl->mutex */
- ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
- if (ret < 0)
- goto out;
- if (enable)
- set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
- else
- clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
- out:
- return ret;
- }
- /*
- * this function is being called when the rx_streaming interval
- * has beed changed or rx_streaming should be disabled
- */
- int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
- {
- int ret = 0;
- int period = wl->conf.rx_streaming.interval;
- /* don't reconfigure if rx_streaming is disabled */
- if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
- goto out;
- /* reconfigure/disable according to new streaming_period */
- if (period &&
- test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
- ret = wl1271_set_rx_streaming(wl, wlvif, true);
- else {
- ret = wl1271_set_rx_streaming(wl, wlvif, false);
- /* don't cancel_work_sync since we might deadlock */
- del_timer_sync(&wlvif->rx_streaming_timer);
- }
- out:
- return ret;
- }
/*
 * Work item: turn on RX streaming for a vif when the preconditions hold
 * (associated STA, streaming not already started, SG active or "always"
 * configured), then arm the inactivity timer that will turn it back off.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* bail out if already started, not associated, or not wanted */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* interval == 0 means the feature is disabled */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
- static void wl1271_rx_streaming_disable_work(struct work_struct *work)
- {
- int ret;
- struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
- rx_streaming_disable_work);
- struct wl1271 *wl = wlvif->wl;
- mutex_lock(&wl->mutex);
- if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
- goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
- ret = wl1271_set_rx_streaming(wl, wlvif, false);
- if (ret)
- goto out_sleep;
- out_sleep:
- wl1271_ps_elp_sleep(wl);
- out:
- mutex_unlock(&wl->mutex);
- }
- static void wl1271_rx_streaming_timer(unsigned long data)
- {
- struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
- struct wl1271 *wl = wlvif->wl;
- ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
- }
- static void wl1271_conf_init(struct wl1271 *wl)
- {
- /*
- * This function applies the default configuration to the driver. This
- * function is invoked upon driver load (spi probe.)
- *
- * The configuration is stored in a run-time structure in order to
- * facilitate for run-time adjustment of any of the parameters. Making
- * changes to the configuration structure will apply the new values on
- * the next interface up (wl1271_op_start.)
- */
- /* apply driver default configuration */
- memcpy(&wl->conf, &default_conf, sizeof(default_conf));
- /* Adjust settings according to optional module parameters */
- if (fwlog_param) {
- if (!strcmp(fwlog_param, "continuous")) {
- wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
- } else if (!strcmp(fwlog_param, "ondemand")) {
- wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
- } else if (!strcmp(fwlog_param, "dbgpins")) {
- wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
- wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
- } else if (!strcmp(fwlog_param, "disable")) {
- wl->conf.fwlog.mem_blocks = 0;
- wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
- } else {
- wl1271_error("Unknown fwlog parameter %s", fwlog_param);
- }
- }
- wl->ref_clock = -1;
- if (fref_param) {
- if (!strcmp(fref_param, "19.2"))
- wl->ref_clock = WL12XX_REFCLOCK_19;
- else if (!strcmp(fref_param, "26"))
- wl->ref_clock = WL12XX_REFCLOCK_26;
- else if (!strcmp(fref_param, "26x"))
- wl->ref_clock = WL12XX_REFCLOCK_26_XTAL;
- else if (!strcmp(fref_param, "38.4"))
- wl->ref_clock = WL12XX_REFCLOCK_38;
- else if (!strcmp(fref_param, "38.4x"))
- wl->ref_clock = WL12XX_REFCLOCK_38_XTAL;
- else if (!strcmp(fref_param, "52"))
- wl->ref_clock = WL12XX_REFCLOCK_52;
- else
- wl1271_error("Invalid fref parameter %s", fref_param);
- }
- wl->tcxo_clock = -1;
- if (tcxo_param) {
- if (!strcmp(tcxo_param, "19.2"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
- else if (!strcmp(tcxo_param, "26"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_26;
- else if (!strcmp(tcxo_param, "38.4"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
- else if (!strcmp(tcxo_param, "52"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_52;
- else if (!strcmp(tcxo_param, "16.368"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
- else if (!strcmp(tcxo_param, "32.736"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
- else if (!strcmp(tcxo_param, "16.8"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
- else if (!strcmp(tcxo_param, "33.6"))
- wl->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
- else
- wl1271_error("Invalid tcxo parameter %s", tcxo_param);
- }
- }
- static int wl1271_plt_init(struct wl1271 *wl)
- {
- int ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_general_parms(wl);
- else
- ret = wl1271_cmd_general_parms(wl);
- if (ret < 0)
- return ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_radio_parms(wl);
- else
- ret = wl1271_cmd_radio_parms(wl);
- if (ret < 0)
- return ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
- }
- if (ret < 0)
- return ret;
- /* Chip-specific initializations */
- ret = wl1271_chip_specific_init(wl);
- if (ret < 0)
- return ret;
- ret = wl1271_acx_init_mem_config(wl);
- if (ret < 0)
- return ret;
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
- /* Enable data path */
- ret = wl1271_cmd_data_path(wl, 1);
- if (ret < 0)
- goto out_free_memmap;
- /* Configure for CAM power saving (ie. always active) */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_free_memmap;
- /* configure PM */
- ret = wl1271_acx_pm_config(wl);
- if (ret < 0)
- goto out_free_memmap;
- return 0;
- out_free_memmap:
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
- return ret;
- }
- static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- u8 hlid, u8 tx_pkts)
- {
- bool fw_ps, single_sta;
- fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
- single_sta = (wl->active_sta_count == 1);
- /*
- * Wake up from high level PS if the STA is asleep with too little
- * packets in FW or if the STA is awake.
- */
- if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
- wl12xx_ps_link_end(wl, wlvif, hlid);
- /*
- * Start high-level PS if the STA is asleep with enough blocks in FW.
- * Make an exception if this is the only connected station. In this
- * case FW-memory congestion is not a problem.
- */
- else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl12xx_ps_link_start(wl, wlvif, hlid, true);
- }
/*
 * Per-interrupt bookkeeping for an AP vif: refresh the firmware PS bitmap
 * and, for every connected station link, account freed TX packets and
 * re-evaluate its power-save state.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl12xx_fw_status *status)
{
	struct wl1271_link *lnk;
	u32 cur_fw_ps_map;
	u8 hlid, cnt;

	/* TODO: also use link_fast_bitmap here */

	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
		lnk = &wl->links[hlid];
		/* u8 subtraction handles the counter's wrap-around */
		cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;

		lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
		lnk->allocated_pkts -= cnt;

		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    lnk->allocated_pkts);
	}
}
/*
 * Read the firmware status area from the chip and update all host-side
 * TX block/packet accounting, per-link AP state, and the host-chipset
 * time offset.
 *
 * Caller must hold wl->mutex; the chip must be awake.
 */
static void wl12xx_fw_status(struct wl1271 *wl,
			     struct wl12xx_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;

	wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status->total_released_blks)))
		freed_blocks = le32_to_cpu(status->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		/* the 32-bit firmware counter wrapped; add the modulus */
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status->fw_localtime);
}
- static void wl1271_flush_deferred_work(struct wl1271 *wl)
- {
- struct sk_buff *skb;
- /* Pass all received frames to the network stack */
- while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
- ieee80211_rx_ni(wl->hw, skb);
- /* Return sent skbs to the network stack */
- while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- static void wl1271_netstack_work(struct work_struct *work)
- {
- struct wl1271 *wl =
- container_of(work, struct wl1271, netstack_work);
- do {
- wl1271_flush_deferred_work(wl);
- } while (skb_queue_len(&wl->deferred_rx_queue));
- }
/* upper bound on status-read iterations per threaded-IRQ invocation */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Threaded IRQ handler: repeatedly read the firmware status, then service
 * data (RX, TX completion), event, and informational interrupts until no
 * interrupt bits remain or the loop budget is exhausted. A watchdog
 * interrupt aborts servicing and triggers recovery.
 */
static irqreturn_t wl1271_irq(int irq, void *cookie)
{
	int ret;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		wl12xx_fw_status(wl, wl->fw_status);
		intr = le32_to_cpu(wl->fw_status->intr);
		intr &= WL1271_INTR_MASK;
		if (!intr) {
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			wl12xx_queue_recovery_work(wl);

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			wl12xx_rx(wl, wl->fw_status);

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				wl1271_tx_work_locked(wl);
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			if (wl->fw_status->tx_results_counter !=
			    (wl->tx_results_count & 0xff))
				wl1271_tx_complete(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			wl1271_event_handle(wl, 0);
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			wl1271_event_handle(wl, 1);
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
#ifdef CONFIG_HAS_WAKELOCK
	if (test_and_clear_bit(WL1271_FLAG_WAKE_LOCK, &wl->flags))
		wake_unlock(&wl->wake_lock);
#endif
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
/*
 * Select and load the firmware image appropriate for the chip variant
 * (wl127x vs wl128x) and mode (PLT, multi-role, or single-role), then
 * cache it in wl->fw. Skips the load if the requested firmware type is
 * already cached.
 *
 * Returns 0 on success or a negative error code.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;
	u8 open_count;

	/* more than one open vif requires the multi-role firmware */
	open_count = ieee80211_get_open_count(wl->hw, NULL);

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		if (wl->chip.id == CHIP_ID_1283_PG20)
			fw_name = WL128X_PLT_FW_NAME;
		else
			fw_name = WL127X_PLT_FW_NAME;
	} else {
		if (open_count > 1) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			if (wl->chip.id == CHIP_ID_1283_PG20)
				fw_name = WL128X_FW_NAME_MULTI;
			else
				fw_name = WL127X_FW_NAME_MULTI;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			if (wl->chip.id == CHIP_ID_1283_PG20)
				fw_name = WL128X_FW_NAME_SINGLE;
			else
				fw_name = WL127X_FW_NAME_SINGLE;
		}
	}

	/* the right firmware is already loaded — nothing to do */
	if (wl->saved_fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the boot loader writes the image in 32-bit words */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop any previously cached image before replacing it */
	vfree(wl->fw);
	wl->saved_fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->saved_fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
- static int wl1271_fetch_nvs(struct wl1271 *wl)
- {
- const struct firmware *fw;
- int ret;
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
- if (ret < 0) {
- wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
- ret);
- return ret;
- }
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
- wl->nvs_len = fw->size;
- out:
- release_firmware(fw);
- return ret;
- }
- void wl12xx_queue_recovery_work(struct wl1271 *wl)
- {
- if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
- }
- size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
- {
- size_t len = 0;
- /* The FW log is a length-value list, find where the log end */
- while (len < maxlen) {
- if (memblock[len] == 0)
- break;
- if (len + memblock[len] + 1 > maxlen)
- break;
- len += memblock[len] + 1;
- }
- /* Make sure we have enough room */
- len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
- /* Fill the FW log file, consumed by the sysfs fwlog entry */
- memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
- wl->fwlog_size += len;
- return len;
- }
/*
 * On a firmware panic, pull the on-demand FW log out of the chip's
 * linked list of memory blocks and append it to wl->fwlog.
 * No-op unless on-demand logging is configured and implemented.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 first_addr;
	u8 *block;

	if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/* Make sure the chip is awake and the logger isn't active. */
	if (!wl1271_ps_elp_wakeup(wl)) {
		/* Do not send a stop fwlog command if the fw is hanged */
		if (!wl->watchdog_recovery)
			wl12xx_cmd_stop_fwlog(wl);
	}
	else
		goto out; /* wakeup failed: chip memory is unreachable */

	/* Read the first memory block address */
	wl12xx_fw_status(wl, wl->fw_status);
	first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
	if (!first_addr)
		goto out;

	/* Traverse the memory blocks linked list */
	addr = first_addr;
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
				   false);

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one.
		 */
		addr = le32_to_cpup((__le32 *)block);
		/* Stop early once the one-page fwlog buffer is full. */
		if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
				       WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
			break;
	} while (addr && (addr != first_addr));

	/* Wake readers waiting on the sysfs fwlog entry. */
	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}
/*
 * Worker that recovers from a firmware hang/assert: dumps the FW panic
 * log, tears down every virtual interface, stops the device and asks
 * mac80211 to restart the hardware.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state != WL1271_STATE_ON)
		goto out_unlock;

	/* Avoid a recursive recovery */
	set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	wl12xx_read_fwlog_panic(wl);

	wl->watchdog_recovery = false;

	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
		    wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));

	/* Crash hard (module param) when the recovery was not intentional. */
	BUG_ON(bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	/*
	 * Advance security sequence number to overcome potential progress
	 * in the firmware during recovery. This doesn't hurt if the network is
	 * not encrypted.
	 */
	wl12xx_for_each_wlvif(wl, wlvif) {
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			wlvif->tx_security_seq +=
				WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}

	/* Prevent spurious TX during FW restart */
	ieee80211_stop_queues(wl->hw);

	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
					 struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}
	/* op_stop takes the mutex itself, so release it first. */
	mutex_unlock(&wl->mutex);

	wl1271_op_stop(wl->hw);

	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	ieee80211_wake_queues(wl->hw);

	return;
out_unlock:
	mutex_unlock(&wl->mutex);
}
- static void wl1271_fw_wakeup(struct wl1271 *wl)
- {
- u32 elp_reg;
- elp_reg = ELPCTRL_WAKE_UP;
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
- }
- static int wl1271_setup(struct wl1271 *wl)
- {
- wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
- if (!wl->fw_status)
- return -ENOMEM;
- wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
- if (!wl->tx_res_if) {
- kfree(wl->fw_status);
- return -ENOMEM;
- }
- return 0;
- }
/*
 * Raw power-on sequence: toggle power with the required pre/post delays,
 * reset and init the bus, select the DOWN partition and wake the ELP core.
 * Returns 0 on success or the wl1271_power_on() error.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);

	/* ELP module wake up */
	wl1271_fw_wakeup(wl);

out:
	return ret;
}
/*
 * Power on and identify the chip, allocate the per-chip buffers and
 * fetch the firmware image (and the NVS file, unless one was already
 * provided).  @plt selects the PLT (production-line test) firmware.
 * Returns 0 on success or a negative errno (-ENODEV for unknown chips).
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;

	switch (wl->chip.id) {
	case CHIP_ID_1271_PG10:
		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
			       wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
		break;

	case CHIP_ID_1271_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
			     wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
		break;

	case CHIP_ID_1283_PG20:
		/* 1283 does not need the blocksize-alignment quirk */
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
			     wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		break;

	case CHIP_ID_1283_PG10:
	default:
		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
		ret = -ENODEV;
		goto out;
	}

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

	/* No NVS from netlink, try to get it from the filesystem */
	if (wl->nvs == NULL) {
		ret = wl1271_fetch_nvs(wl);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
- int wl1271_plt_start(struct wl1271 *wl)
- {
- int retries = WL1271_BOOT_RETRIES;
- struct wiphy *wiphy = wl->hw->wiphy;
- int ret;
- mutex_lock(&wl->mutex);
- wl1271_notice("power up");
- if (wl->state != WL1271_STATE_OFF) {
- wl1271_error("cannot go into PLT state because not "
- "in off state: %d", wl->state);
- ret = -EBUSY;
- goto out;
- }
- while (retries) {
- retries--;
- ret = wl12xx_chip_wakeup(wl, true);
- if (ret < 0)
- goto power_off;
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto power_off;
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto irq_disable;
- wl->state = WL1271_STATE_PLT;
- wl1271_notice("firmware booted in PLT mode (%s)",
- wl->chip.fw_ver_str);
- /* update hw/fw version info in wiphy struct */
- wiphy->hw_version = wl->chip.id;
- strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
- sizeof(wiphy->fw_version));
- goto out;
- irq_disable:
- mutex_unlock(&wl->mutex);
- /* Unlocking the mutex in the middle of handling is
- inherently unsafe. In this case we deem it safe to do,
- because we need to let any possibly pending IRQ out of
- the system (and while we are WL1271_STATE_OFF the IRQ
- work function will not do anything.) Also, any other
- possible concurrent operations will fail due to the
- current state, hence the wl1271 struct should be safe. */
- wl1271_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_work_sync(&wl->netstack_work);
- mutex_lock(&wl->mutex);
- power_off:
- wl1271_power_off(wl);
- }
- wl1271_error("firmware boot in PLT mode failed despite %d retries",
- WL1271_BOOT_RETRIES);
- out:
- mutex_unlock(&wl->mutex);
- return ret;
- }
/*
 * Leave PLT mode and power the chip down.  Returns 0 on success or
 * -EBUSY if the device is not currently in PLT state.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wl1271_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (wl->state != WL1271_STATE_PLT) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wl1271_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* Cancel pending work with the mutex released — the workers
	 * presumably take it themselves (TODO confirm). */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->state = WL1271_STATE_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
/*
 * mac80211 TX entry point: queue the skb on its link/AC queue under the
 * wl_lock spinlock and schedule the TX work unless the FW is busy.
 * Frames without a valid FW link id are dropped.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* vif may be NULL; wl12xx_tx_get_hlid() handles that case */
	if (vif)
		wlvif = wl12xx_vif_to_data(vif);

	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue the packet */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (wlvif && !test_bit(hlid, wlvif->links_map))) {
		/* no valid FW link for this frame - drop it */
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	wl->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		ieee80211_stop_queue(wl->hw, mapping);
		set_bit(q, &wl->stopped_queues_map);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/*
 * Queue the preallocated dummy packet toward the FW.  A pending dummy
 * packet is never queued twice.  Always returns 0.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		wl1271_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512 bytes
 * boundaries could be beneficial, performance wise
 */
#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))

/*
 * Build the dummy packet handed to the FW on request: a zeroed
 * NULL-function data frame (ToDS) with room reserved up front for the
 * TX HW descriptor, zero-padded to TOTAL_TX_DUMMY_PACKET_SIZE.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
{
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr *hdr;
	unsigned int dummy_packet_size;

	/* payload = total size - hw descriptor - 802.11 header */
	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);

	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
	if (!skb) {
		wl1271_warning("Failed to allocate a dummy packet skb");
		return NULL;
	}

	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));

	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_STYPE_NULLFUNC |
					 IEEE80211_FCTL_TODS);

	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);

	/* Dummy packets require the TID to be management */
	skb->priority = WL1271_TID_MGMT;

	/* Initialize all fields that might be used */
	skb_set_queue_mapping(skb, 0);
	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));

	return skb;
}
/* Netdev notifier hook; events are dispatched to wl1271_dev_notify(). */
static struct notifier_block wl1271_dev_notifier = {
	.notifier_call = wl1271_dev_notify,
};
- #ifdef CONFIG_PM
- int wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
- {
- int num_fields = 0, in_field = 0, fields_size = 0;
- int i, pattern_len = 0;
- if (!p->mask) {
- wl1271_warning("No mask in WoWLAN pattern");
- return -EINVAL;
- }
- /* The pattern is broken up into segments of bytes at different offsets
- * that need to be checked by the FW filter. Each segment is called
- * a field in the FW API. We verify that the total number of fields
- * required for this pattern won't exceed FW limits (8)
- * as well as the total fields buffer won't exceed the FW limit.
- * Note that if there's a pattern which crosses Ethernet/IP header
- * boundary a new field is required.
- */
- for (i = 0; i < p->pattern_len; i++) {
- if (test_bit(i, (unsigned long *)p->mask)) {
- if (!in_field) {
- in_field = 1;
- pattern_len = 1;
- } else {
- if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
- num_fields++;
- fields_size += pattern_len +
- RX_FILTER_FIELD_OVERHEAD;
- pattern_len = 1;
- } else
- pattern_len++;
- }
- } else {
- if (in_field) {
- in_field = 0;
- fields_size += pattern_len +
- RX_FILTER_FIELD_OVERHEAD;
- num_fields++;
- }
- }
- }
- if (in_field) {
- fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
- num_fields++;
- }
- if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
- wl1271_warning("RX Filter too complex. Too many segments");
- return -EINVAL;
- }
- if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
- wl1271_warning("RX filter pattern is too big");
- return -E2BIG;
- }
- return 0;
- }
- struct wl12xx_rx_data_filter *wl1271_rx_filter_alloc(void)
- {
- return kzalloc(sizeof(struct wl12xx_rx_data_filter), GFP_KERNEL);
- }
- void wl1271_rx_filter_free(struct wl12xx_rx_data_filter *filter)
- {
- int i;
- if (filter == NULL)
- return;
- for (i = 0; i < filter->num_fields; i++)
- kfree(filter->fields[i].pattern);
- kfree(filter);
- }
- int wl1271_rx_filter_alloc_field(struct wl12xx_rx_data_filter *filter,
- u16 offset, u8 flags,
- u8 *pattern, u8 len)
- {
- struct wl12xx_rx_data_filter_field *field;
- if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
- wl1271_warning("Max fields per RX filter. can't alloc another");
- return -EINVAL;
- }
- field = &filter->fields[filter->num_fields];
- field->pattern = kzalloc(len, GFP_KERNEL);
- if (!field->pattern) {
- wl1271_warning("Failed to allocate RX filter pattern");
- return -ENOMEM;
- }
- filter->num_fields++;
- field->offset = cpu_to_le16(offset);
- field->flags = flags;
- field->len = len;
- memcpy(field->pattern, pattern, len);
- return 0;
- }
- int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_data_filter *filter)
- {
- int i, fields_size = 0;
- for (i = 0; i < filter->num_fields; i++)
- fields_size += filter->fields[i].len +
- sizeof(struct wl12xx_rx_data_filter_field) -
- sizeof(u8 *);
- return fields_size;
- }
/*
 * Serialize all filter fields into @buf in the FW wire format: each
 * field's header (minus the host-side pattern pointer) is followed
 * immediately by its pattern bytes.  @buf must be at least
 * wl1271_rx_filter_get_fields_size() bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_data_filter *filter,
				     u8 *buf)
{
	int i;
	struct wl12xx_rx_data_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_data_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* &field->pattern marks where the inline data begins;
		 * the pointer itself is not part of the wire format. */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		buf += sizeof(struct wl12xx_rx_data_filter_field) -
		       sizeof(u8 *) + field->len;
	}
}
/* Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 *
 * Walks the WoWLAN pattern mask and emits one filter field per
 * contiguous run of masked bytes, splitting runs that cross the
 * Ethernet/IP header boundary (IP-header offsets are header-relative).
 * On failure *f is set to NULL and a negative errno is returned.
 */
int wl1271_convert_wowlan_pattern_to_rx_filter(
	struct cfg80211_wowlan_trig_pkt_pattern *p,
	struct wl12xx_rx_data_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_data_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip unmasked bytes */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of this run: first unmasked byte, or the
		 * eth/IP header boundary, whichever comes first */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			/* IP-header fields use offsets past the eth header */
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
- static int wl1271_configure_wowlan(struct wl1271 *wl,
- struct cfg80211_wowlan *wow)
- {
- int i, ret;
- if (!wow || wow->any || !wow->n_patterns) {
- wl1271_rx_data_filtering_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_data_filters_clear_all(wl);
- return 0;
- }
- WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS);
- /* Validate all incoming patterns before clearing current FW state */
- for (i = 0; i < wow->n_patterns; i++) {
- ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
- if (ret) {
- wl1271_warning("validate_wowlan_pattern "
- "failed (%d)", ret);
- return ret;
- }
- }
- wl1271_rx_data_filtering_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_data_filters_clear_all…
Large files files are truncated, but you can click here to view the full file