/drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/ath9k_platform.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz
 * in 5 MHz steps; we support only the channels for which we know
 * we have calibration data on all cards, so that this table can
 * stay static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX to YYYY MHz
 * in 5 MHz steps; we support only the channels for which we know
 * we have calibration data on all cards, so that this table can
 * stay static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required as the FIFO on these devices can only sanely accept two
 * requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
		spin_lock_irqsave(&sc->sc_serial_rw, flags);

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

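/*
 * Fill in the HT capabilities advertised to mac80211: channel width,
 * SM power save, short GI, STBC and the supported MCS set, derived
 * from the chip revision and the configured TX/RX chainmasks.
 */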
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These are used to hold the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
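	/*
	 * ATH_DESC_4KB_BOUND_CHECK flags descriptor addresses that fall
	 * in the last 128 bytes (32 dwords) of a 4 KB page, since a
	 * descriptor fetch starting there could cross the page boundary.
	 * ATH_DESC_4KB_BOUND_NUM_SKIPPED assumes one such skipped
	 * descriptor per 4 KB of descriptor memory.
	 */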
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

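/*
 * Reset every entry in the hardware key cache and record whether the
 * hardware can combine the TX and RX MIC keys in a single key cache
 * entry.
 */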
void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = AR_KEYTABLE_SIZE;

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether separate key cache entries are required to
	 * handle both TX and RX MIC keys. With split MIC keys the
	 * number of stations is limited to 27; otherwise it is 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

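/*
 * Set up Bluetooth coexistence according to the scheme reported by
 * the hardware: nothing, 2-wire, or 3-wire (which also needs the
 * coex timer and a stomp type bound to the BE queue).
 */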
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

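/*
 * Create the beacon and CAB (content-after-beacon) queues and one
 * data queue per WMM access category, mapped to its mac80211 queue
 * number.
 */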
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}

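/*
 * Copy the static channel tables into the per-band structures and
 * attach the legacy bitrates; the 5 GHz band skips the first four
 * (CCK) entries of the rate table.
 */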
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

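/*
 * Remaining driver state: ANI calibration timer, TX power limit,
 * aggregation flags, chainmasks, antenna diversity, BSSID mask and
 * beacon slots.
 */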
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

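/*
 * Allocate and initialize the ath_hw instance: register access ops,
 * platform data (EEPROM flag, GPIO/LED configuration, clock), locks,
 * tasklets and the hardware itself, followed by queue, btcoex,
 * channel/rate, crypto and misc initialization.
 */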
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

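/*
 * Walk every channel in a band, program the maximum allowed TX power
 * for it and record the resulting regulatory limit (in dBm) in the
 * channel's max_power field.
 */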
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_channel *curchan = ah->curchan;

	ah->txchainmask = common->tx_chainmask;
	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

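/*
 * Advertise the driver's capabilities to mac80211: hardware flags,
 * supported interface modes, queue and rate limits, rate control
 * algorithm, band information and the permanent MAC address.
 */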
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

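/*
 * Full device bring-up: initialize the softc, regulatory domain,
 * TX/RX DMA, TX power limits and LED trigger, then register with
 * mac80211, create the debugfs files and start rfkill polling.
 */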
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

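/*
 * Release everything allocated by ath9k_init_softc(): channel tables,
 * the btcoex no-stomp timer, TX queues and the ath_hw instance.
 */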
static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

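/*
 * Module entry point: register the rate control algorithm first,
 * then the PCI and AHB bus glue.
 */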
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);