Linux Kernel 3.7.1
init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

static int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

static int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

static int ath9k_enable_diversity;
module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
        .band = IEEE80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz in
 * 5 MHz steps; we only list the channels we know all cards have
 * calibration data for, so this table can stay static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX to YYYY MHz in
 * 5 MHz steps; we only list the channels we know all cards have
 * calibration data for, so this table can stay static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
        { .throughput = 0 * 1024, .blink_time = 334 },
        { .throughput = 1 * 1024, .blink_time = 260 },
        { .throughput = 5 * 1024, .blink_time = 220 },
        { .throughput = 10 * 1024, .blink_time = 190 },
        { .throughput = 20 * 1024, .blink_time = 170 },
        { .throughput = 50 * 1024, .blink_time = 150 },
        { .throughput = 70 * 1024, .blink_time = 130 },
        { .throughput = 100 * 1024, .blink_time = 110 },
        { .throughput = 200 * 1024, .blink_time = 80 },
        { .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write: both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can sanely accept only 2 requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
{
        u32 val;

        val = ioread32(sc->mem + reg_offset);
        val &= ~clr;
        val |= set;
        iowrite32(val, sc->mem + reg_offset);

        return val;
}

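/*
 * Read-modify-write a register: clear the bits in 'clr', set the bits in
 * 'set', taking the same serializing lock as the plain read/write helpers
 * above when register access must be serialized.
 */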
static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        unsigned long uninitialized_var(flags);
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

        return val;
}

/**************************/
/*     Initialization     */
/**************************/

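/*
 * Advertise the HT capabilities and supported MCS set for this chip,
 * based on its revision and the configured TX/RX chainmasks.
 */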
static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                max_streams = 1;
        else if (AR_SREV_9462(ah))
                max_streams = 2;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

        ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
                tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
        int ret;

        ret = ath_reg_notifier_apply(wiphy, request, reg);

        /* Set tx power */
        if (ah->curchan) {
                sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
                ath9k_ps_wakeup(sc);
                ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
                ath9k_ps_restore(sc);
        }

        return ret;
}

/*
 * Allocate both the DMA descriptor memory and the ath_buf structures
 * that point into it; these hold the descriptors used by the hardware
 * and the driver.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_err(common, "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
}

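/*
 * Bring up the hardware TX queues: the beacon queue, the CAB
 * (content-after-beacon) queue and one data queue per WME access category.
 */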
static int ath9k_init_queues(struct ath_softc *sc)
{
        int i = 0;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < WME_NUM_AC; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
                sc->tx.txq_map[i]->mac80211_qnum = i;
                sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
        }
        return 0;
}

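/*
 * Allocate per-band channel tables and attach the shared legacy rate
 * table; the 5 GHz band skips the first four (CCK-only) entries.
 */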
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

        BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
                     ARRAY_SIZE(ath9k_5ghz_chantable) !=
                     ATH9K_NUM_CHANNELS);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
        sc->config.txpowlimit = ATH_TXPOWER_MAX;
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->reg_ops.read = ath9k_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        atomic_set(&ah->intr_ref_cnt, -1);
        sc->sc_ah = ah;

        sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);

        if (!pdata) {
                ah->ah_flags |= AH_USE_EEPROM;
                sc->sc_ah->led_pin = -1;
        } else {
                sc->sc_ah->gpio_mask = pdata->gpio_mask;
                sc->sc_ah->gpio_val = pdata->gpio_val;
                sc->sc_ah->led_pin = pdata->led_pin;
                ah->is_clk_25mhz = pdata->is_clk_25mhz;
                ah->get_mac_revision = pdata->get_mac_revision;
                ah->external_reset = pdata->external_reset;
        }

        common = ath9k_hw_common(ah);
        common->ops = &ah->reg_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;

        /*
         * Enable Antenna diversity only when BTCOEX is disabled
         * and the user manually requests the feature.
         */
        if (!common->btcoex_enabled && ath9k_enable_diversity)
                common->antenna_diversity = 1;

        spin_lock_init(&common->cc_lock);

        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
        spin_lock_init(&sc->nodes_lock);
        INIT_LIST_HEAD(&sc->nodes);
#endif
#ifdef CONFIG_ATH9K_MAC_DEBUG
        spin_lock_init(&sc->debug.samp_lock);
#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
                     (unsigned long)sc);

        INIT_WORK(&sc->hw_reset_work, ath_reset_work);
        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
        setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        if (pdata && pdata->macaddr)
                memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
        ath_fill_led_pin(sc);

        if (common->bus_ops->aspm_init)
                common->bus_ops->aspm_init(common);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_hw_deinit(ah);
err_hw:

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

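/*
 * Walk every channel in the band once so that the regulatory TX power
 * limits are computed for each of them.
 */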
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
        int i;

        sband = &sc->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
                ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
        }
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

        ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
                return;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}

static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_WDS) },
        { .max = 8,     .types =
#ifdef CONFIG_MAC80211_MESH
                                 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_combination if_comb = {
        .limits = if_limits,
        .n_limits = ARRAY_SIZE(if_limits),
        .max_interfaces = 2048,
        .num_different_channels = 1,
};

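/* Advertise the driver's and the hardware's capabilities to mac80211. */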
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        hw->wiphy->iface_combinations = &if_comb;
        hw->wiphy->n_iface_combinations = 1;

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

#ifdef CONFIG_PM_SLEEP

        if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
            device_can_wakeup(sc->dev)) {

                hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
                                          WIPHY_WOWLAN_DISCONNECT;
                hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
                hw->wiphy->wowlan.pattern_min_len = 1;
                hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;

        }

        atomic_set(&sc->wow_sleep_proc_intr, -1);
        atomic_set(&sc->wow_got_bmiss_intr, -1);

#endif

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 1;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

        /* single chain devices with rx diversity */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

        sc->ant_rx = hw->wiphy->available_antennas_rx;
        sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        ath9k_reload_chainmask_settings(sc);

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
        /* must be initialized before ieee80211_register_hw */
        sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
                IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
                ARRAY_SIZE(ath9k_tpt_blink));
#endif

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        error = ath9k_init_debug(ah);
        if (error) {
                ath_err(common, "Unable to create debugfs files\n");
                goto error_world;
        }

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        ath9k_deinit_btcoex(sc);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_hw_deinit(sc->sc_ah);
        if (sc->dfs_detector != NULL)
                sc->dfs_detector->exit(sc->dfs_detector);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        ath9k_ps_restore(sc);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                pr_err("Unable to register rate control algorithm: %d\n",
                       error);
                goto err_out;
        }

        error = ath_pci_init();
        if (error < 0) {
                pr_err("No PCI devices found, driver not installed\n");
                error = -ENODEV;
                goto err_rate_unregister;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

 err_pci_exit:
        ath_pci_exit();

 err_rate_unregister:
        ath_rate_control_unregister();
 err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        is_ath9k_unloaded = true;
        ath_ahb_exit();
        ath_pci_exit();
        ath_rate_control_unregister();
        pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);