Linux Kernel 3.7.1
main.c
1 /*
2  * Atheros CARL9170 driver
3  *
4  * mac80211 interaction code
5  *
6  * Copyright 2008, Johannes Berg <[email protected]>
7  * Copyright 2009, 2010, Christian Lamparter <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING. If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  * Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  * Permission to use, copy, modify, and/or distribute this software for any
28  * purpose with or without fee is hereby granted, provided that the above
29  * copyright notice and this permission notice appear in all copies.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39 
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50 
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54 
55 static bool modparam_noht;
56 module_param_named(noht, modparam_noht, bool, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58 
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60  .bitrate = (_bitrate), \
61  .flags = (_flags), \
62  .hw_value = (_hw_rate) | (_txpidx) << 4, \
63 }
64 
65 static struct ieee80211_rate __carl9170_ratetable[] = {
66  RATE(10, 0, 0, 0),
67  RATE(20, 1, 0, IEEE80211_RATE_SHORT_PREAMBLE),
68  RATE(55, 2, 0, IEEE80211_RATE_SHORT_PREAMBLE),
69  RATE(110, 3, 0, IEEE80211_RATE_SHORT_PREAMBLE),
70  RATE(60, 0xb, 0, 0),
71  RATE(90, 0xf, 0, 0),
72  RATE(120, 0xa, 0, 0),
73  RATE(180, 0xe, 0, 0),
74  RATE(240, 0x9, 0, 0),
75  RATE(360, 0xd, 1, 0),
76  RATE(480, 0x8, 2, 0),
77  RATE(540, 0xc, 3, 0),
78 };
79 #undef RATE
80 
81 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size 12
83 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size 8
85 
86 /*
87  * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88  * array in phy.c so that we don't have to do frequency lookups!
89  */
90 #define CHAN(_freq, _idx) { \
91  .center_freq = (_freq), \
92  .hw_value = (_idx), \
93  .max_power = 18, /* XXX */ \
94 }
95 
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97  CHAN(2412, 0),
98  CHAN(2417, 1),
99  CHAN(2422, 2),
100  CHAN(2427, 3),
101  CHAN(2432, 4),
102  CHAN(2437, 5),
103  CHAN(2442, 6),
104  CHAN(2447, 7),
105  CHAN(2452, 8),
106  CHAN(2457, 9),
107  CHAN(2462, 10),
108  CHAN(2467, 11),
109  CHAN(2472, 12),
110  CHAN(2484, 13),
111 };
112 
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114  CHAN(4920, 14),
115  CHAN(4940, 15),
116  CHAN(4960, 16),
117  CHAN(4980, 17),
118  CHAN(5040, 18),
119  CHAN(5060, 19),
120  CHAN(5080, 20),
121  CHAN(5180, 21),
122  CHAN(5200, 22),
123  CHAN(5220, 23),
124  CHAN(5240, 24),
125  CHAN(5260, 25),
126  CHAN(5280, 26),
127  CHAN(5300, 27),
128  CHAN(5320, 28),
129  CHAN(5500, 29),
130  CHAN(5520, 30),
131  CHAN(5540, 31),
132  CHAN(5560, 32),
133  CHAN(5580, 33),
134  CHAN(5600, 34),
135  CHAN(5620, 35),
136  CHAN(5640, 36),
137  CHAN(5660, 37),
138  CHAN(5680, 38),
139  CHAN(5700, 39),
140  CHAN(5745, 40),
141  CHAN(5765, 41),
142  CHAN(5785, 42),
143  CHAN(5805, 43),
144  CHAN(5825, 44),
145  CHAN(5170, 45),
146  CHAN(5190, 46),
147  CHAN(5210, 47),
148  CHAN(5230, 48),
149 };
150 #undef CHAN
151 
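/*
 * HT capabilities shared by both bands: 20/40 MHz operation with
 * short GI on 40 MHz, two RX spatial streams (MCS 0-15 plus MCS 32)
 * and a peak RX rate of 300 Mbit/s.
 */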
152 #define CARL9170_HT_CAP \
153 { \
154  .ht_supported = true, \
155  .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156  IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157  IEEE80211_HT_CAP_SGI_40 | \
158  IEEE80211_HT_CAP_DSSSCCK40 | \
159  IEEE80211_HT_CAP_SM_PS, \
160  .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161  .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162  .mcs = { \
163  .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164  .rx_highest = cpu_to_le16(300), \
165  .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166  }, \
167 }
168 
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170  .channels = carl9170_2ghz_chantable,
171  .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172  .bitrates = carl9170_g_ratetable,
173  .n_bitrates = carl9170_g_ratetable_size,
174  .ht_cap = CARL9170_HT_CAP,
175 };
176 
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178  .channels = carl9170_5ghz_chantable,
179  .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180  .bitrates = carl9170_a_ratetable,
181  .n_bitrates = carl9170_a_ratetable_size,
182  .ht_cap = CARL9170_HT_CAP,
183 };
184 
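/*
 * Garbage-collect aggregation sessions: TIDs that have reached the
 * SHUTDOWN state are unlinked from tx_ampdu_list and, after an RCU
 * grace period, their queued frames are reported back and the TID
 * structures are freed.
 */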
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 {
187  struct carl9170_sta_tid *tid_info;
188  LIST_HEAD(tid_gc);
189 
190  rcu_read_lock();
191  list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192  spin_lock_bh(&ar->tx_ampdu_list_lock);
193  if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194  tid_info->state = CARL9170_TID_STATE_KILLED;
195  list_del_rcu(&tid_info->list);
196  ar->tx_ampdu_list_len--;
197  list_add_tail(&tid_info->tmp_list, &tid_gc);
198  }
199  spin_unlock_bh(&ar->tx_ampdu_list_lock);
200 
201  }
202  rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203  rcu_read_unlock();
204 
205  synchronize_rcu();
206 
207  while (!list_empty(&tid_gc)) {
208  struct sk_buff *skb;
209  tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210  tmp_list);
211 
212  while ((skb = __skb_dequeue(&tid_info->queue)))
213  carl9170_tx_status(ar, skb, false);
214 
215  list_del_init(&tid_info->tmp_list);
216  kfree(tid_info);
217  }
218 }
219 
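/*
 * Flush TX: optionally drop every frame that is still sitting in the
 * driver's pending queues, then wait for frames that have already
 * been uploaded to the device to drain.
 */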
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222  if (drop_queued) {
223  int i;
224 
225  /*
226  * We can only drop frames which have not been uploaded
227  * to the device yet.
228  */
229 
230  for (i = 0; i < ar->hw->queues; i++) {
231  struct sk_buff *skb;
232 
233  while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234  struct ieee80211_tx_info *info;
235 
236  info = IEEE80211_SKB_CB(skb);
237  if (info->flags & IEEE80211_TX_CTL_AMPDU)
238  atomic_dec(&ar->tx_ampdu_upload);
239 
240  carl9170_tx_status(ar, skb, false);
241  }
242  }
243  }
244 
245  /* Wait for all other outstanding frames to timeout. */
246  if (atomic_read(&ar->tx_total_queued))
247  WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249 
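/*
 * Suspend all running block-ack sessions and report their queued
 * frames back to mac80211 as not acknowledged.
 */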
250 static void carl9170_flush_ba(struct ar9170 *ar)
251 {
252  struct sk_buff_head free;
253  struct carl9170_sta_tid *tid_info;
254  struct sk_buff *skb;
255 
256  __skb_queue_head_init(&free);
257 
258  rcu_read_lock();
259  spin_lock_bh(&ar->tx_ampdu_list_lock);
260  list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261  if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262  tid_info->state = CARL9170_TID_STATE_SUSPEND;
263 
264  spin_lock(&tid_info->lock);
265  while ((skb = __skb_dequeue(&tid_info->queue)))
266  __skb_queue_tail(&free, skb);
267  spin_unlock(&tid_info->lock);
268  }
269  }
270  spin_unlock_bh(&ar->tx_ampdu_list_lock);
271  rcu_read_unlock();
272 
273  while ((skb = __skb_dequeue(&free)))
274  carl9170_tx_status(ar, skb, false);
275 }
276 
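/*
 * Bring the TX machinery back to a clean state: drop pending and
 * uploaded frames, reset the queue statistics and the firmware memory
 * bitmap, and free any cached beacons. Used by the start, stop and
 * restart paths.
 */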
277 static void carl9170_zap_queues(struct ar9170 *ar)
278 {
279  struct carl9170_vif_info *cvif;
280  unsigned int i;
281 
282  carl9170_ampdu_gc(ar);
283 
284  carl9170_flush_ba(ar);
285  carl9170_flush(ar, true);
286 
287  for (i = 0; i < ar->hw->queues; i++) {
288  spin_lock_bh(&ar->tx_status[i].lock);
289  while (!skb_queue_empty(&ar->tx_status[i])) {
290  struct sk_buff *skb;
291 
292  skb = skb_peek(&ar->tx_status[i]);
293  carl9170_tx_get_skb(skb);
294  spin_unlock_bh(&ar->tx_status[i].lock);
295  carl9170_tx_drop(ar, skb);
296  spin_lock_bh(&ar->tx_status[i].lock);
297  carl9170_tx_put_skb(skb);
298  }
299  spin_unlock_bh(&ar->tx_status[i].lock);
300  }
301 
305 
306  /* reinitialize queues statistics */
307  memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308  for (i = 0; i < ar->hw->queues; i++)
309  ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310 
311  for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312  ar->mem_bitmap[i] = 0;
313 
314  rcu_read_lock();
315  list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316  spin_lock_bh(&ar->beacon_lock);
317  dev_kfree_skb_any(cvif->beacon);
318  cvif->beacon = NULL;
319  spin_unlock_bh(&ar->beacon_lock);
320  }
321  rcu_read_unlock();
322 
323  atomic_set(&ar->tx_ampdu_upload, 0);
325  atomic_set(&ar->tx_total_pending, 0);
326  atomic_set(&ar->tx_total_queued, 0);
327  atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
328 }
329 
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
331 do { \
332  queue.aifs = ai_fs; \
333  queue.cw_min = cwmin; \
334  queue.cw_max = cwmax; \
335  queue.txop = _txop; \
336 } while (0)
337 
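/*
 * mac80211 start callback: reset the software state, open the USB
 * transport, program the MAC, QoS and RX filter defaults and wipe the
 * hardware key cache before declaring the device started.
 */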
338 static int carl9170_op_start(struct ieee80211_hw *hw)
339 {
340  struct ar9170 *ar = hw->priv;
341  int err, i;
342 
343  mutex_lock(&ar->mutex);
344 
345  carl9170_zap_queues(ar);
346 
347  /* reset QoS defaults */
348  CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
349  CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
350  CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
351  CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
352  CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
353 
354  ar->current_factor = ar->current_density = -1;
355  /* "The first key is unique." */
356  ar->usedkeys = 1;
357  ar->filter_state = 0;
358  ar->ps.last_action = jiffies;
359  ar->ps.last_slept = jiffies;
361  ar->rx_software_decryption = false;
362  ar->disable_offload = false;
363 
364  for (i = 0; i < ar->hw->queues; i++) {
365  ar->queue_stop_timeout[i] = jiffies;
366  ar->max_queue_stop_timeout[i] = 0;
367  }
368 
369  atomic_set(&ar->mem_allocs, 0);
370 
371  err = carl9170_usb_open(ar);
372  if (err)
373  goto out;
374 
375  err = carl9170_init_mac(ar);
376  if (err)
377  goto out;
378 
379  err = carl9170_set_qos(ar);
380  if (err)
381  goto out;
382 
383  if (ar->fw.rx_filter) {
384  err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
385  CARL9170_RX_FILTER_CTL_OTHER);
386  if (err)
387  goto out;
388  }
389 
390  err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
391  AR9170_DMA_TRIGGER_RXQ);
392  if (err)
393  goto out;
394 
395  /* Clear key-cache */
396  for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
397  err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
398  0, NULL, 0);
399  if (err)
400  goto out;
401 
402  err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403  1, NULL, 0);
404  if (err)
405  goto out;
406 
407  if (i < AR9170_CAM_MAX_USER) {
408  err = carl9170_disable_key(ar, i);
409  if (err)
410  goto out;
411  }
412  }
413 
414  carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
415 
418 
420  err = 0;
421 
422 out:
423  mutex_unlock(&ar->mutex);
424  return err;
425 }
426 
427 static void carl9170_cancel_worker(struct ar9170 *ar)
428 {
431 #ifdef CONFIG_CARL9170_LEDS
432  cancel_delayed_work_sync(&ar->led_work);
433 #endif /* CONFIG_CARL9170_LEDS */
437 }
438 
439 static void carl9170_op_stop(struct ieee80211_hw *hw)
440 {
441  struct ar9170 *ar = hw->priv;
442 
443  carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
444 
446 
447  mutex_lock(&ar->mutex);
448  if (IS_ACCEPTING_CMD(ar)) {
450 
451  carl9170_led_set_state(ar, 0);
452 
453  /* stop DMA */
454  carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
455  carl9170_usb_stop(ar);
456  }
457 
458  carl9170_zap_queues(ar);
459  mutex_unlock(&ar->mutex);
460 
461  carl9170_cancel_worker(ar);
462 }
463 
464 static void carl9170_restart_work(struct work_struct *work)
465 {
466  struct ar9170 *ar = container_of(work, struct ar9170,
467  restart_work);
468  int err = -EIO;
469 
470  ar->usedkeys = 0;
471  ar->filter_state = 0;
472  carl9170_cancel_worker(ar);
473 
474  mutex_lock(&ar->mutex);
475  if (!ar->force_usb_reset) {
476  err = carl9170_usb_restart(ar);
477  if (net_ratelimit()) {
478  if (err)
479  dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
480  else
481  dev_info(&ar->udev->dev, "device restarted successfully.\n");
482  }
483  }
484  carl9170_zap_queues(ar);
485  mutex_unlock(&ar->mutex);
486 
487  if (!err && !ar->force_usb_reset) {
488  ar->restart_counter++;
489  atomic_set(&ar->pending_restarts, 0);
490 
491  ieee80211_restart_hw(ar->hw);
492  } else {
493  /*
494  * The reset was unsuccessful and the device seems to
495  * be dead. But there's still one option: a low-level
496  * usb subsystem reset...
497  */
498 
499  carl9170_usb_reset(ar);
500  }
501 }
502 
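/*
 * Request an asynchronous device reset. This only records the reason
 * and schedules restart_work; the actual recovery happens in
 * carl9170_restart_work() above.
 */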
503 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
504 {
505  carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
506 
507  /*
508  * Sometimes, an error can trigger several different reset events.
509  * By ignoring these *surplus* reset events, the device won't be
510  * killed again, right after it has recovered.
511  */
512  if (atomic_inc_return(&ar->pending_restarts) > 1) {
513  dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
514  return;
515  }
516 
518 
519  dev_err(&ar->udev->dev, "restart device (%d)\n", r);
520 
521  if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
522  !WARN_ON(r >= __CARL9170_RR_LAST))
523  ar->last_reason = r;
524 
525  if (!ar->registered)
526  return;
527 
528  if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
529  ar->force_usb_reset = true;
530 
531  ieee80211_queue_work(ar->hw, &ar->restart_work);
532 
533  /*
534  * At this point, the device instance might have vanished or been
535  * disabled. So don't put any code that accesses the ar9170 struct
536  * here without proper protection.
537  */
538 }
539 
540 static void carl9170_ping_work(struct work_struct *work)
541 {
542  struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
543  int err;
544 
545  if (!IS_STARTED(ar))
546  return;
547 
548  mutex_lock(&ar->mutex);
549  err = carl9170_echo_test(ar, 0xdeadbeef);
550  if (err)
551  carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
552  mutex_unlock(&ar->mutex);
553 }
554 
555 static int carl9170_init_interface(struct ar9170 *ar,
556  struct ieee80211_vif *vif)
557 {
558  struct ath_common *common = &ar->common;
559  int err;
560 
561  if (!vif) {
563  return 0;
564  }
565 
566  memcpy(common->macaddr, vif->addr, ETH_ALEN);
567 
568  if (modparam_nohwcrypt ||
569  ((vif->type != NL80211_IFTYPE_STATION) &&
570  (vif->type != NL80211_IFTYPE_AP))) {
571  ar->rx_software_decryption = true;
572  ar->disable_offload = true;
573  }
574 
575  err = carl9170_set_operating_mode(ar);
576  return err;
577 }
578 
579 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
580  struct ieee80211_vif *vif)
581 {
582  struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
583  struct ieee80211_vif *main_vif;
584  struct ar9170 *ar = hw->priv;
585  int vif_id = -1, err = 0;
586 
587  mutex_lock(&ar->mutex);
588  rcu_read_lock();
589  if (vif_priv->active) {
590  /*
591  * Skip the interface structure initialization,
592  * if the vif survived the _restart call.
593  */
594  vif_id = vif_priv->id;
595  vif_priv->enable_beacon = false;
596 
597  spin_lock_bh(&ar->beacon_lock);
598  dev_kfree_skb_any(vif_priv->beacon);
599  vif_priv->beacon = NULL;
600  spin_unlock_bh(&ar->beacon_lock);
601 
602  goto init;
603  }
604 
605  main_vif = carl9170_get_main_vif(ar);
606 
607  if (main_vif) {
608  switch (main_vif->type) {
609  case NL80211_IFTYPE_STATION:
610  if (vif->type == NL80211_IFTYPE_STATION)
611  break;
612 
613  err = -EBUSY;
614  rcu_read_unlock();
615 
616  goto unlock;
617 
618  case NL80211_IFTYPE_MESH_POINT:
619  case NL80211_IFTYPE_AP:
620  if ((vif->type == NL80211_IFTYPE_STATION) ||
621  (vif->type == NL80211_IFTYPE_WDS) ||
622  (vif->type == NL80211_IFTYPE_AP) ||
623  (vif->type == NL80211_IFTYPE_MESH_POINT))
624  break;
625 
626  err = -EBUSY;
627  rcu_read_unlock();
628  goto unlock;
629 
630  default:
631  rcu_read_unlock();
632  goto unlock;
633  }
634  }
635 
636  vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
637 
638  if (vif_id < 0) {
639  rcu_read_unlock();
640 
641  err = -ENOSPC;
642  goto unlock;
643  }
644 
645  BUG_ON(ar->vif_priv[vif_id].id != vif_id);
646 
647  vif_priv->active = true;
648  vif_priv->id = vif_id;
649  vif_priv->enable_beacon = false;
650  ar->vifs++;
651  list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
652  rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
653 
654 init:
655  if (carl9170_get_main_vif(ar) == vif) {
656  rcu_assign_pointer(ar->beacon_iter, vif_priv);
657  rcu_read_unlock();
658 
659  err = carl9170_init_interface(ar, vif);
660  if (err)
661  goto unlock;
662  } else {
663  rcu_read_unlock();
664  err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
665 
666  if (err)
667  goto unlock;
668  }
669 
670  if (ar->fw.tx_seq_table) {
671  err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
672  0);
673  if (err)
674  goto unlock;
675  }
676 
677 unlock:
678  if (err && (vif_id >= 0)) {
679  vif_priv->active = false;
680  bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
681  ar->vifs--;
682  RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
683  list_del_rcu(&vif_priv->list);
684  mutex_unlock(&ar->mutex);
685  synchronize_rcu();
686  } else {
687  if (ar->vifs > 1)
688  ar->ps.off_override |= PS_OFF_VIF;
689 
690  mutex_unlock(&ar->mutex);
691  }
692 
693  return err;
694 }
695 
696 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
697  struct ieee80211_vif *vif)
698 {
699  struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
700  struct ieee80211_vif *main_vif;
701  struct ar9170 *ar = hw->priv;
702  unsigned int id;
703 
704  mutex_lock(&ar->mutex);
705 
706  if (WARN_ON_ONCE(!vif_priv->active))
707  goto unlock;
708 
709  ar->vifs--;
710 
711  rcu_read_lock();
712  main_vif = carl9170_get_main_vif(ar);
713 
714  id = vif_priv->id;
715 
716  vif_priv->active = false;
717  WARN_ON(vif_priv->enable_beacon);
718  vif_priv->enable_beacon = false;
719  list_del_rcu(&vif_priv->list);
720  RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
721 
722  if (vif == main_vif) {
723  rcu_read_unlock();
724 
725  if (ar->vifs) {
726  WARN_ON(carl9170_init_interface(ar,
727  carl9170_get_main_vif(ar)));
728  } else {
729  carl9170_set_operating_mode(ar);
730  }
731  } else {
732  rcu_read_unlock();
733 
734  WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
735  }
736 
737  carl9170_update_beacon(ar, false);
738  carl9170_flush_cab(ar, id);
739 
740  spin_lock_bh(&ar->beacon_lock);
741  dev_kfree_skb_any(vif_priv->beacon);
742  vif_priv->beacon = NULL;
743  spin_unlock_bh(&ar->beacon_lock);
744 
745  bitmap_release_region(&ar->vif_bitmap, id, 0);
746 
748 
749  if (ar->vifs == 1)
750  ar->ps.off_override &= ~PS_OFF_VIF;
751 
752 unlock:
753  mutex_unlock(&ar->mutex);
754 
755  synchronize_rcu();
756 }
757 
758 void carl9170_ps_check(struct ar9170 *ar)
759 {
760  ieee80211_queue_work(ar->hw, &ar->ps_work);
761 }
762 
763 /* caller must hold ar->mutex */
764 static int carl9170_ps_update(struct ar9170 *ar)
765 {
766  bool ps = false;
767  int err = 0;
768 
769  if (!ar->ps.off_override)
770  ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
771 
772  if (ps != ar->ps.state) {
773  err = carl9170_powersave(ar, ps);
774  if (err)
775  return err;
776 
777  if (ar->ps.state && !ps) {
778  ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
779  ar->ps.last_action);
780  }
781 
782  if (ps)
783  ar->ps.last_slept = jiffies;
784 
785  ar->ps.last_action = jiffies;
786  ar->ps.state = ps;
787  }
788 
789  return 0;
790 }
791 
792 static void carl9170_ps_work(struct work_struct *work)
793 {
794  struct ar9170 *ar = container_of(work, struct ar9170,
795  ps_work);
796  mutex_lock(&ar->mutex);
797  if (IS_STARTED(ar))
798  WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
799  mutex_unlock(&ar->mutex);
800 }
801 
802 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
803 {
804  int err;
805 
806  if (noise) {
807  err = carl9170_get_noisefloor(ar);
808  if (err)
809  return err;
810  }
811 
812  if (ar->fw.hw_counters) {
813  err = carl9170_collect_tally(ar);
814  if (err)
815  return err;
816  }
817 
818  if (flush)
819  memset(&ar->tally, 0, sizeof(ar->tally));
820 
821  return 0;
822 }
823 
824 static void carl9170_stat_work(struct work_struct *work)
825 {
826  struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
827  int err;
828 
829  mutex_lock(&ar->mutex);
830  err = carl9170_update_survey(ar, false, true);
831  mutex_unlock(&ar->mutex);
832 
833  if (err)
834  return;
835 
838 }
839 
840 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
841 {
842  struct ar9170 *ar = hw->priv;
843  int err = 0;
844 
845  mutex_lock(&ar->mutex);
846  if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
847  /* TODO */
848  err = 0;
849  }
850 
851  if (changed & IEEE80211_CONF_CHANGE_PS) {
852  err = carl9170_ps_update(ar);
853  if (err)
854  goto out;
855  }
856 
857  if (changed & IEEE80211_CONF_CHANGE_SMPS) {
858  /* TODO */
859  err = 0;
860  }
861 
862  if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
863  /* adjust slot time for 5 GHz */
864  err = carl9170_set_slot_time(ar);
865  if (err)
866  goto out;
867 
868  err = carl9170_update_survey(ar, true, false);
869  if (err)
870  goto out;
871 
872  err = carl9170_set_channel(ar, hw->conf.channel,
873  hw->conf.channel_type, CARL9170_RFI_NONE);
874  if (err)
875  goto out;
876 
877  err = carl9170_update_survey(ar, false, true);
878  if (err)
879  goto out;
880 
881  err = carl9170_set_dyn_sifs_ack(ar);
882  if (err)
883  goto out;
884 
885  err = carl9170_set_rts_cts_rate(ar);
886  if (err)
887  goto out;
888  }
889 
890  if (changed & IEEE80211_CONF_CHANGE_POWER) {
891  err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel);
892  if (err)
893  goto out;
894  }
895 
896 out:
897  mutex_unlock(&ar->mutex);
898  return err;
899 }
900 
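/*
 * Build the 64-bit multicast hash that is later programmed through
 * carl9170_update_multicast(): each group address sets the bit
 * selected by the top six bits of its last octet.
 */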
901 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
902  struct netdev_hw_addr_list *mc_list)
903 {
904  struct netdev_hw_addr *ha;
905  u64 mchash;
906 
907  /* always get broadcast frames */
908  mchash = 1ULL << (0xff >> 2);
909 
910  netdev_hw_addr_list_for_each(ha, mc_list)
911  mchash |= 1ULL << (ha->addr[5] >> 2);
912 
913  return mchash;
914 }
915 
916 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
917  unsigned int changed_flags,
918  unsigned int *new_flags,
919  u64 multicast)
920 {
921  struct ar9170 *ar = hw->priv;
922 
923  /* mask supported flags */
924  *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
925 
926  if (!IS_ACCEPTING_CMD(ar))
927  return;
928 
929  mutex_lock(&ar->mutex);
930 
931  ar->filter_state = *new_flags;
932  /*
933  * We can support more by setting the sniffer bit and
934  * then checking the error flags, later.
935  */
936 
937  if (*new_flags & FIF_ALLMULTI)
938  multicast = ~0ULL;
939 
940  if (multicast != ar->cur_mc_hash)
941  WARN_ON(carl9170_update_multicast(ar, multicast));
942 
943  if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
944  ar->sniffer_enabled = !!(*new_flags &
945  (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
946 
948  }
949 
950  if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
951  u32 rx_filter = 0;
952 
953  if (!ar->fw.ba_filter)
954  rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
955 
956  if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
957  rx_filter |= CARL9170_RX_FILTER_BAD;
958 
959  if (!(*new_flags & FIF_CONTROL))
960  rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
961 
962  if (!(*new_flags & FIF_PSPOLL))
963  rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
964 
965  if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
966  rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
967  rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
968  }
969 
970  WARN_ON(carl9170_rx_filter(ar, rx_filter));
971  }
972 
973  mutex_unlock(&ar->mutex);
974 }
975 
976 
977 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
978  struct ieee80211_vif *vif,
979  struct ieee80211_bss_conf *bss_conf,
980  u32 changed)
981 {
982  struct ar9170 *ar = hw->priv;
983  struct ath_common *common = &ar->common;
984  int err = 0;
985  struct carl9170_vif_info *vif_priv;
986  struct ieee80211_vif *main_vif;
987 
988  mutex_lock(&ar->mutex);
989  vif_priv = (void *) vif->drv_priv;
990  main_vif = carl9170_get_main_vif(ar);
991  if (WARN_ON(!main_vif))
992  goto out;
993 
994  if (changed & BSS_CHANGED_BEACON_ENABLED) {
995  struct carl9170_vif_info *iter;
996  int i = 0;
997 
998  vif_priv->enable_beacon = bss_conf->enable_beacon;
999  rcu_read_lock();
1000  list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1001  if (iter->active && iter->enable_beacon)
1002  i++;
1003 
1004  }
1005  rcu_read_unlock();
1006 
1007  ar->beacon_enabled = i;
1008  }
1009 
1010  if (changed & BSS_CHANGED_BEACON) {
1011  err = carl9170_update_beacon(ar, false);
1012  if (err)
1013  goto out;
1014  }
1015 
1016  if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1017  BSS_CHANGED_BEACON_INT)) {
1018 
1019  if (main_vif != vif) {
1020  bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1021  bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1022  }
1023 
1024  /*
1025  * Therefore a hard limit for the broadcast traffic should
1026  * prevent false alarms.
1027  */
1028  if (vif->type != NL80211_IFTYPE_STATION &&
1029  (bss_conf->beacon_int * bss_conf->dtim_period >=
1031  err = -EINVAL;
1032  goto out;
1033  }
1034 
1035  err = carl9170_set_beacon_timers(ar);
1036  if (err)
1037  goto out;
1038  }
1039 
1040  if (changed & BSS_CHANGED_HT) {
1041  /* TODO */
1042  err = 0;
1043  if (err)
1044  goto out;
1045  }
1046 
1047  if (main_vif != vif)
1048  goto out;
1049 
1050  /*
1051  * The following settings can only be changed by the
1052  * master interface.
1053  */
1054 
1055  if (changed & BSS_CHANGED_BSSID) {
1056  memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1057  err = carl9170_set_operating_mode(ar);
1058  if (err)
1059  goto out;
1060  }
1061 
1062  if (changed & BSS_CHANGED_ASSOC) {
1063  ar->common.curaid = bss_conf->aid;
1064  err = carl9170_set_beacon_timers(ar);
1065  if (err)
1066  goto out;
1067  }
1068 
1069  if (changed & BSS_CHANGED_ERP_SLOT) {
1070  err = carl9170_set_slot_time(ar);
1071  if (err)
1072  goto out;
1073  }
1074 
1075  if (changed & BSS_CHANGED_BASIC_RATES) {
1076  err = carl9170_set_mac_rates(ar);
1077  if (err)
1078  goto out;
1079  }
1080 
1081 out:
1082  WARN_ON_ONCE(err && IS_STARTED(ar));
1083  mutex_unlock(&ar->mutex);
1084 }
1085 
1086 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1087  struct ieee80211_vif *vif)
1088 {
1089  struct ar9170 *ar = hw->priv;
1090  struct carl9170_tsf_rsp tsf;
1091  int err;
1092 
1093  mutex_lock(&ar->mutex);
1094  err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1095  0, NULL, sizeof(tsf), &tsf);
1096  mutex_unlock(&ar->mutex);
1097  if (WARN_ON(err))
1098  return 0;
1099 
1100  return le64_to_cpu(tsf.tsf_64);
1101 }
1102 
1103 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1104  struct ieee80211_vif *vif,
1105  struct ieee80211_sta *sta,
1106  struct ieee80211_key_conf *key)
1107 {
1108  struct ar9170 *ar = hw->priv;
1109  int err = 0, i;
1110  u8 ktype;
1111 
1112  if (ar->disable_offload || !vif)
1113  return -EOPNOTSUPP;
1114 
1115  /*
1116  * We have to fall back to software encryption whenever
1117  * the user chooses to participate in an IBSS or is connected
1118  * to more than one network.
1119  *
1120  * This is very unfortunate, because some machines cannot handle
1121  * the high throughput speed of 802.11n networks.
1122  */
1123 
1124  if (!is_main_vif(ar, vif)) {
1125  mutex_lock(&ar->mutex);
1126  goto err_softw;
1127  }
1128 
1129  /*
1130  * While the hardware supports a *catch-all* key for offloading
1131  * group-key en-/decryption, the way the hardware decides which
1132  * keyId maps to which key remains a mystery...
1133  */
1134  if ((vif->type != NL80211_IFTYPE_STATION &&
1135  vif->type != NL80211_IFTYPE_ADHOC) &&
1136  !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1137  return -EOPNOTSUPP;
1138 
1139  switch (key->cipher) {
1140  case WLAN_CIPHER_SUITE_WEP40:
1141  ktype = AR9170_ENC_ALG_WEP64;
1142  break;
1143  case WLAN_CIPHER_SUITE_WEP104:
1144  ktype = AR9170_ENC_ALG_WEP128;
1145  break;
1146  case WLAN_CIPHER_SUITE_TKIP:
1147  ktype = AR9170_ENC_ALG_TKIP;
1148  break;
1149  case WLAN_CIPHER_SUITE_CCMP:
1150  ktype = AR9170_ENC_ALG_AESCCMP;
1152  break;
1153  default:
1154  return -EOPNOTSUPP;
1155  }
1156 
1157  mutex_lock(&ar->mutex);
1158  if (cmd == SET_KEY) {
1159  if (!IS_STARTED(ar)) {
1160  err = -EOPNOTSUPP;
1161  goto out;
1162  }
1163 
1164  if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1165  sta = NULL;
1166 
1167  i = 64 + key->keyidx;
1168  } else {
1169  for (i = 0; i < 64; i++)
1170  if (!(ar->usedkeys & BIT(i)))
1171  break;
1172  if (i == 64)
1173  goto err_softw;
1174  }
1175 
1176  key->hw_key_idx = i;
1177 
1178  err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1179  ktype, 0, key->key,
1180  min_t(u8, 16, key->keylen));
1181  if (err)
1182  goto out;
1183 
1184  if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1185  err = carl9170_upload_key(ar, i, sta ? sta->addr :
1186  NULL, ktype, 1,
1187  key->key + 16, 16);
1188  if (err)
1189  goto out;
1190 
1191  /*
1192  * The hardware is not capable of generating the MMIC
1193  * for fragmented frames!
1194  */
1195  key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1196  }
1197 
1198  if (i < 64)
1199  ar->usedkeys |= BIT(i);
1200 
1202  } else {
1203  if (!IS_STARTED(ar)) {
1204  /* The device is gone... together with the key ;-) */
1205  err = 0;
1206  goto out;
1207  }
1208 
1209  if (key->hw_key_idx < 64) {
1210  ar->usedkeys &= ~BIT(key->hw_key_idx);
1211  } else {
1212  err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1213  AR9170_ENC_ALG_NONE, 0,
1214  NULL, 0);
1215  if (err)
1216  goto out;
1217 
1218  if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1219  err = carl9170_upload_key(ar, key->hw_key_idx,
1220  NULL,
1221  AR9170_ENC_ALG_NONE,
1222  1, NULL, 0);
1223  if (err)
1224  goto out;
1225  }
1226 
1227  }
1228 
1229  err = carl9170_disable_key(ar, key->hw_key_idx);
1230  if (err)
1231  goto out;
1232  }
1233 
1234 out:
1235  mutex_unlock(&ar->mutex);
1236  return err;
1237 
1238 err_softw:
1239  if (!ar->rx_software_decryption) {
1240  ar->rx_software_decryption = true;
1242  }
1243  mutex_unlock(&ar->mutex);
1244  return -ENOSPC;
1245 }
1246 
1247 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1248  struct ieee80211_vif *vif,
1249  struct ieee80211_sta *sta)
1250 {
1251  struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1252  unsigned int i;
1253 
1254  atomic_set(&sta_info->pending_frames, 0);
1255 
1256  if (sta->ht_cap.ht_supported) {
1257  if (sta->ht_cap.ampdu_density > 6) {
1258  /*
1259  * The HW does not support a 16us AMPDU density;
1260  * no HT-Xmit for this station.
1261  */
1262 
1263  return 0;
1264  }
1265 
1266  for (i = 0; i < CARL9170_NUM_TID; i++)
1267  RCU_INIT_POINTER(sta_info->agg[i], NULL);
1268 
1269  sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1270  sta_info->ht_sta = true;
1271  }
1272 
1273  return 0;
1274 }
1275 
1276 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1277  struct ieee80211_vif *vif,
1278  struct ieee80211_sta *sta)
1279 {
1280  struct ar9170 *ar = hw->priv;
1281  struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1282  unsigned int i;
1283  bool cleanup = false;
1284 
1285  if (sta->ht_cap.ht_supported) {
1286 
1287  sta_info->ht_sta = false;
1288 
1289  rcu_read_lock();
1290  for (i = 0; i < CARL9170_NUM_TID; i++) {
1291  struct carl9170_sta_tid *tid_info;
1292 
1293  tid_info = rcu_dereference(sta_info->agg[i]);
1294  RCU_INIT_POINTER(sta_info->agg[i], NULL);
1295 
1296  if (!tid_info)
1297  continue;
1298 
1299  spin_lock_bh(&ar->tx_ampdu_list_lock);
1300  if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1301  tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1302  spin_unlock_bh(&ar->tx_ampdu_list_lock);
1303  cleanup = true;
1304  }
1305  rcu_read_unlock();
1306 
1307  if (cleanup)
1308  carl9170_ampdu_gc(ar);
1309  }
1310 
1311  return 0;
1312 }
1313 
1314 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1315  struct ieee80211_vif *vif, u16 queue,
1316  const struct ieee80211_tx_queue_params *param)
1317 {
1318  struct ar9170 *ar = hw->priv;
1319  int ret;
1320 
1321  mutex_lock(&ar->mutex);
1322  if (queue < ar->hw->queues) {
1323  memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1324  ret = carl9170_set_qos(ar);
1325  } else {
1326  ret = -EINVAL;
1327  }
1328 
1329  mutex_unlock(&ar->mutex);
1330  return ret;
1331 }
1332 
1333 static void carl9170_ampdu_work(struct work_struct *work)
1334 {
1335  struct ar9170 *ar = container_of(work, struct ar9170,
1336  ampdu_work);
1337 
1338  if (!IS_STARTED(ar))
1339  return;
1340 
1341  mutex_lock(&ar->mutex);
1342  carl9170_ampdu_gc(ar);
1343  mutex_unlock(&ar->mutex);
1344 }
1345 
1346 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1347  struct ieee80211_vif *vif,
1348  enum ieee80211_ampdu_mlme_action action,
1349  struct ieee80211_sta *sta,
1350  u16 tid, u16 *ssn, u8 buf_size)
1351 {
1352  struct ar9170 *ar = hw->priv;
1353  struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1354  struct carl9170_sta_tid *tid_info;
1355 
1356  if (modparam_noht)
1357  return -EOPNOTSUPP;
1358 
1359  switch (action) {
1360  case IEEE80211_AMPDU_TX_START:
1361  if (!sta_info->ht_sta)
1362  return -EOPNOTSUPP;
1363 
1364  rcu_read_lock();
1365  if (rcu_dereference(sta_info->agg[tid])) {
1366  rcu_read_unlock();
1367  return -EBUSY;
1368  }
1369 
1370  tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1371  GFP_ATOMIC);
1372  if (!tid_info) {
1373  rcu_read_unlock();
1374  return -ENOMEM;
1375  }
1376 
1377  tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1378  tid_info->state = CARL9170_TID_STATE_PROGRESS;
1379  tid_info->tid = tid;
1380  tid_info->max = sta_info->ampdu_max_len;
1381 
1382  INIT_LIST_HEAD(&tid_info->list);
1383  INIT_LIST_HEAD(&tid_info->tmp_list);
1384  skb_queue_head_init(&tid_info->queue);
1385  spin_lock_init(&tid_info->lock);
1386 
1387  spin_lock_bh(&ar->tx_ampdu_list_lock);
1388  ar->tx_ampdu_list_len++;
1389  list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1390  rcu_assign_pointer(sta_info->agg[tid], tid_info);
1391  spin_unlock_bh(&ar->tx_ampdu_list_lock);
1392  rcu_read_unlock();
1393 
1394  ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1395  break;
1396 
1397  case IEEE80211_AMPDU_TX_STOP:
1398  rcu_read_lock();
1399  tid_info = rcu_dereference(sta_info->agg[tid]);
1400  if (tid_info) {
1401  spin_lock_bh(&ar->tx_ampdu_list_lock);
1402  if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1403  tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1404  spin_unlock_bh(&ar->tx_ampdu_list_lock);
1405  }
1406 
1407  RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1408  rcu_read_unlock();
1409 
1410  ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1411  ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1412  break;
1413 
1414  case IEEE80211_AMPDU_TX_OPERATIONAL:
1415  rcu_read_lock();
1416  tid_info = rcu_dereference(sta_info->agg[tid]);
1417 
1418  sta_info->stats[tid].clear = true;
1419  sta_info->stats[tid].req = false;
1420 
1421  if (tid_info) {
1422  bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1423  tid_info->state = CARL9170_TID_STATE_IDLE;
1424  }
1425  rcu_read_unlock();
1426 
1427  if (WARN_ON_ONCE(!tid_info))
1428  return -EFAULT;
1429 
1430  break;
1431 
1432  case IEEE80211_AMPDU_RX_START:
1433  case IEEE80211_AMPDU_RX_STOP:
1434  /* Handled by hardware */
1435  break;
1436 
1437  default:
1438  return -EOPNOTSUPP;
1439  }
1440 
1441  return 0;
1442 }
1443 
1444 #ifdef CONFIG_CARL9170_WPC
1445 static int carl9170_register_wps_button(struct ar9170 *ar)
1446 {
1447  struct input_dev *input;
1448  int err;
1449 
1450  if (!(ar->features & CARL9170_WPS_BUTTON))
1451  return 0;
1452 
1453  input = input_allocate_device();
1454  if (!input)
1455  return -ENOMEM;
1456 
1457  snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1458  wiphy_name(ar->hw->wiphy));
1459 
1460  snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1461  "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1462 
1463  input->name = ar->wps.name;
1464  input->phys = ar->wps.phys;
1465  input->id.bustype = BUS_USB;
1466  input->dev.parent = &ar->hw->wiphy->dev;
1467 
1468  input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1469 
1470  err = input_register_device(input);
1471  if (err) {
1472  input_free_device(input);
1473  return err;
1474  }
1475 
1476  ar->wps.pbc = input;
1477  return 0;
1478 }
1479 #endif /* CONFIG_CARL9170_WPC */
1480 
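/*
 * The AR9170 exposes a hardware random number register. The helpers
 * below batch-read it via firmware commands and feed the results to
 * the kernel's hwrng framework.
 */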
1481 #ifdef CONFIG_CARL9170_HWRNG
1482 static int carl9170_rng_get(struct ar9170 *ar)
1483 {
1484 
1485 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1486 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1487 
1488  static const __le32 rng_load[RW] = {
1489  [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1490 
1491  u32 buf[RW];
1492 
1493  unsigned int i, off = 0, transfer, count;
1494  int err;
1495 
1497 
1498  if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1499  return -EAGAIN;
1500 
1501  count = ARRAY_SIZE(ar->rng.cache);
1502  while (count) {
1503  err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1504  RB, (u8 *) rng_load,
1505  RB, (u8 *) buf);
1506  if (err)
1507  return err;
1508 
1509  transfer = min_t(unsigned int, count, RW);
1510  for (i = 0; i < transfer; i++)
1511  ar->rng.cache[off + i] = buf[i];
1512 
1513  off += transfer;
1514  count -= transfer;
1515  }
1516 
1517  ar->rng.cache_idx = 0;
1518 
1519 #undef RW
1520 #undef RB
1521  return 0;
1522 }
1523 
1524 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1525 {
1526  struct ar9170 *ar = (struct ar9170 *)rng->priv;
1527  int ret = -EIO;
1528 
1529  mutex_lock(&ar->mutex);
1530  if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1531  ret = carl9170_rng_get(ar);
1532  if (ret) {
1533  mutex_unlock(&ar->mutex);
1534  return ret;
1535  }
1536  }
1537 
1538  *data = ar->rng.cache[ar->rng.cache_idx++];
1539  mutex_unlock(&ar->mutex);
1540 
1541  return sizeof(u16);
1542 }
1543 
1544 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1545 {
1546  if (ar->rng.initialized) {
1547  hwrng_unregister(&ar->rng.rng);
1548  ar->rng.initialized = false;
1549  }
1550 }
1551 
1552 static int carl9170_register_hwrng(struct ar9170 *ar)
1553 {
1554  int err;
1555 
1556  snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1557  "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1558  ar->rng.rng.name = ar->rng.name;
1559  ar->rng.rng.data_read = carl9170_rng_read;
1560  ar->rng.rng.priv = (unsigned long)ar;
1561 
1562  if (WARN_ON(ar->rng.initialized))
1563  return -EALREADY;
1564 
1565  err = hwrng_register(&ar->rng.rng);
1566  if (err) {
1567  dev_err(&ar->udev->dev, "Failed to register the random "
1568  "number generator (%d)\n", err);
1569  return err;
1570  }
1571 
1572  ar->rng.initialized = true;
1573 
1574  err = carl9170_rng_get(ar);
1575  if (err) {
1576  carl9170_unregister_hwrng(ar);
1577  return err;
1578  }
1579 
1580  return 0;
1581 }
1582 #endif /* CONFIG_CARL9170_HWRNG */
1583 
1584 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1585  struct survey_info *survey)
1586 {
1587  struct ar9170 *ar = hw->priv;
1588  struct ieee80211_channel *chan;
1589  struct ieee80211_supported_band *band;
1590  int err, b, i;
1591 
1592  chan = ar->channel;
1593  if (!chan)
1594  return -ENODEV;
1595 
1596  if (idx == chan->hw_value) {
1597  mutex_lock(&ar->mutex);
1598  err = carl9170_update_survey(ar, false, true);
1599  mutex_unlock(&ar->mutex);
1600  if (err)
1601  return err;
1602  }
1603 
1604  for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1605  band = ar->hw->wiphy->bands[b];
1606 
1607  if (!band)
1608  continue;
1609 
1610  for (i = 0; i < band->n_channels; i++) {
1611  if (band->channels[i].hw_value == idx) {
1612  chan = &band->channels[i];
1613  goto found;
1614  }
1615  }
1616  }
1617  return -ENOENT;
1618 
1619 found:
1620  memcpy(survey, &ar->survey[idx], sizeof(*survey));
1621 
1622  survey->channel = chan;
1623  survey->filled = SURVEY_INFO_NOISE_DBM;
1624 
1625  if (ar->channel == chan)
1626  survey->filled |= SURVEY_INFO_IN_USE;
1627 
1628  if (ar->fw.hw_counters) {
1629  survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1630  SURVEY_INFO_CHANNEL_TIME_BUSY |
1631  SURVEY_INFO_CHANNEL_TIME_TX;
1632  }
1633 
1634  return 0;
1635 }
1636 
1637 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1638 {
1639  struct ar9170 *ar = hw->priv;
1640  unsigned int vid;
1641 
1642  mutex_lock(&ar->mutex);
1643  for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1644  carl9170_flush_cab(ar, vid);
1645 
1646  carl9170_flush(ar, drop);
1647  mutex_unlock(&ar->mutex);
1648 }
1649 
1650 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1651  struct ieee80211_low_level_stats *stats)
1652 {
1653  struct ar9170 *ar = hw->priv;
1654 
1655  memset(stats, 0, sizeof(*stats));
1656  stats->dot11ACKFailureCount = ar->tx_ack_failures;
1657  stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1658  return 0;
1659 }
1660 
1661 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1662  struct ieee80211_vif *vif,
1663  enum sta_notify_cmd cmd,
1664  struct ieee80211_sta *sta)
1665 {
1666  struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1667 
1668  switch (cmd) {
1669  case STA_NOTIFY_SLEEP:
1670  sta_info->sleeping = true;
1671  if (atomic_read(&sta_info->pending_frames))
1672  ieee80211_sta_block_awake(hw, sta, true);
1673  break;
1674 
1675  case STA_NOTIFY_AWAKE:
1676  sta_info->sleeping = false;
1677  break;
1678  }
1679 }
1680 
1681 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1682 {
1683  struct ar9170 *ar = hw->priv;
1684 
1685  return !!atomic_read(&ar->tx_total_queued);
1686 }
1687 
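/* mac80211 callbacks implemented by this driver. */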
1688 static const struct ieee80211_ops carl9170_ops = {
1689  .start = carl9170_op_start,
1690  .stop = carl9170_op_stop,
1691  .tx = carl9170_op_tx,
1692  .flush = carl9170_op_flush,
1693  .add_interface = carl9170_op_add_interface,
1694  .remove_interface = carl9170_op_remove_interface,
1695  .config = carl9170_op_config,
1696  .prepare_multicast = carl9170_op_prepare_multicast,
1697  .configure_filter = carl9170_op_configure_filter,
1698  .conf_tx = carl9170_op_conf_tx,
1699  .bss_info_changed = carl9170_op_bss_info_changed,
1700  .get_tsf = carl9170_op_get_tsf,
1701  .set_key = carl9170_op_set_key,
1702  .sta_add = carl9170_op_sta_add,
1703  .sta_remove = carl9170_op_sta_remove,
1704  .sta_notify = carl9170_op_sta_notify,
1705  .get_survey = carl9170_op_get_survey,
1706  .get_stats = carl9170_op_get_stats,
1707  .ampdu_action = carl9170_op_ampdu_action,
1708  .tx_frames_pending = carl9170_tx_frames_pending,
1709 };
1710 
1711 void *carl9170_alloc(size_t priv_size)
1712 {
1713  struct ieee80211_hw *hw;
1714  struct ar9170 *ar;
1715  struct sk_buff *skb;
1716  int i;
1717 
1718  /*
1719  * this buffer is used for rx stream reconstruction.
1720  * Under heavy load this device (or the transport layer?)
1721  * tends to split the streams into separate rx descriptors.
1722  */
1723 
1724  skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1725  if (!skb)
1726  goto err_nomem;
1727 
1728  hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1729  if (!hw)
1730  goto err_nomem;
1731 
1732  ar = hw->priv;
1733  ar->hw = hw;
1734  ar->rx_failover = skb;
1735 
1736  memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1737  ar->rx_has_plcp = false;
1738 
1739  /*
1740  * Here's a hidden pitfall!
1741  *
1742  * All 4 AC queues work perfectly well under _legacy_ operation.
1743  * However as soon as aggregation is enabled, the traffic flow
1744  * gets very bumpy. Therefore we have to _switch_ to a
1745  * software AC with a single HW queue.
1746  */
1747  hw->queues = __AR9170_NUM_TXQ;
1748 
1749  mutex_init(&ar->mutex);
1751  spin_lock_init(&ar->cmd_lock);
1754  spin_lock_init(&ar->mem_lock);
1755  spin_lock_init(&ar->state_lock);
1756  atomic_set(&ar->pending_restarts, 0);
1757  ar->vifs = 0;
1758  for (i = 0; i < ar->hw->queues; i++) {
1759  skb_queue_head_init(&ar->tx_status[i]);
1760  skb_queue_head_init(&ar->tx_pending[i]);
1761 
1762  INIT_LIST_HEAD(&ar->bar_list[i]);
1763  spin_lock_init(&ar->bar_list_lock[i]);
1764  }
1765  INIT_WORK(&ar->ps_work, carl9170_ps_work);
1766  INIT_WORK(&ar->ping_work, carl9170_ping_work);
1767  INIT_WORK(&ar->restart_work, carl9170_restart_work);
1768  INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1769  INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1771  INIT_LIST_HEAD(&ar->tx_ampdu_list);
1772  rcu_assign_pointer(ar->tx_ampdu_iter,
1773  (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1774 
1775  bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1776  INIT_LIST_HEAD(&ar->vif_list);
1777  init_completion(&ar->tx_flush);
1778 
1779  /* firmware decides which modes we support */
1780  hw->wiphy->interface_modes = 0;
1781 
1789 
1790  if (!modparam_noht) {
1791  /*
1792  * See the comment above for why we allow the user
1793  * to disable HT via a module parameter.
1794  */
1795  hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1796  }
1797 
1798  hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1799  hw->sta_data_size = sizeof(struct carl9170_sta_info);
1800  hw->vif_data_size = sizeof(struct carl9170_vif_info);
1801 
1804 
1805  for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1806  ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1807 
1808  hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1809 
1810  /* As IBSS Encryption is software-based, IBSS RSN is supported. */
1811  hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1812  return ar;
1813 
1814 err_nomem:
1815  kfree_skb(skb);
1816  return ERR_PTR(-ENOMEM);
1817 }
1818 
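/*
 * Read the EEPROM contents into ar->eeprom, RW words at a time, using
 * firmware register-read commands.
 */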
1819 static int carl9170_read_eeprom(struct ar9170 *ar)
1820 {
1821 #define RW 8 /* number of words to read at once */
1822 #define RB (sizeof(u32) * RW)
1823  u8 *eeprom = (void *)&ar->eeprom;
1824  __le32 offsets[RW];
1825  int i, j, err;
1826 
1827  BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1828 
1830 #ifndef __CHECKER__
1831  /* don't want to handle trailing remains */
1832  BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1833 #endif
1834 
1835  for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1836  for (j = 0; j < RW; j++)
1837  offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1838  RB * i + 4 * j);
1839 
1840  err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1841  RB, (u8 *) &offsets,
1842  RB, eeprom + RB * i);
1843  if (err)
1844  return err;
1845  }
1846 
1847 #undef RW
1848 #undef RB
1849  return 0;
1850 }
1851 
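/*
 * Derive the supported bands, channel count and HT TX parameters from
 * the EEPROM contents and allocate the per-channel survey array.
 */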
1852 static int carl9170_parse_eeprom(struct ar9170 *ar)
1853 {
1854  struct ath_regulatory *regulatory = &ar->common.regulatory;
1855  unsigned int rx_streams, tx_streams, tx_params = 0;
1856  int bands = 0;
1857  int chans = 0;
1858 
1859  if (ar->eeprom.length == cpu_to_le16(0xffff))
1860  return -ENODATA;
1861 
1862  rx_streams = hweight8(ar->eeprom.rx_mask);
1863  tx_streams = hweight8(ar->eeprom.tx_mask);
1864 
1865  if (rx_streams != tx_streams) {
1866  tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1867 
1868  WARN_ON(!(tx_streams >= 1 && tx_streams <=
1869  IEEE80211_HT_MCS_TX_MAX_STREAMS));
1870 
1871  tx_params = (tx_streams - 1) <<
1872  IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1873 
1874  carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1875  carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1876  }
1877 
1878  if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1879  ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1880  &carl9170_band_2GHz;
1881  chans += carl9170_band_2GHz.n_channels;
1882  bands++;
1883  }
1884  if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1885  ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1886  &carl9170_band_5GHz;
1887  chans += carl9170_band_5GHz.n_channels;
1888  bands++;
1889  }
1890 
1891  if (!bands)
1892  return -EINVAL;
1893 
1894  ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1895  if (!ar->survey)
1896  return -ENOMEM;
1897  ar->num_channels = chans;
1898 
1899  /*
1900  * I measured this: a band switch takes roughly
1901  * 135 ms and a frequency switch about 80 ms.
1902  *
1903  * FIXME: measure these values again once EEPROM settings
1904  * are used, that will influence them!
1905  */
1906  if (bands == 2)
1907  ar->hw->channel_change_time = 135 * 1000;
1908  else
1909  ar->hw->channel_change_time = 80 * 1000;
1910 
1911  regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1912 
1913  /* second part of wiphy init */
1914  SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1915 
1916  return 0;
1917 }
1918 
1919 static int carl9170_reg_notifier(struct wiphy *wiphy,
1920  struct regulatory_request *request)
1921 {
1922  struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1923  struct ar9170 *ar = hw->priv;
1924 
1925  return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1926 }
1927 
1928 int carl9170_register(struct ar9170 *ar)
1929 {
1930  struct ath_regulatory *regulatory = &ar->common.regulatory;
1931  int err = 0, i;
1932 
1933  if (WARN_ON(ar->mem_bitmap))
1934  return -EINVAL;
1935 
1936  ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1937  sizeof(unsigned long), GFP_KERNEL);
1938 
1939  if (!ar->mem_bitmap)
1940  return -ENOMEM;
1941 
1942  /* try to read EEPROM, init MAC addr */
1943  err = carl9170_read_eeprom(ar);
1944  if (err)
1945  return err;
1946 
1947  err = carl9170_parse_eeprom(ar);
1948  if (err)
1949  return err;
1950 
1951  err = ath_regd_init(regulatory, ar->hw->wiphy,
1952  carl9170_reg_notifier);
1953  if (err)
1954  return err;
1955 
1956  if (modparam_noht) {
1957  carl9170_band_2GHz.ht_cap.ht_supported = false;
1958  carl9170_band_5GHz.ht_cap.ht_supported = false;
1959  }
1960 
1961  for (i = 0; i < ar->fw.vif_num; i++) {
1962  ar->vif_priv[i].id = i;
1963  ar->vif_priv[i].vif = NULL;
1964  }
1965 
1966  err = ieee80211_register_hw(ar->hw);
1967  if (err)
1968  return err;
1969 
1970  /* mac80211 interface is now registered */
1971  ar->registered = true;
1972 
1973  if (!ath_is_world_regd(regulatory))
1974  regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1975 
1976 #ifdef CONFIG_CARL9170_DEBUGFS
1977  carl9170_debugfs_register(ar);
1978 #endif /* CONFIG_CARL9170_DEBUGFS */
1979 
1980  err = carl9170_led_init(ar);
1981  if (err)
1982  goto err_unreg;
1983 
1984 #ifdef CONFIG_CARL9170_LEDS
1985  err = carl9170_led_register(ar);
1986  if (err)
1987  goto err_unreg;
1988 #endif /* CONFIG_CARL9170_LEDS */
1989 
1990 #ifdef CONFIG_CARL9170_WPC
1991  err = carl9170_register_wps_button(ar);
1992  if (err)
1993  goto err_unreg;
1994 #endif /* CONFIG_CARL9170_WPC */
1995 
1996 #ifdef CONFIG_CARL9170_HWRNG
1997  err = carl9170_register_hwrng(ar);
1998  if (err)
1999  goto err_unreg;
2000 #endif /* CONFIG_CARL9170_HWRNG */
2001 
2002  dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2003  wiphy_name(ar->hw->wiphy));
2004 
2005  return 0;
2006 
2007 err_unreg:
2008  carl9170_unregister(ar);
2009  return err;
2010 }
2011 
2012 void carl9170_unregister(struct ar9170 *ar)
2013 {
2014  if (!ar->registered)
2015  return;
2016 
2017  ar->registered = false;
2018 
2019 #ifdef CONFIG_CARL9170_LEDS
2020  carl9170_led_unregister(ar);
2021 #endif /* CONFIG_CARL9170_LEDS */
2022 
2023 #ifdef CONFIG_CARL9170_DEBUGFS
2024  carl9170_debugfs_unregister(ar);
2025 #endif /* CONFIG_CARL9170_DEBUGFS */
2026 
2027 #ifdef CONFIG_CARL9170_WPC
2028  if (ar->wps.pbc) {
2029  input_unregister_device(ar->wps.pbc);
2030  ar->wps.pbc = NULL;
2031  }
2032 #endif /* CONFIG_CARL9170_WPC */
2033 
2034 #ifdef CONFIG_CARL9170_HWRNG
2035  carl9170_unregister_hwrng(ar);
2036 #endif /* CONFIG_CARL9170_HWRNG */
2037 
2038  carl9170_cancel_worker(ar);
2040 
2041  ieee80211_unregister_hw(ar->hw);
2042 }
2043 
2044 void carl9170_free(struct ar9170 *ar)
2045 {
2046  WARN_ON(ar->registered);
2047  WARN_ON(IS_INITIALIZED(ar));
2048 
2049  kfree_skb(ar->rx_failover);
2050  ar->rx_failover = NULL;
2051 
2052  kfree(ar->mem_bitmap);
2053  ar->mem_bitmap = NULL;
2054 
2055  kfree(ar->survey);
2056  ar->survey = NULL;
2057 
2058  mutex_destroy(&ar->mutex);
2059 
2060  ieee80211_free_hw(ar->hw);
2061 }