Linux Kernel  3.7.1
mac.c
1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "hw.h"
18 #include "hw-ops.h"
19 #include <linux/export.h>
20 
21 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
22  struct ath9k_tx_queue_info *qi)
23 {
24  ath_dbg(ath9k_hw_common(ah), INTERRUPT,
25  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
26  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
27  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
28  ah->txurn_interrupt_mask);
29 
30  ENABLE_REGWRITE_BUFFER(ah);
31 
32  REG_WRITE(ah, AR_IMR_S0,
33  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
34  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
35  REG_WRITE(ah, AR_IMR_S1,
36  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
37  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
38 
39  ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
40  ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
41  REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
42 
43  REGWRITE_BUFFER_FLUSH(ah);
44 }
45 
46 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
47 {
48  return REG_READ(ah, AR_QTXDP(q));
49 }
50 EXPORT_SYMBOL(ath9k_hw_gettxbuf);
51 
52 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
53 {
54  REG_WRITE(ah, AR_QTXDP(q), txdp);
55 }
56 EXPORT_SYMBOL(ath9k_hw_puttxbuf);
57 
58 void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
59 {
60  ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
61  REG_WRITE(ah, AR_Q_TXE, 1 << q);
62 }
63 EXPORT_SYMBOL(ath9k_hw_txstart);
64 
65 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
66 {
67  u32 npend;
68 
69  npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
70  if (npend == 0) {
71 
72  if (REG_READ(ah, AR_Q_TXE) & (1 << q))
73  npend = 1;
74  }
75 
76  return npend;
77 }
78 EXPORT_SYMBOL(ath9k_hw_numtxpending);
79 
105 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
106 {
107  u32 txcfg, curLevel, newLevel;
108 
109  if (ah->tx_trig_level >= ah->config.max_txtrig_level)
110  return false;
111 
112  ath9k_hw_disable_interrupts(ah);
113 
114  txcfg = REG_READ(ah, AR_TXCFG);
115  curLevel = MS(txcfg, AR_FTRIG);
116  newLevel = curLevel;
117  if (bIncTrigLevel) {
118  if (curLevel < ah->config.max_txtrig_level)
119  newLevel++;
120  } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
121  newLevel--;
122  if (newLevel != curLevel)
123  REG_WRITE(ah, AR_TXCFG,
124  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
125 
126  ath9k_hw_enable_interrupts(ah);
127 
128  ah->tx_trig_level = newLevel;
129 
130  return newLevel != curLevel;
131 }
132 EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
133 
134 void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
135 {
136  int maxdelay = 1000;
137  int i, q;
138 
139  if (ah->curchan) {
140  if (IS_CHAN_HALF_RATE(ah->curchan))
141  maxdelay *= 2;
142  else if (IS_CHAN_QUARTER_RATE(ah->curchan))
143  maxdelay *= 4;
144  }
145 
146  REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
147 
148  REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
149  REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
150  REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
151 
152  for (q = 0; q < AR_NUM_QCU; q++) {
153  for (i = 0; i < maxdelay; i++) {
154  if (i)
155  udelay(5);
156 
157  if (!ath9k_hw_numtxpending(ah, q))
158  break;
159  }
160  }
161 
162  REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
163  REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
164  REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
165 
166  REG_WRITE(ah, AR_Q_TXD, 0);
167 }
168 EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
169 
170 bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
171 {
172 #define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */
173 #define ATH9K_TIME_QUANTUM 100 /* usec */
174  int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
175  int wait;
176 
177  REG_WRITE(ah, AR_Q_TXD, 1 << q);
178 
179  for (wait = wait_time; wait != 0; wait--) {
180  if (wait != wait_time)
181  udelay(ATH9K_TIME_QUANTUM);
182 
183  if (ath9k_hw_numtxpending(ah, q) == 0)
184  break;
185  }
186 
187  REG_WRITE(ah, AR_Q_TXD, 0);
188 
189  return wait != 0;
190 
191 #undef ATH9K_TX_STOP_DMA_TIMEOUT
192 #undef ATH9K_TIME_QUANTUM
193 }
194 EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
195 
196 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
197  const struct ath9k_tx_queue_info *qinfo)
198 {
199  u32 cw;
200  struct ath_common *common = ath9k_hw_common(ah);
201  struct ath9k_tx_queue_info *qi;
202 
203  qi = &ah->txq[q];
204  if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
205  ath_dbg(common, QUEUE,
206  "Set TXQ properties, inactive queue: %u\n", q);
207  return false;
208  }
209 
210  ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
211 
212  qi->tqi_ver = qinfo->tqi_ver;
213  qi->tqi_subtype = qinfo->tqi_subtype;
214  qi->tqi_qflags = qinfo->tqi_qflags;
215  qi->tqi_priority = qinfo->tqi_priority;
216  if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
217  qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
218  else
219  qi->tqi_aifs = INIT_AIFS;
220  if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
221  cw = min(qinfo->tqi_cwmin, 1024U);
222  qi->tqi_cwmin = 1;
223  while (qi->tqi_cwmin < cw)
224  qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
225  } else
226  qi->tqi_cwmin = qinfo->tqi_cwmin;
227  if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
228  cw = min(qinfo->tqi_cwmax, 1024U);
229  qi->tqi_cwmax = 1;
230  while (qi->tqi_cwmax < cw)
231  qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
232  } else
233  qi->tqi_cwmax = INIT_CWMAX;
234 
235  if (qinfo->tqi_shretry != 0)
236  qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
237  else
238  qi->tqi_shretry = INIT_SH_RETRY;
239  if (qinfo->tqi_lgretry != 0)
240  qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
241  else
242  qi->tqi_lgretry = INIT_LG_RETRY;
243  qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
244  qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
245  qi->tqi_burstTime = qinfo->tqi_burstTime;
246  qi->tqi_readyTime = qinfo->tqi_readyTime;
247 
248  switch (qinfo->tqi_subtype) {
249  case ATH9K_WME_UPSD:
250  if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
251  qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
252  break;
253  default:
254  break;
255  }
256 
257  return true;
258 }
259 EXPORT_SYMBOL(ath9k_hw_set_txq_props);
260 
261 bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
262  struct ath9k_tx_queue_info *qinfo)
263 {
264  struct ath_common *common = ath9k_hw_common(ah);
265  struct ath9k_tx_queue_info *qi;
266 
267  qi = &ah->txq[q];
268  if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
269  ath_dbg(common, QUEUE,
270  "Get TXQ properties, inactive queue: %u\n", q);
271  return false;
272  }
273 
274  qinfo->tqi_qflags = qi->tqi_qflags;
275  qinfo->tqi_ver = qi->tqi_ver;
276  qinfo->tqi_subtype = qi->tqi_subtype;
277  qinfo->tqi_qflags = qi->tqi_qflags;
278  qinfo->tqi_priority = qi->tqi_priority;
279  qinfo->tqi_aifs = qi->tqi_aifs;
280  qinfo->tqi_cwmin = qi->tqi_cwmin;
281  qinfo->tqi_cwmax = qi->tqi_cwmax;
282  qinfo->tqi_shretry = qi->tqi_shretry;
283  qinfo->tqi_lgretry = qi->tqi_lgretry;
284  qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
285  qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
286  qinfo->tqi_burstTime = qi->tqi_burstTime;
287  qinfo->tqi_readyTime = qi->tqi_readyTime;
288 
289  return true;
290 }
291 EXPORT_SYMBOL(ath9k_hw_get_txq_props);
292 
293 int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
294  const struct ath9k_tx_queue_info *qinfo)
295 {
296  struct ath_common *common = ath9k_hw_common(ah);
297  struct ath9k_tx_queue_info *qi;
298  int q;
299 
300  switch (type) {
301  case ATH9K_TX_QUEUE_BEACON:
302  q = ATH9K_NUM_TX_QUEUES - 1;
303  break;
304  case ATH9K_TX_QUEUE_CAB:
305  q = ATH9K_NUM_TX_QUEUES - 2;
306  break;
307  case ATH9K_TX_QUEUE_PSPOLL:
308  q = 1;
309  break;
310  case ATH9K_TX_QUEUE_UAPSD:
311  q = ATH9K_NUM_TX_QUEUES - 3;
312  break;
313  case ATH9K_TX_QUEUE_DATA:
314  for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
315  if (ah->txq[q].tqi_type ==
316  ATH9K_TX_QUEUE_INACTIVE)
317  break;
318  if (q == ATH9K_NUM_TX_QUEUES) {
319  ath_err(common, "No available TX queue\n");
320  return -1;
321  }
322  break;
323  default:
324  ath_err(common, "Invalid TX queue type: %u\n", type);
325  return -1;
326  }
327 
328  ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
329 
330  qi = &ah->txq[q];
331  if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
332  ath_err(common, "TX queue: %u already active\n", q);
333  return -1;
334  }
335  memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
336  qi->tqi_type = type;
337  qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
338  (void) ath9k_hw_set_txq_props(ah, q, qinfo);
339 
340  return q;
341 }
342 EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
343 
344 static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
345 {
346  ah->txok_interrupt_mask &= ~(1 << q);
347  ah->txerr_interrupt_mask &= ~(1 << q);
348  ah->txdesc_interrupt_mask &= ~(1 << q);
349  ah->txeol_interrupt_mask &= ~(1 << q);
350  ah->txurn_interrupt_mask &= ~(1 << q);
351 }
352 
353 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
354 {
355  struct ath_common *common = ath9k_hw_common(ah);
356  struct ath9k_tx_queue_info *qi;
357 
358  qi = &ah->txq[q];
359  if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
360  ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
361  return false;
362  }
363 
364  ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
365 
366  qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
367  ath9k_hw_clear_queue_interrupts(ah, q);
368  ath9k_hw_set_txq_interrupts(ah, qi);
369 
370  return true;
371 }
372 EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
373 
374 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
375 {
376  struct ath_common *common = ath9k_hw_common(ah);
377  struct ath9k_channel *chan = ah->curchan;
378  struct ath9k_tx_queue_info *qi;
379  u32 cwMin, chanCwMin, value;
380 
381  qi = &ah->txq[q];
382  if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
383  ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
384  return true;
385  }
386 
387  ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
388 
389  if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
390  if (chan && IS_CHAN_B(chan))
391  chanCwMin = INIT_CWMIN_11B;
392  else
393  chanCwMin = INIT_CWMIN;
394 
395  for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
396  } else
397  cwMin = qi->tqi_cwmin;
398 
399  ENABLE_REGWRITE_BUFFER(ah);
400 
401  REG_WRITE(ah, AR_DLCL_IFS(q),
402  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
403  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
404  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
405 
406  REG_WRITE(ah, AR_DRETRY_LIMIT(q),
407  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
408  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
409  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
410 
411  REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
412 
413  if (AR_SREV_9340(ah))
414  REG_WRITE(ah, AR_DMISC(q),
415  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
416  else
417  REG_WRITE(ah, AR_DMISC(q),
418  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
419 
420  if (qi->tqi_cbrPeriod) {
421  REG_WRITE(ah, AR_QCBRCFG(q),
422  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
423  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
424  REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
425  (qi->tqi_cbrOverflowLimit ?
426  AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
427  }
428  if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
429  REG_WRITE(ah, AR_QRDYTIMECFG(q),
430  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
431  AR_Q_RDYTIMECFG_EN);
432  }
433 
434  REG_WRITE(ah, AR_DCHNTIME(q),
435  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
436  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
437 
438  if (qi->tqi_burstTime
439  && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
440  REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
441 
442  if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
443  REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
444 
445  REGWRITE_BUFFER_FLUSH(ah);
446 
447  if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
448  REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
449 
450  switch (qi->tqi_type) {
451  case ATH9K_TX_QUEUE_BEACON:
452  ENABLE_REGWRITE_BUFFER(ah);
453 
454  REG_SET_BIT(ah, AR_QMISC(q),
455  AR_Q_MISC_FSP_DBA_GATED
456  | AR_Q_MISC_BEACON_USE
457  | AR_Q_MISC_CBR_INCR_DIS1);
458 
459  REG_SET_BIT(ah, AR_DMISC(q),
460  (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
461  AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
462  | AR_D_MISC_BEACON_USE
463  | AR_D_MISC_POST_FR_BKOFF_DIS);
464 
465  REGWRITE_BUFFER_FLUSH(ah);
466 
467  /*
468  * cwmin and cwmax should be 0 for beacon queue
469  * but not for IBSS as we would create an imbalance
470  * on beaconing fairness for participating nodes.
471  */
472  if (AR_SREV_9300_20_OR_LATER(ah) &&
473  ah->opmode != NL80211_IFTYPE_ADHOC) {
474  REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
475  | SM(0, AR_D_LCL_IFS_CWMAX)
476  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
477  }
478  break;
479  case ATH9K_TX_QUEUE_CAB:
480  ENABLE_REGWRITE_BUFFER(ah);
481 
482  REG_SET_BIT(ah, AR_QMISC(q),
483  AR_Q_MISC_FSP_DBA_GATED
484  | AR_Q_MISC_CBR_INCR_DIS1
485  | AR_Q_MISC_CBR_INCR_DIS0);
486  value = (qi->tqi_readyTime -
487  (ah->config.sw_beacon_response_time -
488  ah->config.dma_beacon_response_time) -
489  ah->config.additional_swba_backoff) * 1024;
490  REG_WRITE(ah, AR_QRDYTIMECFG(q),
491  value | AR_Q_RDYTIMECFG_EN);
492  REG_SET_BIT(ah, AR_DMISC(q),
493  (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
494  AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
495 
496  REGWRITE_BUFFER_FLUSH(ah);
497 
498  break;
499  case ATH9K_TX_QUEUE_PSPOLL:
500  REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
501  break;
502  case ATH9K_TX_QUEUE_UAPSD:
503  REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
504  break;
505  default:
506  break;
507  }
508 
509  if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
510  REG_SET_BIT(ah, AR_DMISC(q),
511  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
512  AR_D_MISC_ARB_LOCKOUT_CNTRL) |
513  AR_D_MISC_POST_FR_BKOFF_DIS);
514  }
515 
516  if (AR_SREV_9300_20_OR_LATER(ah))
517  REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
518 
519  ath9k_hw_clear_queue_interrupts(ah, q);
520  if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
521  ah->txok_interrupt_mask |= 1 << q;
522  ah->txerr_interrupt_mask |= 1 << q;
523  }
524  if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
525  ah->txdesc_interrupt_mask |= 1 << q;
526  if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
527  ah->txeol_interrupt_mask |= 1 << q;
528  if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
529  ah->txurn_interrupt_mask |= 1 << q;
530  ath9k_hw_set_txq_interrupts(ah, qi);
531 
532  return true;
533 }
534 EXPORT_SYMBOL(ath9k_hw_resettxqueue);
535 
536 int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
537  struct ath_rx_status *rs)
538 {
539  struct ar5416_desc ads;
540  struct ar5416_desc *adsp = AR5416DESC(ds);
541  u32 phyerr;
542 
543  if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
544  return -EINPROGRESS;
545 
546  ads.u.rx = adsp->u.rx;
547 
548  rs->rs_status = 0;
549  rs->rs_flags = 0;
550 
551  rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
552  rs->rs_tstamp = ads.AR_RcvTimestamp;
553 
554  if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
555  rs->rs_rssi = ATH9K_RSSI_BAD;
556  rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
557  rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
558  rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
559  rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
560  rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
561  rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
562  } else {
563  rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
564  rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
565  AR_RxRSSIAnt00);
566  rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
567  AR_RxRSSIAnt01);
568  rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
569  AR_RxRSSIAnt02);
570  rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
571  AR_RxRSSIAnt10);
572  rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
573  AR_RxRSSIAnt11);
574  rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
575  AR_RxRSSIAnt12);
576  }
577  if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
578  rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
579  else
580  rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
581 
582  rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
583  rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
584 
585  rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
586  rs->rs_moreaggr =
587  (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
588  rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
589  rs->rs_flags =
590  (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
591  rs->rs_flags |=
592  (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
593 
594  if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
595  rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
596  if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
597  rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
598  if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
599  rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
600 
601  if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
602  /*
603  * Treat these errors as mutually exclusive to avoid spurious
604  * extra error reports from the hardware. If a CRC error is
605  * reported, then decryption and MIC errors are irrelevant,
606  * the frame is going to be dropped either way
607  */
608  if (ads.ds_rxstatus8 & AR_CRCErr)
609  rs->rs_status |= ATH9K_RXERR_CRC;
610  else if (ads.ds_rxstatus8 & AR_PHYErr) {
611  rs->rs_status |= ATH9K_RXERR_PHY;
612  phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
613  rs->rs_phyerr = phyerr;
614  } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
615  rs->rs_status |= ATH9K_RXERR_DECRYPT;
616  else if (ads.ds_rxstatus8 & AR_MichaelErr)
617  rs->rs_status |= ATH9K_RXERR_MIC;
618  }
619 
620  if (ads.ds_rxstatus8 & AR_KeyMiss)
621  rs->rs_status |= ATH9K_RXERR_KEYMISS;
622 
623  return 0;
624 }
625 EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
626 
627 /*
628  * This can stop or re-enable RX.
629  *
630  * If bool is set this will kill any frame which is currently being
631  * transferred between the MAC and baseband and also prevent any new
632  * frames from getting started.
633  */
634 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
635 {
636  u32 reg;
637 
638  if (set) {
639  REG_SET_BIT(ah, AR_DIAG_SW,
640  (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
641 
642  if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
643  0, AH_WAIT_TIMEOUT)) {
644  REG_CLR_BIT(ah, AR_DIAG_SW,
645  (AR_DIAG_RX_DIS |
646  AR_DIAG_RX_ABORT));
647 
648  reg = REG_READ(ah, AR_OBS_BUS_1);
649  ath_err(ath9k_hw_common(ah),
650  "RX failed to go idle in 10 ms RXSM=0x%x\n",
651  reg);
652 
653  return false;
654  }
655  } else {
656  REG_CLR_BIT(ah, AR_DIAG_SW,
657  (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
658  }
659 
660  return true;
661 }
662 EXPORT_SYMBOL(ath9k_hw_setrxabort);
663 
664 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
665 {
666  REG_WRITE(ah, AR_RXDP, rxdp);
667 }
668 EXPORT_SYMBOL(ath9k_hw_putrxbuf);
669 
670 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
671 {
672  ath9k_enable_mib_counters(ah);
673 
674  ath9k_ani_reset(ah, is_scanning);
675 
676  REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
677 }
678 EXPORT_SYMBOL(ath9k_hw_startpcureceive);
679 
680 void ath9k_hw_abortpcurecv(struct ath_hw *ah)
681 {
682  REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
683 
684  ath9k_hw_disable_mib_counters(ah);
685 }
686 EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
687 
688 bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
689 {
690 #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
691  struct ath_common *common = ath9k_hw_common(ah);
692  u32 mac_status, last_mac_status = 0;
693  int i;
694 
695  /* Enable access to the DMA observation bus */
696  REG_WRITE(ah, AR_MACMISC,
697  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
698  (AR_MACMISC_MISC_OBS_BUS_1 <<
699  AR_MACMISC_MISC_OBS_BUS_MSB_S)));
700 
701  REG_WRITE(ah, AR_CR, AR_CR_RXD);
702 
703  /* Wait for rx enable bit to go low */
704  for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
705  if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
706  break;
707 
708  if (!AR_SREV_9300_20_OR_LATER(ah)) {
709  mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
710  if (mac_status == 0x1c0 && mac_status == last_mac_status) {
711  *reset = true;
712  break;
713  }
714 
715  last_mac_status = mac_status;
716  }
717 
718  udelay(AH_TIME_QUANTUM);
719  }
720 
721  if (i == 0) {
722  ath_err(common,
723  "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
724  AH_RX_STOP_DMA_TIMEOUT / 1000,
725  REG_READ(ah, AR_CR),
726  REG_READ(ah, AR_DIAG_SW),
727  REG_READ(ah, AR_DMADBG_7));
728  return false;
729  } else {
730  return true;
731  }
732 
733 #undef AH_RX_STOP_DMA_TIMEOUT
734 }
735 EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
736 
737 int ath9k_hw_beaconq_setup(struct ath_hw *ah)
738 {
739  struct ath9k_tx_queue_info qi;
740 
741  memset(&qi, 0, sizeof(qi));
742  qi.tqi_aifs = 1;
743  qi.tqi_cwmin = 0;
744  qi.tqi_cwmax = 0;
745 
746  if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
747  qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
748 
749  return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
750 }
751 EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
752 
753 bool ath9k_hw_intrpend(struct ath_hw *ah)
754 {
755  u32 host_isr;
756 
757  if (AR_SREV_9100(ah))
758  return true;
759 
760  host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
761 
762  if (((host_isr & AR_INTR_MAC_IRQ) ||
763  (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
764  (host_isr != AR_INTR_SPURIOUS))
765  return true;
766 
767  host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
768  if ((host_isr & AR_INTR_SYNC_DEFAULT)
769  && (host_isr != AR_INTR_SPURIOUS))
770  return true;
771 
772  return false;
773 }
774 EXPORT_SYMBOL(ath9k_hw_intrpend);
775 
776 void ath9k_hw_kill_interrupts(struct ath_hw *ah)
777 {
778  struct ath_common *common = ath9k_hw_common(ah);
779 
780  ath_dbg(common, INTERRUPT, "disable IER\n");
781  REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
782  (void) REG_READ(ah, AR_IER);
783  if (!AR_SREV_9100(ah)) {
784  REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
785  (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
786 
787  REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
788  (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
789  }
790 }
791 EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
792 
793 void ath9k_hw_disable_interrupts(struct ath_hw *ah)
794 {
795  if (!(ah->imask & ATH9K_INT_GLOBAL))
796  atomic_set(&ah->intr_ref_cnt, -1);
797  else
798  atomic_dec(&ah->intr_ref_cnt);
799 
800  ath9k_hw_kill_interrupts(ah);
801 }
802 EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
803 
804 void ath9k_hw_enable_interrupts(struct ath_hw *ah)
805 {
806  struct ath_common *common = ath9k_hw_common(ah);
807  u32 sync_default = AR_INTR_SYNC_DEFAULT;
808  u32 async_mask;
809 
810  if (!(ah->imask & ATH9K_INT_GLOBAL))
811  return;
812 
813  if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
814  ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
815  atomic_read(&ah->intr_ref_cnt));
816  return;
817  }
818 
819  if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
820  sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
821 
822  async_mask = AR_INTR_MAC_IRQ;
823 
824  if (ah->imask & ATH9K_INT_MCI)
825  async_mask |= AR_INTR_ASYNC_MASK_MCI;
826 
827  ath_dbg(common, INTERRUPT, "enable IER\n");
828  REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
829  if (!AR_SREV_9100(ah)) {
830  REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
831  REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
832 
833  REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
834  REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
835  }
836  ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
837  REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
838 }
839 EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
840 
841 void ath9k_hw_set_interrupts(struct ath_hw *ah)
842 {
843  enum ath9k_int ints = ah->imask;
844  u32 mask, mask2;
845  struct ath9k_hw_capabilities *pCap = &ah->caps;
846  struct ath_common *common = ath9k_hw_common(ah);
847 
848  if (!(ints & ATH9K_INT_GLOBAL))
849  ath9k_hw_disable_interrupts(ah);
850 
851  ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
852 
853  mask = ints & ATH9K_INT_COMMON;
854  mask2 = 0;
855 
856  if (ints & ATH9K_INT_TX) {
857  if (ah->config.tx_intr_mitigation)
858  mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
859  else {
860  if (ah->txok_interrupt_mask)
861  mask |= AR_IMR_TXOK;
862  if (ah->txdesc_interrupt_mask)
863  mask |= AR_IMR_TXDESC;
864  }
865  if (ah->txerr_interrupt_mask)
866  mask |= AR_IMR_TXERR;
867  if (ah->txeol_interrupt_mask)
868  mask |= AR_IMR_TXEOL;
869  }
870  if (ints & ATH9K_INT_RX) {
871  if (AR_SREV_9300_20_OR_LATER(ah)) {
872  mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
873  if (ah->config.rx_intr_mitigation) {
874  mask &= ~AR_IMR_RXOK_LP;
875  mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
876  } else {
877  mask |= AR_IMR_RXOK_LP;
878  }
879  } else {
880  if (ah->config.rx_intr_mitigation)
881  mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
882  else
883  mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
884  }
885  if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
886  mask |= AR_IMR_GENTMR;
887  }
888 
889  if (ints & ATH9K_INT_GENTIMER)
890  mask |= AR_IMR_GENTMR;
891 
892  if (ints & (ATH9K_INT_BMISC)) {
893  mask |= AR_IMR_BCNMISC;
894  if (ints & ATH9K_INT_TIM)
895  mask2 |= AR_IMR_S2_TIM;
896  if (ints & ATH9K_INT_DTIM)
897  mask2 |= AR_IMR_S2_DTIM;
898  if (ints & ATH9K_INT_DTIMSYNC)
899  mask2 |= AR_IMR_S2_DTIMSYNC;
900  if (ints & ATH9K_INT_CABEND)
901  mask2 |= AR_IMR_S2_CABEND;
902  if (ints & ATH9K_INT_TSFOOR)
903  mask2 |= AR_IMR_S2_TSFOOR;
904  }
905 
906  if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
907  mask |= AR_IMR_BCNMISC;
908  if (ints & ATH9K_INT_GTT)
909  mask2 |= AR_IMR_S2_GTT;
910  if (ints & ATH9K_INT_CST)
911  mask2 |= AR_IMR_S2_CST;
912  }
913 
914  ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
915  REG_WRITE(ah, AR_IMR, mask);
916  ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
917  AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
918  AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
919  ah->imrs2_reg |= mask2;
920  REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
921 
922  if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
923  if (ints & ATH9K_INT_TIM_TIMER)
924  REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
925  else
926  REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
927  }
928 
929  return;
930 }