dma.c (Linux Kernel 3.7.1)
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
}

static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
        unsigned int i;

        ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

        /*
         * It may take some time to disable the DMA receive unit
         */
        for (i = 1000; i > 0 &&
                        (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
                        i--)
                udelay(100);

        if (!i)
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "failed to stop RX DMA !\n");

        return i ? 0 : -EBUSY;
}

u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
        if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "tried to set RXDP while rx was active !\n");
                return -EIO;
        }

        ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
        return 0;
}
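
/*
 * A minimal usage sketch of the three RX helpers above (illustrative;
 * "first_rxdesc_daddr" is a hypothetical DMA address of the first RX
 * descriptor, and error handling is elided):
 *
 *      if (ath5k_hw_set_rxdp(ah, first_rxdesc_daddr))
 *              return;                         // -EIO: RX still active
 *      ath5k_hw_start_rx_dma(ah);              // sets AR5K_CR_RXE
 *
 * To move RXDP later, reception must be stopped first; that is what
 * ath5k_hw_stop_rx_dma() is for (it polls AR5K_CR until AR5K_CR_RXE
 * clears, giving up after roughly 100ms).
 */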

/**********\
* Transmit *
\**********/

int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        u32 tx_queue;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set the queue by type on 5210
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
                                        AR5K_BSR);
                        break;
                case AR5K_TX_QUEUE_CAB:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
                                        AR5K_BCR_BDMAE, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }
                /* Start queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /* Return if queue is disabled */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
                        return -EIO;

                /* Start queue */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
        }

        return 0;
}
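
/*
 * On 5211+ each QCU queue is controlled by a single bit of the TXE/TXD
 * registers, so the AR5K_REG_READ_Q/WRITE_Q helpers used above reduce to
 * per-queue bit operations. A sketch of the assumed expansion (see the
 * macro definitions in ath5k.h):
 *
 *      AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)   // reg & (1 << queue)
 *      AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue)  // writes (1 << queue)
 *
 * Starting queue 2 therefore sets only bit 2 of AR5K_QCU_TXE and leaves
 * the other queues untouched.
 */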

static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        unsigned int i = 40;
        u32 tx_queue, pending;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set by queue type
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        /* XXX Fix me... */
                        tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, 0, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }

                /* Stop queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {

                /*
                 * Enable DCU early termination to quickly
                 * flush any pending frames from QCU
                 */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_DCU_EARLY);

                /*
                 * Schedule TX disable and wait until queue is empty
                 */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

                /* Wait for queue to stop */
                for (i = 1000; i > 0 &&
                                (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
                                i--)
                        udelay(100);

                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "queue %i didn't stop !\n", queue);

                /* Check for pending frames */
                i = 1000;
                do {
                        pending = ath5k_hw_reg_read(ah,
                                AR5K_QUEUE_STATUS(queue)) &
                                AR5K_QCU_STS_FRMPENDCNT;
                        udelay(100);
                } while (--i && pending);

                /* For 2413+ order PCU to drop packets using
                 * QUIET mechanism */
                if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
                    pending) {
                        /* Set periodicity and duration */
                        ath5k_hw_reg_write(ah,
                                AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
                                AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
                                AR5K_QUIET_CTL2);

                        /* Enable quiet period for current TSF */
                        ath5k_hw_reg_write(ah,
                                AR5K_QUIET_CTL1_QT_EN |
                                AR5K_REG_SM(ath5k_hw_reg_read(ah,
                                                AR5K_TSF_L32_5211) >> 10,
                                                AR5K_QUIET_CTL1_NEXT_QT_TSF),
                                AR5K_QUIET_CTL1);

                        /* Force channel idle high */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        /* Wait a while and disable mechanism */
                        udelay(400);
                        AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
                                                AR5K_QUIET_CTL1_QT_EN);

                        /* Re-check for pending frames */
                        i = 100;
                        do {
                                pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                                udelay(100);
                        } while (--i && pending);

                        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        if (pending)
                                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "quiet mechanism didn't work q:%i !\n",
                                        queue);
                }

                /*
                 * Disable DCU early termination
                 */
                AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_DCU_EARLY);

                /* Clear register */
                ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
                if (pending) {
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "tx dma didn't stop (q:%i, frm:%i) !\n",
                                queue, pending);
                        return -EBUSY;
                }
        }

        /* TODO: Check for success on 5210 else return error */
        return 0;
}
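
/*
 * Both stop functions above follow the same bounded busy-wait idiom:
 * request the stop, poll status with udelay(), and report -EBUSY if the
 * hardware never complies. In sketch form (hypothetical helper names):
 *
 *      request_stop(ah);
 *      for (i = 1000; i > 0 && still_busy(ah); i--)
 *              udelay(100);                    // bounded: ~100ms worst case
 *      return i ? 0 : -EBUSY;
 *
 * The QUIET mechanism for 2413+ is a second, stronger attempt layered on
 * top of this idiom when frames remain pending after the first wait.
 */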

int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
        int ret;
        ret = ath5k_hw_stop_tx_dma(ah, queue);
        if (ret) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                        "beacon queue didn't stop !\n");
                return -EIO;
        }
        return 0;
}

u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Get the transmit queue descriptor pointer from the selected queue
         */
        /* 5210 doesn't have QCU */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return 0xffffffff;
                }
        } else {
                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        return ath5k_hw_reg_read(ah, tx_reg);
}

int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Set the transmit queue descriptor pointer register by type
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                /*
                 * Set the transmit queue descriptor pointer for
                 * the selected queue on QCU for 5211+
                 * (this won't work if the queue is still active)
                 */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        return -EIO;

                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        /* Set descriptor pointer */
        ath5k_hw_reg_write(ah, phys_addr, tx_reg);

        return 0;
}
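
/*
 * A usage sketch modeled on the driver's beacon path (illustrative;
 * "bf_daddr" is a hypothetical descriptor DMA address):
 * ath5k_hw_set_txdp() refuses to touch TXDP while the queue is still
 * scheduled, so callers stop the queue first, reload the pointer, then
 * restart DMA.
 *
 *      ath5k_hw_stop_beacon_queue(ah, queue);
 *      if (ath5k_hw_set_txdp(ah, queue, bf_daddr))
 *              return;                         // -EIO: queue still active
 *      ath5k_hw_start_tx_dma(ah, queue);
 */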

int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
        u32 trigger_level, imr;
        int ret = -EIO;

        /*
         * Disable interrupts by setting the mask
         */
        imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
                        AR5K_TXCFG_TXFULL);

        if (!increase) {
                if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
                        goto done;
        } else
                trigger_level +=
                        ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

        /*
         * Update trigger level on success
         */
        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
        else
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                                AR5K_TXCFG_TXFULL, trigger_level);

        ret = 0;

done:
        /*
         * Restore interrupt mask
         */
        ath5k_hw_set_imr(ah, imr);

        return ret;
}
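
/*
 * A worked example of the trigger-level arithmetic above, writing MIN and
 * MAX for AR5K_TUNE_MIN/MAX_TX_FIFO_THRES (the concrete values are tuning
 * constants in ath5k.h; the 37 below is only an assumed illustration):
 *
 *      increase: level += (MAX - level) / 2;   // halve distance to MAX
 *      decrease: if (--level < MIN) goto done; // keep old level, ret -EIO
 *
 * With MAX = 37, a level of 9 grows to 9 + (37 - 9) / 2 = 23, then 30,
 * then 33, converging on MAX without reaching it, while decreases step
 * down one unit at a time and bail out at the minimum.
 */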

/*******************\
* Interrupt masking *
\*******************/

bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
        u32 data = 0;

        /*
         * Read interrupt status from Primary Interrupt
         * Register.
         *
         * Note: PISR/SISR Not available on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                u32 isr = 0;
                isr = ath5k_hw_reg_read(ah, AR5K_ISR);
                if (unlikely(isr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = isr;
                        return -ENODEV;
                }

                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

                /* Handle INT_FATAL */
                if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
                                                | AR5K_ISR_DPERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*
                 * XXX: BMISS interrupts may occur after association.
                 * I found this on 5210 code but it needs testing. If this is
                 * true we should disable them before assoc and re-enable them
                 * after a successful assoc + some jiffies.
                interrupt_mask &= ~AR5K_INT_BMISS;
                 */

                data = isr;
        } else {
                u32 pisr = 0;
                u32 pisr_clear = 0;
                u32 sisr0 = 0;
                u32 sisr1 = 0;
                u32 sisr2 = 0;
                u32 sisr3 = 0;
                u32 sisr4 = 0;

                /* Read PISR and SISRs... */
                pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
                if (unlikely(pisr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = pisr;
                        return -ENODEV;
                }

                sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
                sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
                sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
                sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
                sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

                /*
                 * PISR holds the logical OR of interrupt bits
                 * from SISR registers:
                 *
                 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
                 *                      per-queue bits on SISR0
                 *
                 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
                 *                      per-queue bits on SISR1
                 *
                 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
                 *
                 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
                 *
                 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
                 *              BCN_TIMEOUT, CAB_TIMEOUT and DTIM
                 *              (and TSFOOR ?) bits on SISR2
                 *
                 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
                 *                      QCBRURN per-queue bits on SISR3
                 *
                 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
                 *
                 * If we clear these bits on PISR we'll also clear all
                 * related bits from SISRs, e.g. if we write the TXOK bit on
                 * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
                 * interrupt got fired for another queue while we were reading
                 * the interrupt registers and we write back the TXOK bit on
                 * PISR we'll lose it. So make sure that we don't write back
                 * on PISR any bits that come from SISRs. Clearing them from
                 * SISRs will also clear PISR so no need to worry here.
                 */

                pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;

                /*
                 * Write to clear them...
                 * Note: This means that each bit we write back
                 * to the registers will get cleared, leaving the
                 * rest unaffected. So this won't affect new interrupts
                 * we didn't catch while reading/processing; we'll get
                 * them next time get_isr gets called.
                 */
                ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
                ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
                ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
                ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
                ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
                ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
                /* Flush previous write */
                ath5k_hw_reg_read(ah, AR5K_PISR);

                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;

                /* We treat TXOK, TXDESC, TXERR and TXEOL
                 * the same way (schedule the tx tasklet)
                 * so we track them all together per queue */
                if (pisr & AR5K_ISR_TXOK)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                                AR5K_SISR0_QCU_TXOK);

                if (pisr & AR5K_ISR_TXDESC)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                                AR5K_SISR0_QCU_TXDESC);

                if (pisr & AR5K_ISR_TXERR)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                                AR5K_SISR1_QCU_TXERR);

                if (pisr & AR5K_ISR_TXEOL)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                                AR5K_SISR1_QCU_TXEOL);

                /* Currently this is not very useful since we treat
                 * all queues the same way if we get a TXURN (update
                 * tx trigger level) but we might need it later on */
                if (pisr & AR5K_ISR_TXURN)
                        ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
                                                AR5K_SISR2_QCU_TXURN);

                /* Misc Beacon related interrupts */

                /* For AR5211 */
                if (pisr & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;

                /* For AR5212+ */
                if (pisr & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
                                *interrupt_mask |= AR5K_INT_DTIM;
                        if (sisr2 & AR5K_SISR2_DTIM_SYNC)
                                *interrupt_mask |= AR5K_INT_DTIM_SYNC;
                        if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
                        if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }

                /* Below interrupts are unlikely to happen */

                /* HIU = Host Interface Unit (PCI etc.)
                 * Can be one of MCABT, SSERR, DPERR from SISR2 */
                if (unlikely(pisr & (AR5K_ISR_HIUERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /* Beacon Not Ready */
                if (unlikely(pisr & (AR5K_ISR_BNR)))
                        *interrupt_mask |= AR5K_INT_BNR;

                /* A queue got CBR overrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
                        ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
                                                AR5K_SISR3_QCBRORN);
                }

                /* A queue got CBR underrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
                        ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
                                                AR5K_SISR3_QCBRURN);
                }

                /* A queue got triggered */
                if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
                        ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
                                                AR5K_SISR4_QTRIG);
                }

                data = pisr;
        }

        /*
         * In case we didn't handle anything,
         * print the register value.
         */
        if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
                ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

        return 0;
}
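
/*
 * A caller-side sketch of the two helpers above (illustrative, not a
 * copy of the driver's real interrupt handler; "schedule_reset" stands
 * in for whatever recovery the caller performs):
 *
 *      enum ath5k_int status;
 *
 *      while (ath5k_hw_is_intr_pending(ah)) {
 *              if (ath5k_hw_get_isr(ah, &status))
 *                      break;                  // -ENODEV: card gone
 *              if (status & AR5K_INT_FATAL)
 *                      schedule_reset(ah);     // hypothetical helper
 *              // ... dispatch the remaining status bits ...
 *      }
 */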

enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
        enum ath5k_int old_mask, int_mask;

        old_mask = ah->ah_imr;

        /*
         * Disable card interrupts to prevent any race conditions
         * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
         * is set again on the new mask).
         */
        if (old_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        /*
         * Add additional, chipset-dependent interrupt mask flags
         * and write them to the IMR (interrupt mask register).
         */
        int_mask = new_mask & AR5K_INT_COMMON;

        if (ah->ah_version != AR5K_AR5210) {
                /* Preserve per queue TXURN interrupt mask */
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;

                /* Fatal interrupt abstraction for 5211+ */
                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }

                /* Misc beacon related interrupts */
                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;

                if (new_mask & AR5K_INT_TIM)
                        simr2 |= AR5K_SISR2_TIM;
                if (new_mask & AR5K_INT_DTIM)
                        simr2 |= AR5K_SISR2_DTIM;
                if (new_mask & AR5K_INT_DTIM_SYNC)
                        simr2 |= AR5K_SISR2_DTIM_SYNC;
                if (new_mask & AR5K_INT_BCN_TIMEOUT)
                        simr2 |= AR5K_SISR2_BCN_TIMEOUT;
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;

                /* Beacon Not Ready */
                if (new_mask & AR5K_INT_BNR)
                        int_mask |= AR5K_INT_BNR;

                /* Note: Per queue interrupt masks
                 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

        } else {
                /* Fatal interrupt abstraction for 5210 */
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

                /* Only common interrupts left for 5210 (no SIMRs) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }

        /* If RXNOFRM interrupt is masked disable it
         * by setting AR5K_RXNOFRM to zero */
        if (!(new_mask & AR5K_INT_RXNOFRM))
                ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

        /* Store new interrupt mask */
        ah->ah_imr = new_mask;

        /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
        if (new_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        return old_mask;
}
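
/*
 * Because ath5k_hw_set_imr() returns the previous mask, it doubles as a
 * save/restore primitive for short critical sections, exactly as
 * ath5k_hw_update_tx_triglevel() above uses it:
 *
 *      u32 imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
 *      // ... touch registers without racing the interrupt handler ...
 *      ath5k_hw_set_imr(ah, imr);
 */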

/********************\
 Init/Stop functions
\********************/

void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
        /*
         * Set Rx/Tx DMA Configuration
         *
         * Set standard DMA size (128). Note that
         * a DMA size of 512 causes rx overruns and tx errors
         * on pci-e cards (tested on 5424 but since rx overruns
         * also occur on 5416/5418 with madwifi we set 128
         * for all PCI-E cards to be safe).
         *
         * XXX: need to check 5210 for this
         * TODO: Check out tx trigger level, it's always 64 on dumps but I
         * guess we can tweak it and see how it goes ;-)
         */
        if (ah->ah_version != AR5K_AR5210) {
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                        AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
                AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
                        AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
        }

        /* Pre-enable interrupts on 5211/5212 */
        if (ah->ah_version != AR5K_AR5210)
                ath5k_hw_set_imr(ah, ah->ah_imr);

}

int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
        int i, qmax, err;
        err = 0;

        /* Disable interrupts */
        ath5k_hw_set_imr(ah, 0);

        /* Stop rx dma */
        err = ath5k_hw_stop_rx_dma(ah);
        if (err)
                return err;

        /* Clear any pending interrupts
         * and disable tx dma */
        if (ah->ah_version != AR5K_AR5210) {
                ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
                qmax = AR5K_NUM_TX_QUEUES;
        } else {
                /* PISR/SISR Not available on 5210 */
                ath5k_hw_reg_read(ah, AR5K_ISR);
                qmax = AR5K_NUM_TX_QUEUES_NOQCU;
        }

        for (i = 0; i < qmax; i++) {
                err = ath5k_hw_stop_tx_dma(ah, i);
                /* -EINVAL -> queue inactive */
                if (err && err != -EINVAL)
                        return err;
        }

        return 0;
}
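
/*
 * A rough sketch of where the two entry points above sit in the driver's
 * life cycle (the surrounding step names are hypothetical):
 *
 *      ath5k_hw_dma_stop(ah);          // mask IRQs, halt RX and TX DMA
 *      reset_hardware(ah);             // chip reset, PHY setup, ...
 *      ath5k_hw_dma_init(ah);          // 128B DMA size, re-arm IMR
 *      ath5k_hw_set_rxdp(ah, rxdp);    // reload descriptors
 *      ath5k_hw_start_rx_dma(ah);      // and resume reception
 */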