/*
 * Fetch the struct ath_buf pointer that the rx path stashes in an
 * skb's control buffer (skb->cb).  Evaluates to an lvalue, so it is
 * used for both load and store of the association.
 *
 * The argument is parenthesized so that expansions with a non-trivial
 * expression (cast, pointer arithmetic) still bind correctly.
 */
#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)(__skb)->cb))
23 static inline bool ath9k_check_auto_sleep(
struct ath_softc *
sc)
77 sc->
rx.rxotherant = 0;
80 static void ath_opmode_init(
struct ath_softc *sc)
83 struct ath_common *common = ath9k_hw_common(ah);
98 mfilt[0] = mfilt[1] = ~0;
102 static bool ath_rx_edma_buf_link(
struct ath_softc *sc,
110 rx_edma = &sc->
rx.rx_edma[qtype];
115 list_del_init(&bf->
list);
131 static void ath_rx_addbuffer_edma(
struct ath_softc *sc,
137 if (list_empty(&sc->
rx.rxbuf)) {
143 if (!ath_rx_edma_buf_link(sc, qtype))
155 rx_edma = &sc->rx.rx_edma[qtype];
164 static void ath_rx_edma_cleanup(
struct ath_softc *sc)
167 struct ath_common *common = ath9k_hw_common(ah);
184 INIT_LIST_HEAD(&sc->
rx.rxbuf);
190 static void ath_rx_edma_init_queue(
struct ath_rx_edma *rx_edma,
int size)
192 skb_queue_head_init(&rx_edma->
rx_fifo);
196 static int ath_rx_edma_init(
struct ath_softc *sc,
int nbufs)
206 ah->
caps.rx_status_len);
209 ah->
caps.rx_lp_qdepth);
211 ah->
caps.rx_hp_qdepth);
213 size =
sizeof(
struct ath_buf) * nbufs;
218 INIT_LIST_HEAD(&sc->
rx.rxbuf);
219 sc->
rx.rx_bufptr =
bf;
221 for (
i = 0;
i < nbufs;
i++, bf++) {
240 "dma_mapping_error() on RX init\n");
251 ath_rx_edma_cleanup(sc);
255 static void ath_edma_start_recv(
struct ath_softc *sc)
257 spin_lock_bh(&sc->
rx.rxbuflock);
259 ath9k_hw_rxena(sc->
sc_ah);
271 spin_unlock_bh(&sc->
rx.rxbuflock);
274 static void ath_edma_stop_recv(
struct ath_softc *sc)
292 sc->
sc_ah->caps.rx_status_len;
295 return ath_rx_edma_init(sc, nbufs);
306 "failed to allocate rx descriptors: %d\n",
329 "dma_mapping_error() on RX init\n");
347 struct ath_common *common = ath9k_hw_common(ah);
352 ath_rx_edma_cleanup(sc);
367 if (sc->
rx.rxdma.dd_desc_len != 0)
406 if (sc->
sc_ah->is_monitoring)
423 if (conf_is_ht(&sc->
hw->conf))
446 ath_edma_start_recv(sc);
450 spin_lock_bh(&sc->
rx.rxbuflock);
451 if (list_empty(&sc->
rx.rxbuf))
456 ath_rx_buf_link(sc, bf);
460 if (list_empty(&sc->
rx.rxbuf))
471 spin_unlock_bh(&sc->
rx.rxbuflock);
481 spin_lock_bh(&sc->
rx.rxbuflock);
487 ath_edma_stop_recv(sc);
490 spin_unlock_bh(&sc->
rx.rxbuflock);
495 "Could not stop RX, we could be "
496 "confusing the DMA engine when we start RX up\n");
499 return stopped && !reset;
511 static bool ath_beacon_dtim_pending_cab(
struct sk_buff *skb)
519 pos = mgmt->
u.beacon.variable;
522 while (pos + 2 < end) {
525 if (pos + elen > end)
529 if (elen <
sizeof(*tim))
547 if (skb->
len < 24 + 8 + 2 + 2)
555 "Reconfigure Beacon timers based on timestamp from the AP\n");
559 if (ath_beacon_dtim_pending_cab(skb)) {
568 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
580 ath_dbg(common,
PS,
"PS wait for CAB frames timed out\n");
584 static void ath_rx_ps(
struct ath_softc *sc,
struct sk_buff *skb,
bool mybeacon)
594 ath_rx_ps_beacon(sc, skb);
598 is_multicast_ether_addr(hdr->
addr1) &&
606 "All PS CAB frames received, back to sleep\n");
608 !is_multicast_ether_addr(hdr->
addr1) &&
612 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
620 static bool ath_edma_get_buffers(
struct ath_softc *sc,
627 struct ath_common *common = ath9k_hw_common(ah);
632 skb = skb_peek(&rx_edma->
rx_fifo);
650 __skb_unlink(skb, &rx_edma->
rx_fifo);
654 ath_rx_edma_buf_link(sc, qtype);
656 skb = skb_peek(&rx_edma->
rx_fifo);
661 __skb_unlink(skb, &rx_edma->
rx_fifo);
663 ath_rx_edma_buf_link(sc, qtype);
679 while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
692 struct ath_common *common = ath9k_hw_common(ah);
697 if (list_empty(&sc->
rx.rxbuf)) {
722 memset(&trs, 0,
sizeof(trs));
723 if (list_is_last(&bf->
list, &sc->
rx.rxbuf)) {
763 static bool ath9k_rx_accept(
struct ath_common *common,
770 bool is_mc, is_valid_tkip, strip_mic,
mic_error;
773 u8 rx_status_len = ah->
caps.rx_status_len;
777 is_mc = !!is_multicast_ether_addr(hdr->
addr1);
780 strip_mic = is_valid_tkip && ieee80211_is_data(
fc) &&
781 ieee80211_has_protected(
fc) &&
814 mic_error = is_valid_tkip && !ieee80211_is_ctl(
fc) &&
815 !ieee80211_has_morefrags(
fc) &&
837 *decrypt_error =
true;
870 static int ath9k_process_rate(
struct ath_common *common,
880 band = hw->
conf.channel->band;
881 sband = hw->
wiphy->bands[band];
883 if (rx_stats->
rs_rate & 0x80) {
911 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
917 static void ath9k_process_rssi(
struct ath_common *common,
942 ah->
stats.avgbrssi = rssi;
950 static int ath9k_rx_skb_preprocess(
struct ath_common *common,
963 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
970 ath9k_process_rssi(common, hw, hdr, rx_stats);
972 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
975 rx_status->
band = hw->
conf.channel->band;
976 rx_status->
freq = hw->
conf.channel->center_freq;
986 static void ath9k_rx_skb_postprocess(
struct ath_common *common,
1012 padsize = padpos & 3;
1013 if (padsize && skb->
len>=padpos+padsize+
FCS_LEN) {
1021 ieee80211_has_protected(fc)) {
1023 }
else if (ieee80211_has_protected(fc)
1024 && !decrypt_error && skb->
len >= hdrlen + 4) {
1025 keyix = skb->
data[hdrlen + 3] >> 6;
1027 if (
test_bit(keyix, common->keymap))
1032 ieee80211_is_mgmt(fc))
1040 struct sk_buff *skb =
NULL, *requeue_skb, *hdr_skb;
1043 struct ath_common *common = ath9k_hw_common(ah);
1051 u8 rx_status_len = ah->
caps.rx_status_len;
1054 unsigned long flags;
1062 spin_lock_bh(&sc->
rx.rxbuflock);
1065 tsf_lower = tsf & 0xffffffff;
1068 bool decrypt_error =
false;
1073 memset(&rs, 0,
sizeof(rs));
1075 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1077 bf = ath_get_next_rx_buf(sc, &rs);
1091 hdr_skb = sc->
rx.frag;
1095 hdr = (
struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1096 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1099 if (!is_zero_ether_addr(common->
curbssid) &&
1117 goto requeue_drop_frag;
1125 rxs->
mactime -= 0x100000000ULL;
1129 rxs->
mactime += 0x100000000ULL;
1131 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1132 rxs, &decrypt_error);
1134 goto requeue_drop_frag;
1150 goto requeue_drop_frag;
1159 if (ah->
caps.rx_status_len)
1163 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1164 rxs, decrypt_error);
1176 ath_err(common,
"dma_mapping_error() on RX\n");
1200 int space = skb->
len - skb_tailroom(hdr_skb);
1205 goto requeue_drop_frag;
1210 skb_copy_from_linear_data(skb,
skb_put(hdr_skb, skb->
len),
1224 if (++sc->
rx.rxotherant >= 3)
1227 sc->
rx.rxotherant = 0;
1239 ath9k_check_auto_sleep(sc))
1241 spin_unlock_irqrestore(&sc->
sc_pm_lock, flags);
1256 ath_rx_edma_buf_link(sc, qtype);
1258 list_move_tail(&bf->
list, &sc->
rx.rxbuf);
1259 ath_rx_buf_link(sc, bf);
1265 spin_unlock_bh(&sc->
rx.rxbuflock);