static const int subtype_txq_to_hwq[] = {
	[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
	[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
	[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
	[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
};
#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
		qi.tqi_subtype = subtype_txq_to_hwq[subtype];	\
		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_physCompBuf = 0;				\
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
				TXQ_FLAG_TXDESCINT_ENABLE;	\
	} while (0)
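/*
 * Usage sketch, mirroring what ath9k_htc_txq_setup() does later in
 * this file (the local variable names here are assumptions): the
 * macro fills a stack ath9k_tx_queue_info with EDCA defaults before
 * the queue is programmed into the hardware.
 *
 *	struct ath9k_tx_queue_info qi;
 *	int qnum;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	ATH9K_HTC_INIT_TXQ(subtype);
 *	qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
 */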
	spin_lock_bh(&priv->tx.tx_lock);
	priv->tx.queued_cnt++;
	spin_unlock_bh(&priv->tx.tx_lock);

	spin_lock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);

	spin_lock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);

	spin_lock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);
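/*
 * tx.tx_lock serializes the TX bookkeeping above: queued_cnt, the TX
 * slot state and the queue-stop flags only change under it. A
 * plausible stop/wake pattern built on the counter (the threshold
 * name is an assumption; ieee80211_stop_queues() is the stock
 * mac80211 call) would look like:
 *
 *	spin_lock_bh(&priv->tx.tx_lock);
 *	if (priv->tx.queued_cnt >= ATH9K_HTC_TX_THRESHOLD)
 *		ieee80211_stop_queues(priv->hw);
 *	spin_unlock_bh(&priv->tx.tx_lock);
 */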
	if (epid == priv->mgmt_ep)
		epid_queue = &priv->tx.mgmt_ep_queue;
	else if (epid == priv->cab_ep)
		epid_queue = &priv->tx.cab_ep_queue;
	else if (epid == priv->data_be_ep)
		epid_queue = &priv->tx.data_be_queue;
	else if (epid == priv->data_bk_ep)
		epid_queue = &priv->tx.data_bk_queue;
	else if (epid == priv->data_vi_ep)
		epid_queue = &priv->tx.data_vi_queue;
	else if (epid == priv->data_vo_ep)
		epid_queue = &priv->tx.data_vo_queue;
	else
		ath_err(common, "Invalid EPID: %d\n", epid);
	tx_ctl = HTC_SKB_CB(skb);

	ath_err(common, "Unsupported EPID: %d\n", tx_ctl->epid);

	ath_err(common, "Unable to update hardware queue %u!\n", qnum);
static void ath9k_htc_tx_mgmt(struct ath9k_htc_priv *priv,
			      struct ath9k_htc_vif *avp,
			      struct sk_buff *skb,
			      u8 sta_idx, u8 vif_idx, u8 slot)
{
	tx_ctl = HTC_SKB_CB(skb);

	memset(tx_ctl, 0, sizeof(*tx_ctl));

	mgmt->u.probe_resp.timestamp = avp->tsfadjust;
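/*
 * Probe responses get the per-VIF TSF adjustment as their timestamp,
 * keeping them consistent with the (staggered) beacon TSF of the
 * interface they are sent from.
 */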
static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
			      struct ieee80211_vif *vif,
			      struct sk_buff *skb,
			      u8 sta_idx, u8 vif_idx, u8 slot,
			      bool is_cab)
{
	tx_ctl = HTC_SKB_CB(skb);

	memset(tx_ctl, 0, sizeof(*tx_ctl));

	tx_hdr.node_idx = sta_idx;
	tx_hdr.vif_idx = vif_idx;
	tx_hdr.cookie = slot;
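/*
 * The cookie carries the host TX slot through the firmware: the WMI
 * TX-status event echoes it back, which is how
 * ath9k_htc_tx_get_packet() later matches a completion to its skb.
 */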
	qc = ieee80211_get_qos_ctl(hdr);

	if (priv->hw->wiphy->rts_threshold != (u32) -1)
		if (skb->len > priv->hw->wiphy->rts_threshold)
			flags |= ATH9K_HTC_TX_RTSCTS;

	if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
	    (vif && vif->bss_conf.use_cts_prot))
		flags |= ATH9K_HTC_TX_CTSONLY;

	if (tx_info->control.hw_key)
		tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

	tx_fhdr = skb_push(skb, sizeof(tx_hdr));
	memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));

	qnum = skb_get_queue_mapping(skb);
	tx_ctl->epid = get_htc_epid(priv, qnum);
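/*
 * The firmware TX header is prepended to the frame itself with
 * skb_push(), so no host-side descriptor ring is needed: the header
 * travels in-band, and the endpoint derived from the mac80211 queue
 * mapping selects the firmware queue that will service the frame.
 */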
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
		       struct sk_buff *skb,
		       u8 slot, bool is_cab)
{
	vif_idx = avp->index;

	if (!priv->ah->is_monitoring) {
		ath_dbg(common, XMIT,
			"VIF is null, but no monitor interface !\n");
		return -EINVAL;
	}

	sta_idx = ista->index;

	if (ieee80211_is_data(fc))
		ath9k_htc_tx_data(priv, vif, skb,
				  sta_idx, vif_idx, slot, is_cab);
	else
		ath9k_htc_tx_mgmt(priv, avp, skb,
				  sta_idx, vif_idx, slot);
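/*
 * Data frames take ath9k_htc_tx_data() and get the full TX header;
 * management and control frames take the slimmer ath9k_htc_tx_mgmt()
 * path. Both are tagged with the same slot, so completion handling
 * stays uniform.
 */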
static inline bool __ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
					     struct ath9k_htc_sta *ista,
					     u8 tid)
{
	bool ret = false;

	spin_lock_bh(&priv->tx.tx_lock);
	if (ista->tid_state[tid] == AGGR_STOP)
		ret = true;
	spin_unlock_bh(&priv->tx.tx_lock);

	return ret;
}

	if (sta && conf_is_ht(&priv->hw->conf) &&
	    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;

			if (__ath9k_htc_check_tx_aggr(priv, ista, tid)) {
				ieee80211_start_tx_ba_session(sta, tid, 0);
				spin_lock_bh(&priv->tx.tx_lock);
				ista->tid_state[tid] = AGGR_PROGRESS;
				spin_unlock_bh(&priv->tx.tx_lock);
			}
		}
	}
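/*
 * ieee80211_start_tx_ba_session() is the stock mac80211 call for
 * initiating a BlockAck session. Flipping the per-TID state to
 * AGGR_PROGRESS under tx_lock keeps the driver from re-requesting a
 * session for every QoS frame while the handshake is in flight.
 */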
	slot = strip_drv_header(priv, skb);

	tx_ctl = HTC_SKB_CB(skb);

	tx_info = IEEE80211_SKB_CB(skb);

	rate = &tx_info->status.rates[0];

	if (!txok || !vif || !txs)
		goto send_mac80211;

	ath9k_htc_check_tx_aggr(priv, vif, skb);

	spin_lock_bh(&priv->tx.tx_lock);
	if (WARN_ON(--priv->tx.queued_cnt < 0))
		priv->tx.queued_cnt = 0;
	spin_unlock_bh(&priv->tx.tx_lock);
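/*
 * queued_cnt is decremented once per completed frame; the WARN_ON
 * catches underflow from a double-counted completion. When the count
 * falls back below the stop threshold, the driver can wake the
 * mac80211 queues it stopped earlier.
 */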
static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
				       struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL) {
		ath9k_htc_tx_process(priv, skb, NULL);
	}
}

	spin_lock_bh(&priv->tx.tx_lock);
	priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
	spin_unlock_bh(&priv->tx.tx_lock);
	ath9k_htc_tx_drainq(priv, &priv->tx.mgmt_ep_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.cab_ep_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.data_be_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.data_bk_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.data_vi_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
	ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
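/*
 * Draining walks every host-side endpoint queue, including tx_failed,
 * and completes each frame with a NULL TX status (i.e. as not ACKed).
 * This runs on reset and stop paths where no firmware status will
 * ever arrive for the queued frames.
 */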
	spin_lock_bh(&priv->wmi->event_lock);
	spin_unlock_bh(&priv->wmi->event_lock);

	spin_lock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);

	spin_lock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);
	spin_unlock_bh(&priv->tx.tx_lock);

	ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
	if (fcookie == cookie)
		return true;

	epid_queue = get_htc_epid_queue(priv, epid);

	spin_lock_irqsave(&epid_queue->lock, flags);
	skb_queue_walk_safe(epid_queue, skb, tmp) {
		if (check_cookie(priv, skb, txs->cookie, epid)) {
			__skb_unlink(skb, epid_queue);
			spin_unlock_irqrestore(&epid_queue->lock, flags);
			return skb;
		}
	}
	spin_unlock_irqrestore(&epid_queue->lock, flags);

	ath_dbg(common, XMIT,
		"No matching packet for cookie: %d, epid: %d\n",
		txs->cookie, epid);
	for (i = 0; i < txs->cnt; i++) {
		__txs = &txs->txstatus[i];

		skb = ath9k_htc_tx_get_packet(priv, __txs);
		if (!skb) {
			event = kzalloc(sizeof(*event), GFP_ATOMIC);
			if (!event)
				continue;
			memcpy(&event->txs, __txs, sizeof(event->txs));

			spin_lock(&priv->wmi->event_lock);
			list_add_tail(&event->list,
				      &priv->wmi->pending_tx_events);
			spin_unlock(&priv->wmi->event_lock);
			continue;
		}

		ath9k_htc_tx_process(priv, skb, __txs);
	}
	tx_ctl = HTC_SKB_CB(skb);

	epid_queue = get_htc_epid_queue(priv, ep_id);

	tx_ctl = HTC_SKB_CB(skb);

	ath_dbg(common, XMIT,
		"Dropping a packet due to TX timeout\n");
static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
				       struct sk_buff_head *epid_queue)
{
	bool process = false;
	unsigned long flags;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head queue;

	skb_queue_head_init(&queue);

	spin_lock_irqsave(&epid_queue->lock, flags);
	skb_queue_walk_safe(epid_queue, skb, tmp) {
		if (check_packet(priv, skb)) {
			__skb_unlink(skb, epid_queue);
			__skb_queue_tail(&queue, skb);
			process = true;
		}
	}
	spin_unlock_irqrestore(&epid_queue->lock, flags);

	if (process) {
		skb_queue_walk_safe(&queue, skb, tmp) {
			__skb_unlink(skb, &queue);
			ath9k_htc_tx_process(priv, skb, NULL);
		}
	}
}
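/*
 * Two-phase cleanup: expired skbs are moved to a private list under
 * epid_queue->lock (pointer moves only), then completed outside the
 * lock so the status/completion machinery never runs with an
 * irq-saving spinlock held.
 */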
	spin_lock(&priv->wmi->event_lock);

	skb = ath9k_htc_tx_get_packet(priv, &event->txs);
	if (skb) {
		ath_dbg(common, XMIT,
			"Found packet for cookie: %d, epid: %d\n",
			event->txs.cookie,
			MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));

		ath9k_htc_tx_process(priv, skb, &event->txs);
	}

	spin_unlock(&priv->wmi->event_lock);
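/*
 * Deferred completions: a status that arrived before its packet was
 * queued waits on wmi->pending_tx_events and is retried here from the
 * periodic cleanup, once the packet can actually be found.
 */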
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue);
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue);
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue);
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue);
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue);
	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue);
	skb_queue_head_init(&priv->tx.mgmt_ep_queue);
	skb_queue_head_init(&priv->tx.cab_ep_queue);
	skb_queue_head_init(&priv->tx.data_be_queue);
	skb_queue_head_init(&priv->tx.data_bk_queue);
	skb_queue_head_init(&priv->tx.data_vi_queue);
	skb_queue_head_init(&priv->tx.data_vo_queue);
	skb_queue_head_init(&priv->tx.tx_failed);
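/*
 * One sk_buff_head per TX endpoint plus tx_failed; the queues' own
 * internal locks are what the cookie-matching and cleanup walks above
 * take with spin_lock_irqsave().
 */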
	struct ath_common *common = ath9k_hw_common(ah);

	memset(&qi, 0, sizeof(qi));

	ath_err(common, "qnum %u out of range, max %zu!\n",
		qnum, ARRAY_SIZE(priv->hwq_map));

	memset(&qi, 0, sizeof(qi));
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	(priv->nvifs <= 1) &&

	if (conf_is_ht(&priv->hw->conf)) {

#undef RX_FILTER_PRESERVE
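/*
 * RX_FILTER_PRESERVE keeps the PHY-error and radar bits intact while
 * the rest of the RX filter is recomputed from mac80211 state; the
 * #undef right after the function keeps the helper macro local.
 */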
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_rxena(priv->ah);
	ath9k_htc_opmode_init(priv);
static void ath9k_process_rate(struct ieee80211_hw *hw,
			       struct ieee80211_rx_status *rxs,
			       u8 rx_rate, u8 rs_flags)
{
	if (rx_rate & 0x80) {
		/* HT (MCS) rate: bit 7 set, index in the low bits */

	band = hw->conf.channel->band;

	if (sband->bitrates[i].hw_value == rx_rate) {
		/* legacy rate, long preamble */

	if (sband->bitrates[i].hw_value_short == rx_rate) {
		/* legacy rate, short preamble */
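/*
 * Sketch of the lookup this function performs (loop variables are
 * assumptions; hw_value and hw_value_short are the real struct
 * ieee80211_rate fields):
 *
 *	sband = hw->wiphy->bands[band];
 *	for (i = 0; i < sband->n_bitrates; i++) {
 *		if (sband->bitrates[i].hw_value == rx_rate ||
 *		    sband->bitrates[i].hw_value_short == rx_rate) {
 *			rxs->rate_idx = i;
 *			break;
 *		}
 *	}
 *
 * Rates with bit 7 set are HT MCS indices and skip the table walk.
 */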
	ath_err(common,
		"Corrupted RX frame, dropping (len: %d)\n",
		rxbuf->skb->len);

	ath_err(common,
		"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
		rs_datalen, skb->len);

	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (rxbuf->rxstatus.rs_status != 0) {

	if (ieee80211_is_ctl(fc))

	} else if (ieee80211_has_protected(fc) &&
		   skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;
		if (test_bit(keyix, common->keymap))
			rx_status->flag |= RX_FLAG_DECRYPTED;
	}

	ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
			   rxbuf->rxstatus.rs_flags);

	last_rssi = priv->rx.last_rssi;

	if (ieee80211_is_beacon(fc))
		priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
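/*
 * last_rssi feeds a running average (ATH_RSSI_LPF-style filtering in
 * the full function) so a single noisy sample does not swing the
 * reported signal; beacon RSSI additionally updates
 * ah->stats.avgbrssi, which the ANI code consumes.
 */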
	unsigned long flags;

	if (tmp_buf->in_process) {
		rxbuf = tmp_buf;
		break;
	}

	if (rxbuf == NULL) {
		spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
		break;
	}

	if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
		dev_kfree_skb_any(rxbuf->skb);
		goto requeue;
	}

	memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
	       sizeof(struct ieee80211_rx_status));

	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);

	list_move_tail(&rxbuf->list, &priv->rx.rxbuf);

	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
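/*
 * The RX tasklet only consumes buffers flagged in_process (filled in
 * by ath9k_htc_rxep() below). rx_status is built on the stack and
 * copied into the skb control buffer before the frame goes up to
 * mac80211; the buffer is then recycled to the tail of rx.rxbuf.
 */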
	struct ath_common *common = ath9k_hw_common(ah);

	spin_lock(&priv->rx.rxbuflock);
	if (!tmp_buf->in_process) {
		rxbuf = tmp_buf;
		break;
	}
	spin_unlock(&priv->rx.rxbuflock);

	if (rxbuf == NULL) {
		ath_err(common, "No free RX buffer\n");
		goto err;
	}

	spin_lock(&priv->rx.rxbuflock);
	rxbuf->skb = skb;
	rxbuf->in_process = true;
	spin_unlock(&priv->rx.rxbuflock);
	struct ath_common *common = ath9k_hw_common(ah);

	INIT_LIST_HEAD(&priv->rx.rxbuf);

	if (rxbuf == NULL) {
		ath_err(common, "Unable to allocate RX buffers\n");
		goto err;
	}