18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 #define ATH6KL_TID_MASK 0xf
29 #define ATH6KL_AID_SHIFT 4
31 static inline u8 ath6kl_get_tid(u8 tid_mux)
36 static inline u8 ath6kl_get_aid(u8 tid_mux)
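/*
 * Editor's sketch: the bodies of the two helpers above are elided in
 * this excerpt, but the mask/shift macros above imply a simple demux of
 * the muxed TID byte (an assumption, not quoted source):
 *
 *	tid = tid_mux & ATH6KL_TID_MASK;    low 4 bits: traffic id
 *	aid = tid_mux >> ATH6KL_AID_SHIFT;  high 4 bits: association id
 */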
44 struct ath6kl *ar = ath6kl_priv(dev);
53 if (is_multicast_ether_addr(eth_hdr->h_dest))
64 if ((ep_map == -1) && !ar->node_map[i].tx_pend)
101 static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
107 bool is_apsdq_empty = false;
109 u8 up = 0, traffic_class, *ip_hdr;
120 if (!skb_queue_empty(&conn->apsdq))
134 ip_hdr = (u8 *)(datap + 1);
140 ip_hdr = (u8 *)(llc_hdr + 1);
150 if ((conn->apsd_info & (1 << traffic_class)) == 0)
155 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
163 if (is_apsdq_empty) {
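/*
 * U-APSD queueing (editor's note): a frame reaches conn->apsdq only if
 * its traffic class is delivery-enabled in the STA's APSD bitmap, i.e.
 * the (conn->apsd_info & (1 << traffic_class)) test above. The
 * empty-to-non-empty transition tracked by is_apsdq_empty is what tells
 * the firmware to advertise buffered traffic for this STA.
 */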
173 static bool ath6kl_process_psq(struct ath6kl_sta *conn,
178 bool is_psq_empty = false;
183 if (!skb_queue_empty(&conn->psq))
191 is_psq_empty = skb_queue_empty(&conn->psq);
212 bool ps_queued = false;
215 if (is_multicast_ether_addr(datap->h_dest)) {
217 bool q_mcast = false;
232 bool is_mcastq_empty = false;
257 if (!skb_queue_empty(&ar->mcastpsq))
272 ps_queued = ath6kl_process_uapsdq(conn,
275 ps_queued = ath6kl_process_psq(conn,
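/*
 * Editor's sketch of the elided branch around the two calls above: the
 * AP path presumably selects U-APSD handling when the STA negotiated
 * any APSD-enabled AC and legacy PS queueing otherwise (the argument
 * lists are truncated in this excerpt):
 *
 *	if (conn->apsd_info)
 *		ps_queued = ath6kl_process_uapsdq(conn, vif, skb, flags);
 *	else
 *		ps_queued = ath6kl_process_psq(conn, vif, skb, flags);
 */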
294 spin_lock_bh(&ar->lock);
297 "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
306 ath6kl_err("wmi ctrl ep full, dropping pkt: 0x%p, len:%d\n",
311 if (!cookie) {
312 spin_unlock_bh(&ar->lock);
322 spin_unlock_bh(&ar->lock);
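/*
 * Editor's note: cookie allocation and the WMI-control-endpoint
 * overflow check run under ar->lock, which is why both error paths
 * above drop the lock before bailing out.
 */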
345 struct ath6kl *ar = ath6kl_priv(dev);
352 bool chk_adhoc_ps_mapping = false;
361 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
376 if (ath6kl_powersave_ap(vif, skb, &flags))
384 (skb_network_header(skb) - skb->head) +
432 chk_adhoc_ps_mapping = true;
444 spin_lock_bh(&ar->lock);
446 if (chk_adhoc_ps_mapping)
447 eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
453 spin_unlock_bh(&ar->lock);
461 spin_unlock_bh(&ar->lock);
469 spin_unlock_bh(&ar->lock);
527 spin_lock_bh(&ar->lock);
570 spin_unlock_bh(&ar->lock);
574 ath6kl_htc_activity_changed(ar->htc_target, eid, active);
607 target->endpoint[endpoint].tx_drop_packet_threshold)
622 netif_stop_queue(vif->ndev);
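/*
 * Editor's note: once an endpoint's queued-packet count crosses its
 * tx_drop_packet_threshold, the netdev queue is stopped so the stack
 * stops submitting frames until tx completions drain the endpoint and
 * netif_wake_queue() is called (see line 785 below).
 */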
633 static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
660 for (i = ar->node_num; i > 0; i--) {
686 skb_queue_head_init(&skb_queue);
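/*
 * Editor's note: the completion loop below recovers each packet's
 * ath6kl_cookie from pkt_cntxt; the cookie yields the skb to free and
 * the IBSS map_no so per-node tx counts can be dropped via
 * ath6kl_tx_clear_node_map().
 */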
689 spin_lock_bh(&ar->lock);
692 while (!list_empty(packet_queue)) {
698 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
703 skb = ath6kl_cookie->skb;
705 map_no = ath6kl_cookie->map_no;
707 if (!skb || !skb->data)
710 __skb_queue_tail(&skb_queue, skb);
729 if_idx = wmi_cmd_hdr_get_if_idx(
732 if_idx = wmi_data_hdr_get_if_idx(
745 flushing[if_idx] = true;
753 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
758 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
762 flushing[if_idx] = false;
767 ath6kl_tx_clear_node_map(vif, eid, map_no);
775 spin_unlock_bh(&ar->lock);
777 __skb_queue_purge(&skb_queue);
785 netif_wake_queue(vif->ndev);
798 spin_unlock_bh(&ar->lock);
814 static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
871 ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
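/*
 * Editor's sketch: the assignment feeding the call above is elided;
 * presumably the refill count tops the pool back up to a fixed maximum
 * (an assumption about the truncated line, including the constant name):
 *
 *	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
 *		       ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
 */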
873 if (n_buf_refill <= 0)
876 INIT_LIST_HEAD(&queue);
879 "%s: providing htc with %d buffers at eid=%d\n",
880 __func__, n_buf_refill, endpoint);
882 for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
890 set_htc_rxpkt_info(packet, skb, skb->data,
896 if (!list_empty(&queue))
897 ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
913 set_htc_rxpkt_info(packet, skb, skb->data,
917 spin_lock_bh(&ar->lock);
919 spin_unlock_bh(&ar->lock);
935 int refill_cnt = 0, depth = 0;
938 __func__, endpoint, len);
944 spin_lock_bh(&ar->lock);
947 spin_unlock_bh(&ar->lock);
959 spin_unlock_bh(&ar->lock);
971 static void aggr_slice_amsdu(struct aggr_info *p_aggr,
976 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
979 mac_hdr_len = sizeof(struct ethhdr);
980 framep = skb->data + mac_hdr_len;
981 amsdu_len = skb->len - mac_hdr_len;
983 while (amsdu_len > mac_hdr_len) {
984 hdr = (struct ethhdr *) framep;
989 ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
994 frame_8023_len = payload_8023_len + mac_hdr_len;
995 new_skb = aggr_get_free_skb(p_aggr);
1001 memcpy(new_skb->data, framep, frame_8023_len);
1002 skb_put(new_skb, frame_8023_len);
1005 dev_kfree_skb(new_skb);
1012 if ((amsdu_len - frame_8023_len) == 0)
1018 frame_8023_len = ALIGN(frame_8023_len, 4);
1020 framep += frame_8023_len;
1021 amsdu_len -= frame_8023_len;
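/*
 * Editor's note: A-MSDU subframes are an ethhdr plus payload, padded to
 * a 4-byte boundary, hence the ALIGN() before advancing framep; the
 * (amsdu_len - frame_8023_len) == 0 check exits early because the last
 * subframe carries no padding.
 */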
1031 struct rxtid *rxtid;
1033 u16 idx, idx_end, seq_end;
1036 rxtid = &agg_conn->rx_tid[tid];
1037 stats = &agg_conn->stat[tid];
1039 spin_lock_bh(&rxtid->lock);
1055 seq_end = seq_no ? seq_no : rxtid->seq_next;
1060 if ((order == 1) && (!node->skb))
1065 aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
1075 } while (idx != idx_end);
1077 spin_unlock_bh(&rxtid->lock);
1082 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
1089 struct rxtid *rxtid;
1094 bool is_queued = false;
1097 rxtid = &agg_conn->rx_tid[tid];
1098 stats = &agg_conn->stat[tid];
1104 aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
1108 ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
1119 if (((st < end) && (cur < st || cur > end)) ||
1120 ((st > end) && (cur > end) && (cur < st))) {
1121 extended_end = (end + rxtid->hold_q_sz - 1) &
1124 if (((end < extended_end) &&
1125 (cur < end || cur > extended_end)) ||
1126 ((end > extended_end) && (cur > extended_end) &&
1128 aggr_deque_frms(agg_conn, tid, 0, 0);
1129 spin_lock_bh(&rxtid->lock);
1135 spin_unlock_bh(&rxtid->lock);
1147 aggr_deque_frms(agg_conn, tid, st, 0);
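/*
 * Editor's worked example for the wraparound test above (illustration
 * only, not from the source): sequence numbers are modulo 4096, so with
 * st = 4090 and end = 10 the window wraps; cur = 5 is inside the window
 * while cur = 2000 is outside it and forces the queued frames to be
 * released before the window moves.
 */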
1157 spin_lock_bh(&rxtid->lock);
1171 dev_kfree_skb(node->skb);
1184 spin_unlock_bh(&rxtid->lock);
1186 aggr_deque_frms(agg_conn, tid, 0, 1);
1191 spin_lock_bh(&rxtid->lock);
1192 for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
1193 if (rxtid->hold_q[idx].skb) {
1207 spin_unlock_bh(&rxtid->lock);
1212 static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1216 bool is_apsdq_empty, is_apsdq_empty_at_start;
1237 if (!num_frames_to_deliver)
1241 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1243 is_apsdq_empty_at_start = is_apsdq_empty;
1245 while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1249 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1257 num_frames_to_deliver--;
1260 if ((is_apsdq_empty) || (!num_frames_to_deliver))
1268 if (is_apsdq_empty) {
1269 if (is_apsdq_empty_at_start)
1276 conn->aid, 0, flags);
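/*
 * Editor's note: a PS-Poll/U-APSD trigger releases at most
 * num_frames_to_deliver frames from conn->apsdq per service period; the
 * final frame of the service period is flagged, and once the queue
 * drains the firmware is told so it can clear buffered-traffic state
 * for the STA (the conn->aid, 0, flags call above).
 */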
1289 u8 meta_type, dot11_hdr = 0;
1290 u8 pad_before_data_start;
1293 bool is_amsdu, prev_ps, ps_state = false;
1294 bool trig_state = false;
1304 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1305 __func__, ar, ept, skb, packet->buf,
1347 spin_unlock_bh(&vif->if_lock);
1354 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1360 min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1361 sizeof(struct ath6kl_llc_snap_hdr);
1363 dhdr = (struct wmi_data_hdr *) skb->data;
1371 ((packet->act_len < min_hdr_len) ||
1373 ath6kl_info("frame len is too short or too long\n");
1382 meta_type = wmi_data_hdr_get_meta(dhdr);
1387 offset = sizeof(struct wmi_data_hdr);
1390 switch (meta_type) {
1403 datap = (struct ethhdr *) (skb->data + offset);
1429 ath6kl_uapsd_trigger_frame_rx(vif, conn);
1434 bool is_apsdq_empty;
1468 is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1476 if (!is_apsdq_empty)
1489 if ((packet->act_len < min_hdr_len) ||
1497 is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1498 tid = wmi_data_hdr_get_up(dhdr);
1499 seq_no = wmi_data_hdr_get_seqno(dhdr);
1500 meta_type = wmi_data_hdr_get_meta(dhdr);
1501 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1502 pad_before_data_start =
1506 skb_pull(skb, sizeof(struct wmi_data_hdr));
1508 switch (meta_type) {
1524 skb_pull(skb, pad_before_data_start);
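/*
 * Editor's note: rx unwrap order is wmi_data_hdr first, then the meta
 * block selected by meta_type, then pad_before_data_start, leaving the
 * 802.3 header at skb->data for the classification below.
 */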
1547 if (is_multicast_ether_addr(datap->h_dest))
1581 if (is_unicast_ether_addr(datap->h_dest)) {
1590 if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1595 } else if (!is_broadcast_ether_addr(datap->h_dest))
1598 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1601 static void aggr_timeout(unsigned long arg)
1605 struct rxtid *rxtid;
1609 rxtid = &aggr_conn->rx_tid[i];
1610 stats = &aggr_conn->stat[i];
1617 "aggr timeout (st %d end %d)\n",
1621 aggr_deque_frms(aggr_conn, i, 0, 0);
1627 rxtid = &aggr_conn->rx_tid[i];
1630 spin_lock_bh(&rxtid->lock);
1631 for (j = 0; j < rxtid->hold_q_sz; j++) {
1632 if (rxtid->hold_q[j].skb) {
1638 spin_unlock_bh(&rxtid->lock);
1650 static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1652 struct rxtid *rxtid;
1655 if (!aggr_conn || tid >= NUM_OF_TIDS)
1658 rxtid = &aggr_conn->rx_tid[tid];
1659 stats = &aggr_conn->stat[tid];
1662 aggr_deque_frms(aggr_conn, tid, 0, 0);
1664 rxtid->aggr = false;
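/*
 * Editor's note: TID teardown first releases everything still held in
 * the reorder window (aggr_deque_frms with seq_no = 0, order = 0), then
 * clears rxtid->aggr so later frames bypass reordering.
 */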
1681 struct rxtid *rxtid;
1687 aid = ath6kl_get_aid(tid_mux);
1697 tid = ath6kl_get_tid(tid_mux);
1698 if (tid >= NUM_OF_TIDS)
1701 rxtid = &aggr_conn->rx_tid[tid];
1702 stats = &aggr_conn->stat[tid];
1706 __func__, win_sz, tid);
1709 aggr_delete_tid_state(aggr_conn, tid);
1719 if (!skb_queue_empty(&rxtid->q))
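/*
 * Editor's note: on an ADDBA request, frames parked on rxtid->q from
 * before the block-ack agreement are flushed so the new reorder window
 * (sized from the negotiated win_sz) starts clean.
 */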
1728 struct rxtid *rxtid;
1734 aggr_conn->timer.function = aggr_timeout;
1735 aggr_conn->timer.data = (unsigned long) aggr_conn;
1741 rxtid = &aggr_conn->rx_tid[i];
1742 rxtid->aggr = false;
1744 skb_queue_head_init(&rxtid->q);
1756 ath6kl_err("failed to alloc memory for aggr_node\n");
1762 ath6kl_err("failed to alloc memory for connection specific aggr info\n");
1778 struct rxtid *rxtid;
1783 aid = ath6kl_get_aid(tid_mux);
1793 tid = ath6kl_get_tid(tid_mux);
1794 if (tid >= NUM_OF_TIDS)
1797 rxtid = &aggr_conn->rx_tid[tid];
1800 aggr_delete_tid_state(aggr_conn, tid);
1816 aggr_delete_tid_state(aggr_conn, tid);
1824 spin_lock_bh(&ar->lock);
1826 spin_unlock_bh(&ar->lock);
1833 spin_unlock_bh(&ar->lock);
1835 spin_lock_bh(&ar->lock);
1838 spin_unlock_bh(&ar->lock);