Linux Kernel 3.7.1
tx.c
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps, single_sta;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_sta = (wl->active_sta_count == 1);

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
		return wl->system_hlid;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
	    !ieee80211_is_auth(hdr->frame_control) &&
	    !ieee80211_is_assoc_req(hdr->frame_control))
		return wlvif->sta.hlid;
	else
		return wlvif->dev_hlid;
}

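/*
 * Note: ALIGN() rounds packet_length up to the next multiple of the chosen
 * alignment, e.g. ALIGN(1021, 4) == 1024; the callers zero the resulting
 * padding bytes before the aggregation buffer is written out.
 */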
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    test_bit(hlid, wlvif->ap.sta_hlid_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
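	/*
	 * Note: the right-shift by 10 above approximates division by 1000,
	 * i.e. hosttime is kept in 1024 ns units, roughly microseconds.
	 */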

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		/* configure the tx attributes */
		tx_attr = wlvif->session_counter <<
			  TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
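	/*
	 * Illustration: with 4-byte alignment a 1021-byte frame yields a
	 * total_len of 1024, so the memset above zeroes 3 padding bytes.
	 */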

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
425 
427  enum ieee80211_band rate_band)
428 {
430  u32 enabled_rates = 0;
431  int bit;
432 
433  band = wl->hw->wiphy->bands[rate_band];
434  for (bit = 0; bit < band->n_bitrates; bit++) {
435  if (rate_set & 0x1)
436  enabled_rates |= band->bitrates[bit].hw_value;
437  rate_set >>= 1;
438  }
439 
440  /* MCS rates indication are on bits 16 - 31 */
441  rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
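	/*
	 * The legacy loop above consumed band->n_bitrates bits, so shifting
	 * by HW_HT_RATES_OFFSET - n_bitrates leaves MCS0 at bit 0 for the
	 * loop below.
	 */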

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (wlcore_is_queue_stopped_by_reason(wl, i,
			WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			wlcore_wake_queue(wl, i,
					  WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (!skb_queue_empty(&queues[ac]) &&
		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	if (q == -1)
		return NULL;

	return &queues[q];
}

static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
					      struct wl1271_link *lnk)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct sk_buff_head *queue;

	queue = wl1271_select_queue(wl, lnk->tx_queue);
	if (!queue)
		return NULL;

	skb = skb_dequeue(queue);
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif,
					      u8 *hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;
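		/*
		 * e.g. with last_tx_hlid == 5 this visits links 6, 7, ...,
		 * WL12XX_MAX_LINKS - 1 and then wraps to 0 ... 5.
		 */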

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
		*hlid = wl->system_hlid;
	}

	/* do a new pass over the wlvif list */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

			/*
			 * No need to continue after last_wlvif. The previous
			 * pass should have found it.
			 */
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
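		/*
		 * Stepping last_tx_hlid one link back (hlid 0 wraps to
		 * WL12XX_MAX_LINKS - 1) makes the round robin in
		 * wl12xx_vif_skb_dequeue() revisit this link first.
		 */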
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
 * within prepare_tx_frame code, but there's nothing we should do about those
 * as well.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset,
						    true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
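		/*
		 * Worked example: cur_lsb = 0xfe and fw_lsb = 0x02 means the
		 * 8-bit firmware counter wrapped; (0x02 - 0xfe) & 0xff == 4,
		 * so tx_security_seq advances by 4 instead of going backwards.
		 */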
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
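	/*
	 * Both counters are u32, so this unsigned subtraction yields the
	 * number of new results even after fw_counter wraps around zero.
	 */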
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			wl1271_free_sta(wl, wlvif, i);
		else
			wlvif->sta.ba_rx_bitmap = 0;

		wl->links[i].allocated_pkts = 0;
		wl->links[i].prev_freed_pkts = 0;
	}
	wlvif->last_tx_hlid = 0;

}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

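	/*
	 * __ffs() returns the index of the lowest set bit, so e.g. a
	 * rate_set of 0b0110 yields BIT(1): the slowest enabled rate.
	 */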
	return BIT(__ffs(rate_set));
}

void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
			      enum wlcore_queue_stop_reason reason)
{
	bool stopped = !!wl->queue_stop_reasons[queue];

	/* queue should not be stopped for this reason */
	WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
}

void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));

	if (wl->queue_stop_reasons[queue])
		goto out;

	ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_stop_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_stop_queues);

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlcore_wake_queue(wl, i, reason);
}
EXPORT_SYMBOL_GPL(wlcore_wake_queues);

void wlcore_reset_stopped_queues(struct wl1271 *wl)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (!wl->queue_stop_reasons[i])
			continue;

		wl->queue_stop_reasons[i] = 0;
		ieee80211_wake_queue(wl->hw,
				     wl1271_tx_get_mac80211_queue(i));
	}

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	return test_bit(reason, &wl->queue_stop_reasons[queue]);
}

bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
{
	return !!wl->queue_stop_reasons[queue];
}