23 #include <linux/kernel.h>
24 #include <linux/module.h>
/*
 * wl1251_tx_double_buffer_busy() - check whether the firmware TX double
 * buffer has room for another frame.
 *
 * NOTE(review): this excerpt is incomplete — interior lines (including the
 * read of data_in_count and the return statement) are missing, so the
 * comments below describe only what is visible.
 */
32 static bool wl1251_tx_double_buffer_busy(
struct wl1251 *wl,
u32 data_out_count)
34 int used, data_in_count;
/*
 * Presumably handles counter wrap-around: when the in-counter has wrapped
 * below the out-counter the missing branch adjusts the difference — TODO
 * confirm against the full source.
 */
38 if (data_in_count < data_out_count)
/* used = number of buffer entries currently occupied by in-flight frames. */
42 used = data_in_count - data_out_count;
/*
 * wl1251_tx_path_status() - query whether the TX path can accept a frame.
 *
 * NOTE(review): incomplete excerpt — the read of data_out_count and the
 * return value handling are not visible here.
 */
53 static int wl1251_tx_path_status(
struct wl1251 *wl)
/* Delegate the occupancy decision to the double-buffer check above. */
61 busy = wl1251_tx_double_buffer_busy(wl, data_out_count);
/*
 * NOTE(review): fragment of a TX-control setup helper whose header is not
 * visible in this excerpt (original lines ~62-86 are missing). Only the
 * visible field initializations are annotated.
 */
/* Use the default (first) rate policy for this frame. */
87 tx_hdr->
control.rate_policy = 0;
/* Plain data packet type; other values presumably mark mgmt/ctrl — TODO confirm. */
90 tx_hdr->
control.packet_type = 0;
/* Request a TX-completion indication from the firmware for this frame. */
95 tx_hdr->
control.tx_complete = 1;
/* Maximum per-MSDU security (encryption) overhead, in bytes. */
104 #define MAX_MSDU_SECURITY_LENGTH 16
/* Maximum per-MPDU security overhead, in bytes. */
105 #define MAX_MPDU_SECURITY_LENGTH 16
/* 802.11 QoS data header length, in bytes. */
106 #define WLAN_QOS_HDR_LEN 26
/*
 * NOTE(review): the continuation line (original line 108) of this macro is
 * missing from the excerpt; presumably it adds WLAN_QOS_HDR_LEN.
 */
107 #define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \
/* Size of one firmware memory block, in bytes. */
109 #define HW_BLOCK_SIZE 252
/*
 * NOTE(review): fragment of the memory-block accounting helper (presumably
 * wl1251_tx_frag_block_num); its signature and several interior lines are
 * missing from this excerpt.
 */
113 u16 num_mpdus, mem_blocks_per_frag;
/* Frame exceeds the fragmentation threshold: account per-fragment blocks. */
120 if (payload_len > frag_threshold) {
121 mem_blocks_per_frag =
125 mem_blocks = num_mpdus * mem_blocks_per_frag;
/* No fragmentation needed for frames at or below the threshold. */
130 mem_blocks_per_frag = 0;
/* Cap the extra blocks at one per MPDU — rationale not visible here. */
138 mem_blocks +=
min(num_mpdus, mem_blocks_per_frag);
/*
 * wl1251_tx_fill_hdr() - populate the device TX descriptor prepended to the
 * skb before it is handed to the firmware.
 *
 * NOTE(review): incomplete excerpt — headroom reservation and several field
 * assignments between the visible lines are missing.
 */
143 static int wl1251_tx_fill_hdr(
struct wl1251 *wl,
struct sk_buff *skb,
/* Allocate a driver-side packet id used to match TX-complete results. */
154 id = wl1251_tx_id(wl, skb);
/* Resolve the mac80211-selected TX rate for this frame. */
163 rate = ieee80211_get_tx_rate(wl->
hw, control);
/* Map the skb's queue-mapping onto the firmware TX queue. */
168 tx_hdr->
xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
/* Fill the control sub-fields and the fragment/memory-block count. */
170 wl1251_tx_control(tx_hdr, control, fc);
171 wl1251_tx_frag_block_num(tx_hdr);
/*
 * wl1251_tx_send_packet() - copy a prepared frame into the device's TX
 * packet ring.
 *
 * NOTE(review): incomplete excerpt — encryption-padding logic, the skb
 * reallocation branch, the memmove of the payload, and the actual write to
 * the device are missing between the visible lines.
 */
177 static int wl1251_tx_send_packet(
struct wl1251 *wl,
struct sk_buff *skb,
/* Read the 802.11 frame-control field that follows the device TX header. */
196 fc = *(
__le16 *)(skb->
data +
sizeof(*tx_hdr));
204 sizeof(*tx_hdr) + hdrlen);
/*
 * Cloned skbs or skbs without enough tailroom cannot be padded in place;
 * the missing branch presumably reallocates — TODO confirm.
 */
216 if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
/* Compute the padding needed to 4-byte-align the skb data pointer. */
230 offset = (4 - (
long)skb->
data) & 0x03;
236 unsigned char *
src = skb->
data;
/* Shift the data pointer forward so the buffer start is aligned. */
237 skb_reserve(skb, offset);
/*
 * Select the destination chunk in the double-buffered TX packet ring:
 * second chunk at ring base + chunk size, otherwise the ring base.
 */
247 addr = wl->
data_path->tx_packet_ring_addr +
248 wl->
data_path->tx_packet_ring_chunk_size;
250 addr = wl->
data_path->tx_packet_ring_addr;
/* Debug trace of the frame's id, length and queue (format string truncated here). */
255 "queue %d", tx_hdr->
id, skb, tx_hdr->
length,
/*
 * wl1251_tx_trigger() - signal the firmware that new TX data is available.
 *
 * NOTE(review): incomplete excerpt — the interrupt-trigger register write
 * and the data_out_count bookkeeping surrounding the visible mask line are
 * missing.
 */
261 static void wl1251_tx_trigger(
struct wl1251 *wl)
/* Keep the out-counter within the firmware's status-counter mask. */
277 TX_STATUS_DATA_OUT_COUNT_MASK;
/*
 * wl1251_tx_frame() - transmit a single frame: check path status, fill the
 * device header, copy to the device and trigger the firmware.
 *
 * NOTE(review): incomplete excerpt — error-return checks after each ret
 * assignment are missing from the visible lines but presumably present.
 */
281 static int wl1251_tx_frame(
struct wl1251 *wl,
struct sk_buff *skb)
/* mac80211 per-skb control/status area. */
287 info = IEEE80211_SKB_CB(skb);
/* Hardware key index for encrypted frames (guarded by a missing hw_key check). */
290 idx = info->
control.hw_key->hw_key_idx;
/* Bail out early if the firmware TX double buffer is full. */
298 ret = wl1251_tx_path_status(wl);
302 ret = wl1251_tx_fill_hdr(wl, skb, info);
306 ret = wl1251_tx_send_packet(wl, skb, info);
/* Kick the firmware to process the newly queued frame. */
310 wl1251_tx_trigger(wl);
/*
 * NOTE(review): fragment of the TX work handler (presumably
 * wl1251_tx_work); the function header, the dequeue loop and the wakeup/
 * requeue handling are missing from this excerpt.
 */
/* Tracks whether the chip was woken for this batch, to elevate PSM only once. */
319 bool woken_up =
false;
/* Attempt to transmit the dequeued frame. */
335 ret = wl1251_tx_frame(wl, skb);
/* Missing branch above presumably requeues on a busy TX path — TODO confirm. */
339 }
else if (ret < 0) {
/*
 * wl1251_tx_parse_status() - render a firmware TX status bitmask as a
 * human-readable string for debug logging.
 *
 * NOTE(review): incomplete excerpt — the buffer declaration and the
 * per-flag formatting are missing; the returned buffer is presumably
 * static, making this helper non-reentrant — TODO confirm.
 */
352 static const char *wl1251_tx_parse_status(
u8 status)
/* Start from an empty, NUL-filled string buffer. */
358 memset(buf, 0,
sizeof(buf));
/*
 * wl1251_tx_packet_cb() - handle one firmware TX-complete result: report
 * status back to mac80211 and release the skb.
 *
 * NOTE(review): incomplete excerpt — the skb lookup by result id, ACK flag
 * handling and the ieee80211_tx_status call are missing between the visible
 * lines.
 */
380 static void wl1251_tx_packet_cb(
struct wl1251 *wl,
/* mac80211 per-skb status area to be filled from the firmware result. */
394 info = IEEE80211_SKB_CB(skb);
/* Debug log of the raw status byte and its decoded string form. */
418 result->
status, wl1251_tx_parse_status(result->
status));
/*
 * NOTE(review): fragment of the TX-complete interrupt handler (presumably
 * wl1251_tx_complete); the function header, the loop structure and the
 * result-ring acknowledgement writes are missing from this excerpt.
 */
429 int i, result_index, num_complete = 0, queue_len;
/* Local snapshot of the firmware's TX result ring (power-of-two sized). */
430 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
/* Read the whole result block from the device in one transfer. */
438 result,
sizeof(result));
443 result_ptr = &result[result_index];
/*
 * Both done flags set means the firmware has fully committed this result
 * entry; process it and advance.
 */
445 if (result_ptr->
done_1 == 1 &&
446 result_ptr->
done_2 == 1) {
447 wl1251_tx_packet_cb(wl, result_ptr);
/* Ring index wraps via mask — relies on FW_TX_CMPLT_BLOCK_SIZE being a power of two. */
452 result_index = (result_index + 1) &
453 (FW_TX_CMPLT_BLOCK_SIZE - 1);
460 queue_len = skb_queue_len(&wl->
tx_queue);
/* Frames completed and more are waiting: presumably reschedule TX work — TODO confirm. */
462 if ((num_complete > 0) && (queue_len > 0)) {
475 spin_unlock_irqrestore(&wl->
wl_lock, flags);
/* Wrapped case: the processed range spans the end of the ring. */
495 }
else if (result_index < wl->next_tx_complete) {
/* Clear the processed (possibly wrapped) result entries on the device. */
502 (FW_TX_CMPLT_BLOCK_SIZE -
510 FW_TX_CMPLT_BLOCK_SIZE +
519 FW_TX_CMPLT_BLOCK_SIZE *
/*
 * NOTE(review): two stray lines from later TX cleanup/flush functions whose
 * bodies are missing from this excerpt; each fetches the mac80211 per-skb
 * control block, presumably before freeing queued frames — TODO confirm.
 */
539 info = IEEE80211_SKB_CB(skb);
552 info = IEEE80211_SKB_CB(skb);