#include <linux/pci.h>
#include <linux/slab.h>
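
/* 32-bit DMA ops. b43legacy hardware only implements the 32-bit
 * descriptor format (struct b43legacy_dmadesc32); the 30-bit engine
 * type below differs only in how much address space it can reach. */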
static struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
						 int slot,
						 struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	return &(desc[slot]);
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	u32 ctl, addr;
	int slot;

	slot = (int)(desc - descbase);

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	/* ... fold the start/end/irq flags into ctl */

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
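
/* Per-ring register accessors: op32_poke_tx() writes the byte offset
 * of the next descriptor into the TX index register to start/continue
 * transmission, op32_tx_suspend()/op32_tx_resume() toggle the suspend
 * bit in the TX control register, and the rxslot helpers read and
 * write the hardware's RX descriptor pointer. */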
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	/* ... */
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	/* ... */
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	/* ... */
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	/* ... */
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	/* ... */
}
static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
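
/* Example: with nr_slots == 256, next_slot(ring, 255) wraps to 0 and
 * prev_slot(ring, 0) wraps to 255, i.e. the ring is traversed modulo
 * nr_slots. */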
#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */
/* Request a free slot for usage on a TX ring. */
static inline int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;
	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(struct b43legacy_wldev *dev,
						    int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/* FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}
/* b43legacy-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, };

	return idx_to_prio[ring->index];
}
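
/* Note: the two mappings above are inverses of each other, e.g. queue
 * priority 0 selects tx_ring3 and idx_to_prio[3] == 0 maps it back. */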
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	return map32[controller_idx];
}
static inline dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
					unsigned char *buf, size_t len, int tx)
{
	return dma_map_single(ring->dev->dev->dma_dev, buf, len,
			      tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static inline void unmap_descbuffer(struct b43legacy_dmaring *ring,
				    dma_addr_t addr, size_t len, int tx)
{
	dma_unmap_single(ring->dev->dev->dma_dev, addr, len,
			 tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static inline void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
					   dma_addr_t addr, size_t len)
{
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev, addr, len,
				DMA_FROM_DEVICE);
}

static inline void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
					      dma_addr_t addr, size_t len)
{
	dma_sync_single_for_device(ring->dev->dev->dma_dev, addr, len,
				   DMA_FROM_DEVICE);
}
static inline void free_descriptor_buffer(struct b43legacy_dmaring *ring,
					  struct b43legacy_dmadesc_meta *meta,
					  int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl,
			     "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
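
/* All descriptors of a ring live in one coherent allocation of
 * B43legacy_DMA_RINGMEMSIZE bytes: ring->descbase is the CPU-visible
 * view, ring->dmabase the bus address programmed into the hardware. */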
/* Reset the RX DMA channel. */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	u16 offset;
	u32 value;
	int i;

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		/* ... poll until DISABLED is reported, or time out */
	}
	/* ... */
	return 0;
}
/* Reset the TX DMA channel. */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	u16 offset;
	u32 value;
	int i;

	/* First wait for any in-flight TX to settle... */
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		/* ... break out once the engine is idle or stopped */
	}
	/* ...then disable the channel and wait for it to go down. */
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		/* ... poll until DISABLED is reported, or time out */
	}
	/* ... */
	return 0;
}
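
/* Check whether a DMA mapping is usable for this engine: the 30-bit
 * engine can only reach the first 1 GiB of bus space and the 32-bit
 * engine the first 4 GiB, so out-of-range mappings are undone and
 * reported as errors, letting callers retry from ZONE_DMA. */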
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* The mapping is out of range; retry from ZONE_DMA. */
		gfp_flags |= GFP_DMA;
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
	/* ... zero the RX header / hw txstatus cookie in the new buffer */

	return 0;
}
/* Allocate the initial descriptor buffers (RX rings only). */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	int err = -ENOMEM;
	int i;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	ring->used_slots = ring->nr_slots;

	return 0;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	return err;
}
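
/* Initial setup of the DMA controller: program the ring's bus address
 * (with the SSB translation bits folded in) and set the enable bit;
 * RX rings get their descriptor buffers allocated here first. */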
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		value = B43legacy_DMA32_TXENABLE;
		/* ... fold the address-extension bits into value */
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			return err;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	}

	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb)
			continue;
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}
/* Probe the DMA width: set the address-extension bit in the TX control
 * register and check whether it sticks. */
static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}
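
/* Main initialization for one ring: allocate the meta array and (for
 * TX) the txhdr cache, verify the cache can be DMA-mapped, then
 * allocate the ring memory and program the controller. */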
static struct b43legacy_dmaring *
b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
			int controller_index,
			int for_tx,
			enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	dma_addr_t dma_test;
	int nr_slots;
	int err;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = for_tx ? B43legacy_TXRING_SLOTS : B43legacy_RXRING_SLOTS;
	ring->meta = kcalloc(nr_slots,
			     sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* Test that the txhdr cache is actually DMA-able. */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);
		if (b43legacy_dma_mapping_error(ring, dma_test,
				sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* Out of range; retry from ZONE_DMA. */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;
			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);
			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}
		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	ring->type = type;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else if (ring->index == 0) {
		ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
		ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
	} else if (ring->index == 3) {
		ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
		ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled before entering this function, so
	 * there is no concurrency with the RX handler to worry about. */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	b43legacy_destroy_dmaring(dma->rx_ring0);
	b43legacy_destroy_dmaring(dma->tx_ring5);
	b43legacy_destroy_dmaring(dma->tx_ring4);
	b43legacy_destroy_dmaring(dma->tx_ring3);
	b43legacy_destroy_dmaring(dma->tx_ring2);
	b43legacy_destroy_dmaring(dma->tx_ring1);
	b43legacy_destroy_dmaring(dma->tx_ring0);
}
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try the requested mask first; if the machine cannot do it,
	 * step down (64 -> 32 -> 30 bit), since the device can always
	 * also work with a narrower mask. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback)
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to "
			      "%u-bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));

	return 0;
}
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type;
	u64 dmamask;
	int err;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* Setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* Setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	goto out;
}
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Pick the cookie's top nibble from the ring index
	 * (0xA000 for tx_ring0 up to 0xF000 for tx_ring5). */
	switch (ring->index) {
	/* ... */
	}
	cookie |= (u16)slot;

	return cookie;
}

static struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
					      u16 cookie, int *slot)
{
	struct b43legacy_dmaring *ring = NULL;

	/* Invert the mapping above: 0xA000..0xF000 -> tx_ring0..5. */
	switch (cookie & 0xF000) {
	/* ... */
	}
	*slot = (cookie & 0x0FFF);

	return ring;
}
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET 2

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* Create a bounce buffer in ZONE_DMA on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr,
						skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	wmb();	/* Previous stuff MUST be done before poking the device. */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}
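
/* Debug-only helper: at most once per second (last_injected_overflow
 * + HZ), pretend the TX ring overflowed, to exercise mac80211's queue
 * stop/wake handling. */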
static inline int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}
int b43legacy_dma_tx(struct b43legacy_wldev *dev, struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));

	/* ... sanity checks: the ring must be a running TX ring with at
	 * least SLOTS_PER_PACKET free slots */

	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. Stop the mac80211 queue. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);

		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
	}
	return err;
}
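
/* TX status handling: walk the ring from the oldest in-flight slot,
 * unmap each fragment, hand the last fragment's skb to mac80211 and
 * wake the queue again if it had been stopped. */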
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	struct ieee80211_tx_info *info;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	/* Sanity check: TX status reports must arrive in ring order. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3), 1);

		if (meta->is_last_fragment) {
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit; the xmit
			 * code overwrote it with the hardware's count. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count >
			    dev->wl->hw->conf.short_frame_max_tx_count) {
				/* The short (RTS) retries exceeded the limit,
				 * so the selected rate was never tried. */
				info->status.rates[0].count = 0;
			} else if (status->frame_count > retry_limit) {
				info->status.rates[0].count = retry_limit;
			} else {
				info->status.rates[0].count = status->frame_count;
				info->status.rates[1].idx = -1;
			}
			/* Report to mac80211, which frees the skb. */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			meta->skb = NULL;
		}
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}

	if (ring->stopped)
		ring->stopped = false;

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* The driver queue is running; wake the mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
	}
}
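
/* RX path for one slot. Ring 3 carries hardware TX status reports
 * instead of frames; real frames are length-checked against
 * rx_buffersize and passed to b43legacy_rx() once a fresh buffer has
 * been hooked into the slot. */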
static void dma_rx(struct b43legacy_dmaring *ring, int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	dma_addr_t dmaaddr;
	u16 len;
	int err;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received a hardware TX status; wait for the device
		 * to finish writing it, then hand it off. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0 && i++ < 100) {
			udelay(10);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* Recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);
		return;
	}

	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		/* The firmware may not have written the length yet. */
		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			return;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The frame is split over multiple buffers. This should
		 * never happen, so drop all the involved buffers. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		return;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		return;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);
	b43legacy_rx(ring->dev, skb, rxhdr);
}
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	current_slot = op32_get_current_rxslot(ring);

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
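
/* The hardware advances its RX write pointer as frames arrive;
 * b43legacy_dma_rx() drains every slot between the driver's read
 * pointer (ring->current_slot) and that hardware pointer. */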
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
}
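
/* The suspend/resume pair brackets periods where the MAC may sleep;
 * resume re-enables the rings in the reverse of the suspend order. */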