#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#define TX_SLOTS_PER_FRAME 2
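/* A transmitted frame occupies two descriptor slots: one for the
 * cached device TX header and one for the frame body (see
 * dma_tx_fragment() below). */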
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;

	slot = (int)(&(desc->dma32) - descbase);
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	/* ... write the slot's byte offset to the TX index register ... */
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	/* ... set the suspend bit in the TX control register ... */
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	/* ... clear the suspend bit in the TX control register ... */
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	/* ... read the RX status register and convert the descriptor
	 * offset into a slot index ... */
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	/* ... write the slot's byte offset to the RX index register ... */
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
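/* Common code never touches the 32-bit or 64-bit descriptor layouts
 * directly; everything goes through ring->ops. A minimal usage sketch
 * (hypothetical local variables, following the calls made elsewhere in
 * this file):
 *
 *	desc = ring->ops->idx2desc(ring, slot, &meta);
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 0, 1, 1);
 *	ring->ops->poke_tx(ring, next_slot(ring, slot));
 */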
	/* In op64_idx2desc(): index into the array of 64-bit descriptors. */
	desc = &(desc[slot]);
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;

	slot = (int)(&(desc->dma64) - descbase);
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	/* ... write the slot's byte offset to the 64-bit TX index register ... */
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	/* ... set the suspend bit in the 64-bit TX control register ... */
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	/* ... clear the suspend bit in the 64-bit TX control register ... */
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	/* ... read the 64-bit RX status register and convert the
	 * descriptor offset into a slot index ... */
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	/* ... write the slot's byte offset to the 64-bit RX index register ... */
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)

static inline int next_slot(struct b43_dmaring *ring, int slot)

static inline int prev_slot(struct b43_dmaring *ring, int slot)
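/* A sketch of the circular-buffer arithmetic these three helpers
 * implement over the ring's nr_slots descriptor slots (illustrative,
 * assuming the obvious wrap-around semantics):
 *
 *	free_slots: ring->nr_slots - ring->used_slots
 *	next_slot:  slot == ring->nr_slots - 1 ? 0 : slot + 1
 *	prev_slot:  slot == 0 ? ring->nr_slots - 1 : slot - 1
 */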
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE))
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring,
			       int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		/* ... B43_MMIO_DMA64_BASE* offsets, one per controller ... */
	};
	static const u16 map32[] = {
		/* ... B43_MMIO_DMA32_BASE* offsets, one per controller ... */
	};

	if (type == B43_DMA_64BIT)
		return map64[controller_idx];
	return map32[controller_idx];
}
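/* Every DMA controller owns a fixed block of MMIO registers;
 * b43_dmacontroller_base() translates a controller index into the base
 * offset of that block for the requested engine width, so register
 * accesses elsewhere are simply "mmio_base + register offset". */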
static dma_addr_t map_descbuffer(struct b43_dmaring *ring,
				 unsigned char *buf, size_t len, int tx)
{
	/* ... dma_map_single() with DMA_TO_DEVICE for tx,
	 * DMA_FROM_DEVICE otherwise ... */
}
static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* ... ring_mem_size computed from the engine type ... */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
static void free_ringmemory(struct b43_dmaring *ring)
	/* In b43_dmacontroller_rx_reset(): disable the engine, then poll
	 * until it reports disabled. */
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		value = b43_read32(dev, mmio_base + offset);
		/* ... check the status bits; break out once the engine
		 * reports disabled, sleeping 1 ms between reads ... */
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	for (i = 0; i < 10; i++) {
		value = b43_read32(dev, mmio_base + offset);
		/* ... wait for any in-progress transfer to go idle ... */
	}
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		value = b43_read32(dev, mmio_base + offset);
		/* ... poll until the engine reports disabled ... */
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
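/* The explicit range check matters because a mapping can succeed yet
 * yield an address the engine physically cannot reach: a 30-bit engine
 * cannot address buffers at or above 1 GiB. Callers react by retrying
 * the allocation from the GFP_DMA zone (see setup_rx_descbuffer() and
 * dma_tx_fragment() below). */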
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring,
				      struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
static void b43_poison_rx_buffer(struct b43_dmaring *ring,
				 struct sk_buff *skb)
{
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF,
	       sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
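/* The poison pairs with b43_rx_buffer_is_poisoned() above: the PLCP
 * header area is prefilled with 0xFF before the buffer is handed to the
 * hardware. If those bytes still read back as all-0xFF when the engine
 * claims the buffer holds a frame, the DMA write never happened and the
 * buffer is dropped and recycled (see dma_rx() below). */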
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	/* ... allocate the skb ... */
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;
		/* ... free the skb and allocate a fresh one ... */
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);
		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	/* ... success: mark all slots used and return 0 ... */

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);
		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	/* ... program the engine's ring base/control registers, then for
	 * RX rings populate the buffers: ... */
	err = alloc_initial_descbuffers(ring);
static void dmacontroller_cleanup(struct b43_dmaring *ring)

static void free_all_descbuffers(struct b43_dmaring *ring)
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		/* ... return DMA_BIT_MASK(64) if the BCMA core flags
		 * 64-bit DMA support ... */
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		/* ... return DMA_BIT_MASK(64) if the SSB core flags
		 * 64-bit DMA support ... */
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	/* ... probe the first engine's 32-bit address-extension bit and
	 * fall back to a 30-bit mask if it is absent ... */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx, enum b43_dmatype type)
{
	/* ... allocate the ring and its meta array ... */
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (ring->index == 0) {
		switch (dev->fw.hdr_format) {
		/* ... pick rx_buffersize and frameoffset to match the
		 * firmware header format ... */
		}
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif
	if (b43_dma_mapping_error(ring, dma_test,
				  b43_txhdr_size(dev), 1)) {
		/* ugh realloc: retry the txhdr_cache allocation with GFP_DMA */
		/* ... */
		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			b43err(dev->wl,
			       "TXHDR DMA allocation failed\n");
			goto err_kfree_txhdr_cache;
		}
	}

	dma_unmap_single(dev->dev->dma_dev,
			 dma_test, b43_txhdr_size(dev),
			 DMA_TO_DEVICE);
	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
	/* ... remaining unwind elided ... */
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
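/* do_div(x, y) divides the 64-bit x in place and returns the remainder,
 * which is why divide() re-yields __a (the quotient) while modulo()
 * yields the do_div() return value. This keeps the 64-bit statistics
 * below working on 32-bit hosts. Example: divide(2500ULL, 1000) == 2,
 * modulo(2500ULL, 1000) == 500. */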
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;
		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
1014 "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
1015 "Average tries %llu.%02llu\n",
1016 (
unsigned int)(ring->
type), ringname,
1017 ring->max_used_slots,
1019 (
unsigned long long)failed_packets,
1020 (
unsigned long long)nr_packets,
1021 (
unsigned long long)
divide(permille_failed, 10),
1022 (
unsigned long long)
modulo(permille_failed, 10),
1023 (
unsigned long long)
divide(average_tries, 100),
1024 (
unsigned long long)
modulo(average_tries, 100));
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);
#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)
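/* __stringify(ring) turns the member name itself into the debug label,
 * so destroy_ring(dma, tx_ring_AC_BK) tears the ring down and reports
 * it as "tx_ring_AC_BK" in the statistics output above. */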
	if (b43_using_pio_transfers(dev))
		return;
	bool fallback = false;
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return true;
#endif
	return false;
}
	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		/* ... fetch the bus DMA translation from the BCMA core ... */
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		/* ... fetch the bus DMA translation from the SSB core ... */
		break;
#endif
	}
#ifdef CONFIG_B43_BCMA
	if (dev->dev->bus_type == B43_BUS_BCMA)
		/* ... BCMA overrides the translation placement ... */;
#endif
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;
	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits. */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;
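/* Worked example: ring->index == 2 and slot == 0x01A give
 * cookie = ((2 + 1) << 12) | 0x01A = 0x301A. parse_cookie() below
 * reverses this with (cookie & 0xF000) for the ring and
 * (cookie & 0x0FFF) for the slot. */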
	switch (cookie & 0xF000) {
	/* ... 0x1000 through 0x5000 select one of the five TX rings ... */
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, old_top_slot, old_used_slots;
	size_t hdrsize = b43_txhdr_size(ring->dev);
	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	cookie = generate_cookie(ring, slot);
	/* ... build the device TX header into ring->txhdr_cache ... */
	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		/* ... copy the skb into a GFP_DMA bounce buffer, remap ... */
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			/* ... unwind the slots and return -EIO ... */
		}
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
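/* Note the two request_slot() calls above: each frame consumes
 * TX_SLOTS_PER_FRAME (= 2) descriptors, the first for the device TX
 * header, the second for the skb payload. The payload descriptor is the
 * one marked as frame end and armed with an IRQ, and poke_tx() then
 * tells the hardware the new ring tail. */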
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
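/* The injection is rate-limited to one synthetic overflow per second
 * (last_injected_overflow + HZ), so the queue stop/wake path below is
 * exercised regularly without starving real traffic. */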
	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			fallthrough;
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM. */
		ring = dev->dma.tx_ring_mcast;
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev,
					       skb_get_queue_mapping(skb));
	}
	if (unlikely(ring->stopped)) {
		/* A mac80211 race can queue one frame after the queue
		 * was stopped; refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		/* Full queue that was not stopped: a real bug. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}
	/* The queue-to-ring mapping is static, so the ring can remember
	 * its mac80211 queue for TX status handling. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
	}
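/* The stop watermark is TX_SLOTS_PER_FRAME: once fewer than two slots
 * remain, the next frame could not be queued anyway, so the mac80211
 * queue is stopped here and woken again from the TX status handler once
 * slots have been reclaimed. */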
	int slot, firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	/* TX frames are reported in-order, so the slot deduced from the
	 * cookie must be the first used slot of the ring. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl,
		       "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}
		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}
			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a
				 * frame, so the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				return;
			}

			info = IEEE80211_SKB_CB(meta->skb);
#ifdef CONFIG_B43_DEBUG
			if (status->acked)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				return;
			}
		}

		ring->used_slots--;
		if (meta->is_last_fragment)
			break;	/* this was the last fragment of the frame */
		slot = next_slot(ring, slot);
	}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;

	desc = ops->idx2desc(ring, *slot, &meta);

	/* The hardware writes the frame length into the start of the
	 * buffer; it may take a few retries for it to show up. */
	do {
		/* ... re-read the length from the RX header ... */
	} while (len == 0 && i++ < 5);
	if (unlikely(len == 0))
		goto drop_recycle_buffer;
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA. The device did not
		 * touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The frame is split over multiple buffers; recycle each
		 * involved buffer and drop the frame. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
	int slot, current_slot;
	int used_slots = 0;

	current_slot = ring->ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
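/* The RX path is position-driven rather than per-descriptor: the
 * handler reads the engine's current slot and walks every slot from the
 * driver's last position up to (but not including) it, recycling each
 * buffer back to the hardware as it goes. */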
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
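/* Resume runs in the reverse order of suspend (mcast first, AC_BK
 * last), re-enabling the rings in the opposite sequence to the one in
 * which they were quiesced. */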
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));
	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}