#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
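/* module parameters controlling how many descriptors are allocated per
 * channel ring and how large the ring may grow.
 */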
49 "ioat2+: allocate 2^n descriptors per channel"
50 " (default: 8 max: 16)");
54 "ioat2+: upper limit for ring size (default: 16)");
	ioat->dmacount += ioat2_ring_pending(ioat);
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",

	if (ioat2_ring_pending(ioat)) {
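/* __ioat2_start_null_desc: post a NULL descriptor to (re)program the
 * channel's chain address and kick it into operation.
 */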
	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");

	desc = ioat2_get_ring_ent(ioat, ioat->head);
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);

	__ioat2_start_null_desc(ioat);
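/* __cleanup: walk the ring from 'tail', completing cookies and advancing
 * the tail, until the descriptor whose address matches the completion
 * writeback (phys_complete) has been processed.
 */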
	bool seen_current = false;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
			dma_cookie_complete(tx);
		if (tx->phys == phys_complete)
			seen_current = true;
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current);
	if (active - i == 0) {

		__cleanup(ioat, phys_complete);
209 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
212 if (ioat2_ring_pending(ioat)) {
215 desc = ioat2_get_ring_ent(ioat, ioat->
tail);
216 ioat2_set_chainaddr(ioat, desc->
txd.phys);
219 __ioat2_start_null_desc(ioat);
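/* ioat2_quiesce: suspend the channel and poll CHANSTS until it leaves the
 * active/idle states (or the optional timeout expires).
 */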
	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
	while (ioat_reset_pending(chan)) {

		__cleanup(ioat, phys_complete);

		status = ioat_chansts(chan);
		if (is_ioat_halted(status)) {
			BUG_ON(is_ioat_bug(chanerr));
			__cleanup(ioat, phys_complete);
		ioat2_restart_channel(ioat);

	active = ioat2_ring_active(ioat);
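/* ioat2_enumerate_channels: read the channel count and per-transfer size
 * cap from the device registers and set up one ioat2 channel per slot.
 */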
	struct device *dev = &device->pdev->dev;

		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
	if (xfercap_log == 0)
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)

	for (i = 0; i < dma->chancnt; i++) {

	cookie = dma_cookie_assign(tx);
	ioat2_update_pending(ioat);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	memset(hw, 0, sizeof(*hw));

		pci_pool_free(dma->dma_pool, hw, phys);

	desc->txd.tx_submit = ioat2_tx_submit_unlock;
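/* ioat2_alloc_ring: allocate 2^order descriptors and chain each hardware
 * descriptor's 'next' pointer to its successor, wrapping the last entry
 * back to the first so the ring forms a circular list.
 */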
	int descs = 1 << order;

	ring = kcalloc(descs, sizeof(*ring), flags);
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);

	for (i = 0; i < descs-1; i++) {

	ring[i]->hw->next = ring[0]->txd.phys;
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);

	ioat2_start_null_desc(ioat);

		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
			"failed to start channel chanerr: %#x\n", chanerr);
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);

	if (active == curr_size)
	if (active >= new_size)

	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
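/* growing: re-index the existing descriptors into their (tail + i) slots of
 * the larger ring and allocate the additional entries; shrinking: copy the
 * live descriptors over, free the surplus ones, and splice the hardware
 * chain at the new ring boundary.
 */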
	if (new_size > curr_size) {
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];

		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];

		for (i = new_size; i < curr_size; i++) {
			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);

		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
	if (likely(ioat2_ring_space(ioat) > num_descs)) {

		if (printk_ratelimit())
			dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			   dma_addr_t dma_src, size_t len, unsigned long flags)

	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
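/* a copy longer than the channel's transfer cap is split across num_descs
 * descriptors taken from consecutive ring slots; only the last descriptor
 * requests a completion writeback.
 */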
		desc = ioat2_get_ring_ent(ioat, idx + i);
	} while (++i < num_descs);

	hw->ctl_f.compl_write = 1;
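/* ioat2_free_chan_resources: release the idle descriptors from 'head'
 * first, then any descriptors still outstanding from 'tail'.
 */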
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);
	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		ioat2_free_ring_ent(desc, c);

	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,

	.default_attrs = ioat2_attrs,
	ioat_set_tcp_copy_break(2048);

		chan = to_chan_common(c);