#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/rbtree.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for about 5 seconds */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
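
/*
 * From qib_user_sdma_queue_create(): each user context gets its own packet
 * queue with an empty "sent" list, a kmem_cache for packet structures and a
 * dma_pool for expected-length headers, both named per unit/context.
 */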
        INIT_LIST_HEAD(&pq->sent);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        /* ... */
        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, size_t offset, size_t len,
                                    int put_page, int dma_mapped,
                                    struct page *page,
                                    void *kvaddr, dma_addr_t dma_addr)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
}

/* fragment 0 of every packet holds the header (PBC plus user header) */
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
                                      u32 counter, size_t offset,
                                      size_t len, int dma_mapped,
                                      struct page *page,
                                      void *kvaddr, dma_addr_t dma_addr)
{
        pkt->naddr = 1;
        pkt->counter = counter;
        qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
                                kvaddr, dma_addr);
}
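
/*
 * qib_user_sdma_coalesce(): copy the whole user payload into one freshly
 * allocated page and DMA-map it as fragment 1, instead of pinning each user
 * page individually.
 */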
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        /* ... */
        for (i = 0; i < niov; i++) {
                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                /* ... */
        }
        /* ... DMA-map the page ... */
        qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
                                dma_addr);
        /* ... */
}
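
/* How many pages does this iovec element span? */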
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr  = (unsigned long) iov->iov_base;
        const unsigned long len   = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}
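
/* Truncate a length so it does not run past the end of the page at addr. */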
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
        const unsigned long offset = addr & ~PAGE_MASK;

        return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}
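
/*
 * qib_user_sdma_free_pkt_frag() releases one fragment: DMA-unmap and
 * kunmap/put (or free) its page, or hand header memory back to the dma_pool.
 */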
static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int i)
{
        if (pkt->addr[i].page) {
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr)
                /* coherent header memory came from the dma_pool */
                dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
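
/*
 * qib_user_sdma_pin_pages(): pin the user pages backing one iovec element
 * with get_user_pages() and DMA-map each page as a new packet fragment.
 */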
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
{
        struct page *pages[2];
        int i, j, ret;

        ret = get_user_pages(current, current->mm, addr,
                             npages, 0, 1, pages, NULL);
        if (ret != npages) {
                /* undo the partial pin */
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -ENOMEM;
                goto done;
        }

        for (j = 0; j < npages; j++) {
                const int flen = qib_user_sdma_page_length(addr, tlen);
                /* ... dma_map_page() the page, compute fofs ... */
                qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
                                        pages[j], kmap(pages[j]), dma_addr);
                /* ... advance addr and tlen, bump pkt->naddr ... */
        }
done:
        return ret;
}
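
/*
 * qib_user_sdma_pin_pkt(): pin every iovec element of the payload; on
 * failure, free any fragments that were already set up.
 */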
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }
        goto done;

free_pkt:
        /* unwind the fragments pinned so far */
        for (idx = 0; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
done:
        return ret;
}
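
/*
 * qib_user_sdma_init_payload(): either coalesce the payload into a single
 * page or pin the user pages directly, depending on how many pages it spans.
 */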
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        int ret = 0;

        if (npages >= ARRAY_SIZE(pkt->addr))
                ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}
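
/* Free every packet on a list, fragment by fragment, then re-init the list. */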
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *pkt_next;
        int i;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
                kmem_cache_free(pq->pkt_slab, pkt);
        }
        INIT_LIST_HEAD(list);
}
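
/*
 * qib_user_sdma_queue_pkts(): walk the user iovec, validate and copy each
 * packet header, work out how many payload fragments and pages follow it,
 * and queue up to maxpkts packets on "list"; on error the list is freed.
 */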
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_user_sdma_queue *pq,
                                    struct list_head *list,
                                    const struct iovec *iov,
                                    unsigned long niov, int maxpkts)
{
        unsigned long idx = 0;
        int ret = 0, npkts = 0;
        struct page *page = NULL;
        /* ... */

        while (idx < niov && npkts < maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                /* ... allocate pkt; len = iov[idx].iov_len ... */

                /* the first iovec element of each packet is its header */
                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_pkt;
                }
                /* ... copy the header in; pktnw is the word count from the
                 * PBC, pktnwc counts the words actually supplied ... */
                if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen ||
                            slen > PAGE_SIZE) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }
                        npages++;
                        /* one more page if the fragment crosses a boundary */
                        if ((faddr & PAGE_MASK) !=
                            ((faddr + slen - 1) & PAGE_MASK))
                                npages++;
                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }
                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }
                /* ... map the header page if it did not come from the pool ... */

                qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
                                          page, pbc, dma_addr);
                ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                 iov + idx_save + 1,
                                                 nfrags, npages);
                /* ... counter++, npkts++, add pkt to "list" ... */
        }
        /* ... error unwinding ... */
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
        return ret;
}
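
/*
 * qib_user_sdma_queue_clean(): move packets whose descriptors the hardware
 * has retired from the "sent" list to a free list, free them, and record the
 * completion counter of the last one.
 */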
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt, *pkt_prev;
        int ret = 0;

        INIT_LIST_HEAD(&free_list);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                /* ... stop at the first packet the hardware has not
                 * finished with yet ... */
                list_move_tail(&pkt->list, &free_list);
                ret++;
        }

        if (!list_empty(&free_list)) {
                /* ... counter comes from the last completed packet ... */
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }
        return ret;
}
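
/* Advance the hardware descriptor queue under ppd->sdma_lock. */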
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
}
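
/*
 * From qib_user_sdma_queue_drain(): at context close, poll until the "sent"
 * list empties; if it never does within the timeout, complain and force-free
 * whatever is left.
 */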
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        int i;

        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                if (list_empty(&pq->sent))
                        break;
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                msleep(10);
        }

        if (!list_empty(&pq->sent)) {
                struct list_head free_list;

                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                list_splice_init(&pq->sent, &free_list);
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
        }
}
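
/*
 * Descriptor helpers: each SDMA descriptor is a pair of little-endian 64-bit
 * words. desc0 packs the low address bits, generation, dword count and
 * buffer offset; desc1 carries the upper address bits.
 */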
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        u8 tmpgen = ppd->sdma_generation;

        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((tmpgen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
        /* the "last" and "dma head" flags */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}
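
/*
 * qib_user_sdma_send_frag(): write the two descriptor words for one packet
 * fragment into the ring at "tail", marking the packet's first and last
 * fragments.
 */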
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
        if (idx == 0)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (idx == pkt->naddr - 1)
                descq0 = qib_sdma_make_last_desc0(descq0);

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}
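
/*
 * qib_user_sdma_push_pkts(): under ppd->sdma_lock, write descriptors for as
 * many queued packets as the ring has room for, then move those packets to
 * the queue's "sent" list.
 */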
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist)
{
        int ret = 0;
        unsigned long flags;
        u16 tail;

        if (list_empty(pktlist))
                return 0;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        if (unlikely(!__qib_sdma_running(ppd))) {
                ret = -ECOMM;
                goto unlock;
        }

        tail = ppd->sdma_descq_tail;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                unsigned ofs = 0;
                int i;

                if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
                        goto unlock_check_tail;

                for (i = 0; i < pkt->naddr; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
                        ofs += pkt->addr[i].length >> 2;
                        /* ... advance tail, wrapping at the ring end ... */
                }

                /* ... large packets flag every descriptor: ... */
                for (i = 0; i < pkt->naddr; i++) {
                        /* ... */
                }

                list_move_tail(&pkt->list, &pq->sent);
                ret++;
        }

unlock_check_tail:
        /* ... tell the chip about the new tail if it moved ... */
unlock:
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
}
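
/*
 * qib_user_sdma_writev(): entry point for a user writev(); build packets
 * from the iovec, lazily clean the hardware queue when it looks full, and
 * push the new packets onto the SDMA engine.
 */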
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        /* ... */
        INIT_LIST_HEAD(&list);

        if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
        }

        /* ... build up to mxp packets per pass from the user iovec ... */
        ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);

        if (!list_empty(&list)) {
                /* lazily clean the hw queue; ~4 descriptors per packet */
                if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
                        qib_user_sdma_hwqueue_clean(ppd);
                        qib_user_sdma_queue_clean(ppd, pq);
                }

                ret = qib_user_sdma_push_pkts(ppd, pq, &list);
                /* packets left on the list could not be pushed */
                if (!list_empty(&list))
                        goto done;
        }
        /* ... */
done:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        return (ret < 0) ? ret : npkts;
}
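
/*
 * Final fragment, from the driver's progress helper
 * (qib_user_sdma_make_progress()): clean the hardware queue, then retire
 * completed packets from the "sent" list.
 */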
        qib_user_sdma_hwqueue_clean(ppd);
        ret = qib_user_sdma_queue_clean(ppd, pq);