#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/rbtree.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"
/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK	((1 << 11) - 1)
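/*
 * Per-port queue state: packets handed to the SDMA engine sit on the
 * "sent" list until their completions are reaped; packet structs come
 * from a per-port slab cache and fixed-size headers from a per-port
 * dma_pool, both named for unit/port/subport below.
 */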
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	/* ... allocate pq, zero its counters ... */
	INIT_LIST_HEAD(&pq->sent);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	/* ... kmem_cache_create(pq->pkt_slab_name, ...) ... */

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	/* ... dma_pool_create(pq->header_cache_name, dev, ...) ... */
}
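/*
 * Fill one slot of pkt->addr[]: the fragment's offset and length,
 * whether its page must be put and/or DMA-unmapped on free, plus its
 * kernel mapping and bus address.
 */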
static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
				      int i, size_t offset, size_t len,
				      int put_page, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}
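/* fragment 0 of every packet is the packet header */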
static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
					u32 counter, size_t offset,
					size_t len, int dma_mapped,
					struct page *page,
					void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				  kvaddr, dma_addr);
}
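/*
 * Too many pages in the iovec to describe in pkt->addr[]: coalesce the
 * payload into a single freshly allocated page instead.
 */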
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
				    struct ipath_user_sdma_pkt *pkt,
				    const struct iovec *iov,
				    unsigned long niov)
{
	/* ... alloc_page() and kmap() a bounce page (mpage/mpage_save) ... */

	for (i = 0; i < niov; i++) {
		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		/* ... fail with -EFAULT, else advance mpage and total len ... */
	}

	/* ... dma_map_page() the coalesced buffer into dma_addr ... */
	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				  dma_addr);
	pkt->naddr = 2;
	/* ... */
}
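/* how many pages does this iovec element span? */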
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
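/* truncate a fragment length to the page boundary */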
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}
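/*
 * Release one fragment: DMA-unmap, kunmap() and put or free its page,
 * or hand a pool-allocated header back to the dma_pool.
 */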
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* a header allocated from the dma_pool */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
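/*
 * Pin the user pages backing one iovec element and record each as a
 * fragment; returns the number of pages pinned or a negative errno.
 */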
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		/* release whatever was pinned before failing */
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		/* ... dma_map_page() each page; fofs = addr & ~PAGE_MASK ... */
		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);
		/* ... pkt->naddr++; addr += flen; tlen -= flen; ... */
	}

done:
	return ret;
}
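/* pin and map every iovec element of a payload, unwinding on error */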
static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = ipath_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = ipath_user_sdma_pin_pages(dd, pkt,
						addr, iov[idx].iov_len,
						npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	/* undo any fragments already set up before the failure */
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}
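/*
 * Choose the payload strategy: coalesce into one page when the iovec
 * spans more pages than pkt->addr[] can hold, otherwise pin in place.
 */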
static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
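/* free an entire packet list, returning each pkt to the slab cache */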
static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}
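/*
 * Copy and validate headers, pin or coalesce payloads, and build up to
 * maxpkts packets on *list; pq->lock must be held.  All packets are
 * queued before any is pushed, so a bad iovec fails the whole batch.
 */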
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long niov,
				      int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	unsigned len;
	unsigned pktnw, pktnwc;
	int nfrags, npages, dma_mapped;
	u32 counter = pq->counter;
	struct ipath_user_sdma_pkt *pkt = NULL;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;

		nfrags = npages = dma_mapped = 0;
		len = iov[idx].iov_len;

		/* ... allocate pkt from pq->pkt_slab ... */

		/* the header must be a sane, dword-aligned size */
		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		/* ... allocate pbc (dma_pool or page) and copy the header in ... */

		/* the PBC counts every word of the packet except its own first */
		pktnwc = (len >> 2) - 1;
		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			/* one extra page if the fragment crosses a boundary */
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		/* ... dma_map the header page if it was not pool-backed ... */

		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					    page, pbc, dma_addr);

		if (nfrags) {
			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
							   iov + idx_save + 1,
							   nfrags, npages);
			if (ret < 0)
				goto free_pbc;
		}

		counter++;
		npkts++;
		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc:
	/* ... unmap/free the header page or return pbc to the dma_pool ... */
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}
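/* move completed packets off the sent queue and free them -- needs pq->lock */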
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct list_head free_list;
	struct ipath_user_sdma_pkt *pkt, *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		/* ... break at the first packet the engine has not retired ... */
		list_move_tail(&pkt->list, &free_list);
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);
		counter = pkt->counter;

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		ipath_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}
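/* let the engine retire descriptors; returns > 0 if progress was made */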
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

/* we're in close: drain any outstanding packets so cleanup can succeed */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	/* poll up to 100 times for the sent list to drain */
	for (i = 0; i < 100; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	/* anything still outstanding is forcibly freed */
	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}
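/*
 * Descriptor layout: each ring entry is two little-endian qwords.
 * qw[0] packs the low 32 address bits, the generation, the dword count
 * and the buffer offset, along with first/last/head flags; qw[1]
 * carries the high address bits.
 */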
static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
	/* set the "last" and "dma head" bits */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}
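/*
 * Write the two descriptor qwords for one fragment into ring slot
 * "tail", flagging the first and last fragments of the packet.
 */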
static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
				      struct ipath_user_sdma_pkt *pkt,
				      int idx, unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &dd->ipath_sdma_descq[tail].qw[0];

	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = ipath_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = ipath_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = ipath_sdma_make_desc1(addr);
}
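/* pq->lock must be held: put packets on the wire */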
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	int ret = 0;
	unsigned long flags;
	u16 tail;

	if (list_empty(pktlist))
		return 0;

	/* ... fail with -ECOMM unless the link is active ... */

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	tail = dd->ipath_sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;

		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;
			/* ... wrap tail, bumping the generation on overflow ... */
		}

		if ((ofs << 2) > dd->ipath_ibmaxlen) {
			ipath_dbg("packet size %X > ibmax %X, fail\n",
				  ofs << 2, dd->ipath_ibmaxlen);
			ret = -EMSGSIZE;
			goto unlock;
		}

		/* large (>= 2KB) packets: flag every descriptor */
		if (ofs >= IPATH_SMALLBUF_DWORDS) {
			for (i = 0; i < pkt->naddr; i++) {
				/* ... set the large-buffer bit in each qw[0] ... */
			}
		}

		/* ... record pkt->added for completion accounting ... */
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the hardware tail register if we queued anything */
	if (dd->ipath_sdma_descq_tail != tail) {
		wmb();
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
		dd->ipath_sdma_descq_tail = tail;
	}

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}
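/*
 * writev() entry point for user SDMA: reap completions, build packets
 * from the caller's iovecs in batches of mxp, and push them onto the
 * hardware ring.
 */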
int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* catch up on completions before queueing more work */
	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
	}

	while (dim) {
		const int mxp = 8;	/* max packets per batch */

		/* ... hold current->mm's mmap semaphore across the pin ... */
		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		if (ret <= 0)
			goto done_unlock;

		dim -= ret;
		iov += ret;

		if (!list_empty(&list)) {
			/*
			 * lazily clean the hw queue; 4 is a guess at how
			 * many descriptors a packet takes.
			 */
			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
				ipath_user_sdma_hwqueue_clean(dd);
				ipath_user_sdma_queue_clean(dd, pq);
			}

			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
			if (ret < 0)
				goto done_unlock;

			npkts += ret;
			pq->counter += ret;

			/* ring full: stop rather than spin */
			if (!list_empty(&list))
				goto done_unlock;
		}
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}
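/* reap completions on behalf of callers outside the writev() path */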
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	ipath_user_sdma_hwqueue_clean(dd);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}