#include <linux/netdevice.h>

static ushort sdma_descq_cnt = 256;
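/* Bits defined in the send DMA descriptor. */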
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
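/*
 * Sketch of the 128-bit descriptor layout, reconstructed from the bits
 * above and the field encoding in make_sdma_desc() below (the
 * generation and dword-count ORs are elided from this excerpt):
 *
 *   qw[1]        SDmaPhyAddr[47:32]
 *   qw[0] 63:32  SDmaPhyAddr[31:0] (word aligned, low two bits clear)
 *         31:30  SDmaGeneration[1:0]
 *         26:16  SDmaDwordCount[10:0]
 *         15:11  INTR/USE_LARGE_BUF/DMA_HEAD/FIRST/LAST flag bits
 *         10:0   SDmaBufOffset[12:2]
 */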
/* forward declarations */
static void sdma_complete(struct kref *);

static void sdma_sw_clean_up_task(unsigned long);
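/*
 * sdma_complete() is the kref release callback: it runs when the last
 * reference to the SDMA state is dropped via the kref_put() call below.
 */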
static void sdma_complete(struct kref *kref)
	kref_put(&ss->kref, sdma_complete);
		list_del_init(&txp->list);

			unmap_desc(ppd, idx);
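			/* unmap_desc() is called for each descriptor index
			 * the request occupied (the surrounding loop is
			 * elided from this excerpt) */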
static void sdma_sw_clean_up_task(unsigned long opaque)
	clear_sdma_activelist(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	ppd->dd->f_sdma_hw_start_up(ppd);
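	/* dd->f_* members are per-chip function pointers; this hook does
	 * the hardware-specific SDMA engine start-up */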
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
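	/* this only queues the tasklet at high softirq priority; the
	 * work itself runs later in sdma_sw_clean_up_task() */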
	if (action[next_state].op_halt)
	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
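	/*
	 * Decode of the descriptor for unmapping: the upper physical
	 * address bits sit in qw[1], the lower 32 in the top half of
	 * qw[0].  (desc[0] >> 14) & (0x7ff << 2) extracts the 11-bit
	 * SDmaDwordCount field (LSB 16) pre-scaled by 4, i.e. the
	 * mapped length in bytes.
	 */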
281 "failed to allocate SendDMA descriptor FIFO memory\n");
290 "failed to allocate SendDMA head memory\n");
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;

	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
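	/*
	 * Worked example with a hypothetical word-aligned address,
	 * addr = 0x123456784 and dwoffset = 2:
	 *   sdmadesc[1] = 0x1;                  SDmaPhyAddr[47:32]
	 *   sdmadesc[0] = 0x23456784ULL << 32;  SDmaPhyAddr[31:0]
	 *   sdmadesc[0] |= 2;                   SDmaBufOffset[12:2]
	 */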
			list_del_init(&txp->list);
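			/* every descriptor of this request has been retired
			 * by the hardware: drop it from the active list (its
			 * completion callback runs next in the full source) */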
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* only do work while the engine is actually running */
	if (__qib_sdma_running(ppd))
	ret = alloc_sdma(ppd);

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);
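	/* ppd is the tasklet's opaque argument; sdma_sw_clean_up_task()
	 * casts it back to a struct qib_pportdata pointer */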
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;

	clear_sdma_activelist(ppd);
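	/*
	 * Queueing the request and then flushing the whole active list,
	 * rather than completing it directly, keeps error completions in
	 * the same order as normal ones.
	 */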
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {

		ppd->dd->f_sdma_set_desc_cnt(ppd,
				ppd->sdma_descq_cnt / 2);
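		/*
		 * Too few free descriptors for this request.  The halved
		 * count passed above presumably lowers the chip's
		 * descriptor threshold so it interrupts sooner and space
		 * is reclaimed faster (an inference from the hook's name;
		 * the surrounding retry logic is elided).
		 */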
	/* the first descriptor covers the packet header, at buffer offset 0 */
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
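		/* one payload descriptor per mapped SGE fragment: dw dwords
		 * at the running dword offset */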
		} else if (sge->length == 0 && sge->mr->lkey) {

			if (++sge->m >= sge->mr->mapsz)

			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
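		/*
		 * SGE advance for a registered memory region (nonzero lkey):
		 * when the current segment is exhausted, move to the next
		 * entry in the MR's map[]/segs[] arrays (the wrap of the
		 * n index is elided from this excerpt).
		 */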
	ppd->dd->f_sdma_update_tail(ppd, tail);
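	/* the tail write is what hands the new descriptors to the
	 * hardware, so it happens only after they are all filled in */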
		unmap_desc(ppd, tail);
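		/* DMA-error path: the elided loop walks tail back toward
		 * the original sdma_descq_tail, unmapping each descriptor
		 * that was posted for this request */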
	/* qp->r_lock is taken before qp->s_lock and released in reverse */
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);

	} else if (qp->s_wqe)

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	spin_lock(&qp->s_lock);
		dev = &ppd->dd->verbs_dev;

		if (list_empty(&qp->iowait)) {
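			/* an empty iowait list entry means the QP is not
			 * yet queued for a descriptor-available wakeup;
			 * queue it at most once */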
		spin_unlock(&qp->s_lock);

		spin_unlock(&qp->s_lock);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
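/*
 * The remaining fragments come from the SDMA state-machine event
 * processing; each call below sits under an elided state/event case.
 */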
			sdma_sw_tear_down(ppd);

			sdma_sw_tear_down(ppd);

			sdma_sw_tear_down(ppd);

			sdma_hw_start_up(ppd);

			sdma_start_sw_clean_up(ppd);

			sdma_start_sw_clean_up(ppd);

			sdma_start_sw_clean_up(ppd);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			sdma_start_sw_clean_up(ppd);

			sdma_start_sw_clean_up(ppd);

			sdma_start_sw_clean_up(ppd);