#include <linux/kernel.h>
#include <linux/slab.h>
rdsdebug("local ipaddr = %x port %d, "
	 "remote ipaddr = %x port %d"
	 "..looking for %x port %d, "
	 "remote ipaddr = %x port %d\n",
#ifdef WORKING_TUPLE_DETECTION
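/*
 * With WORKING_TUPLE_DETECTION, the (elided) test matches the full
 * local/remote address+port tuple of the cm_id against the socket.
 * The fallback branch appears to match on the bound local address
 * only, as the remaining fields are not reliably populated during
 * connection setup.
 */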
*cm_id = i_cm_id->cm_id;
spin_lock_irq(&rds_iwdev->spinlock);
spin_unlock_irq(&rds_iwdev->spinlock);
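/*
 * The per-device cm_id list is serialized by rds_iwdev->spinlock;
 * the list insertion itself sits between this lock/unlock pair.
 */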
static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
spin_lock_irq(&rds_iwdev->spinlock);
if (i_cm_id->cm_id == cm_id) {
spin_unlock_irq(&rds_iwdev->spinlock);
rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
rds_iw_remove_cm_id(rds_iwdev, cm_id);
return rds_iw_add_cm_id(rds_iwdev, cm_id);
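/*
 * Update path: re-resolve the device for the cm_id's address tuple,
 * drop any stale entry for the cm_id, then (re)register it on the
 * current device.
 */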
spin_lock_irq(&rds_iwdev->spinlock);
spin_unlock_irq(&rds_iwdev->spinlock);
spin_lock_irq(list_lock);
list_splice(list, &tmp_list);
INIT_LIST_HEAD(list);
spin_unlock_irq(list_lock);
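/*
 * Classic teardown idiom: splice the shared list onto a private
 * tmp_list and re-init the head while holding the lock, then walk
 * tmp_list with the lock dropped.
 */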
return ERR_PTR(-EBUSY);
unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
sg->bytes += dma_len;
end_addr = dma_addr + dma_len;
dma_addr &= ~PAGE_MASK;
if (end_addr & PAGE_MASK) {
	if (i < sg->dma_len - 1)
	end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
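/*
 * A fastreg mapping must cover whole pages: only the first fragment
 * may start, and only the last may end, off a page boundary, and the
 * range is widened to page granularity.  Note that PAGE_MASK is used
 * here as if it were the in-page offset mask (PAGE_SIZE - 1), the
 * inverse of the kernel's usual definition.
 */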
for (i = j = 0; i < sg->dma_len; ++i) {
	unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
	u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);

	end_addr = dma_addr + dma_len;
	dma_addr &= ~PAGE_MASK;
	for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
		dma_pages[j++] = dma_addr;
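/*
 * Flatten the mapped scatterlist into individual page addresses;
 * j counts the entries written to dma_pages[], which later seed the
 * fastreg page list.
 */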
rds_iw_flush_mr_pool(pool, 1);
list_del_init(&ibmr->mapping.m_list);
spin_unlock_irqrestore(&pool->list_lock, flags);
int err = 0, iter = 0;
ibmr = rds_iw_reuse_fmr(pool);
rds_iw_flush_mr_pool(pool, 0);
INIT_LIST_HEAD(&ibmr->mapping.m_list);
err = rds_iw_init_fastreg(pool, ibmr);
rds_iw_destroy_fastreg(pool, ibmr);
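/*
 * Allocation prefers recycling a clean MR from the pool; when the
 * pool limits are hit, it is flushed (free_all == 0) and the attempt
 * is retried, presumably bounded by the iter counter.  A freshly
 * created MR gets its fastreg resources from rds_iw_init_fastreg()
 * and is destroyed again if that fails.
 */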
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
list_splice_init(&pool->dirty_list, &unmap_list);
list_splice_init(&pool->clean_list, &kill_list);
spin_unlock_irqrestore(&pool->list_lock, flags);
if (!list_empty(&unmap_list)) {
	ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
					     &kill_list, &unpinned);
	list_splice_init(&unmap_list, &kill_list);
rds_iw_destroy_fastreg(pool, ibmr);
if (!list_empty(&unmap_list)) {
spin_unlock_irqrestore(&pool->list_lock, flags);
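/*
 * Mappings that were cleaned rather than killed are presumably
 * spliced back onto the pool's clean list here, again under
 * list_lock.
 */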
rds_iw_flush_mr_pool(pool, 0);
rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
rds_iw_free_fastreg(pool, ibmr);
rds_iw_flush_mr_pool(pool, 0);
rds_iw_flush_mr_pool(pool, 0);
ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
ibmr = rds_iw_alloc_mr(rds_iwdev);
ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
*key_ret = ibmr->mr->rkey;
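/*
 * The new MR's rkey is returned to the caller, which advertises it
 * to the remote peer for RDMA access to this memory.
 */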
if (IS_ERR(page_list)) {
	err = PTR_ERR(page_list);
memset(&f_wr, 0, sizeof(f_wr));
f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
f_wr.wr.fast_reg.rkey = mapping->m_rkey;
f_wr.wr.fast_reg.page_list = ibmr->page_list;
f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
f_wr.wr.fast_reg.iova_start = 0;
ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
BUG_ON(failed_wr != &f_wr);
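/*
 * On failure, ib_post_send() points failed_wr at the work request it
 * could not post; with a single WR that can only be &f_wr, which the
 * BUG_ON asserts.
 */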
__func__, __LINE__, ret);
static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
if (!ibmr->cm_id->qp || !ibmr->mr)
memset(&s_wr, 0, sizeof(s_wr));
s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
__func__, __LINE__, ret);
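/*
 * The invalidate path posts a local-invalidate work request (setup
 * largely elided) carrying the MR's rkey, so previously released
 * R_Keys cannot be used once the MR is recycled.
 */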
rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
if (IS_ERR(dma_pages)) {
	ret = PTR_ERR(dma_pages);
for (i = 0; i < mapping->m_sg.dma_npages; ++i)
	ibmr->page_list->page_list[i] = dma_pages[i];

ret = rds_iw_rdma_build_fastreg(mapping);
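/*
 * The flattened page addresses are copied into the device's
 * fast_reg page list; rds_iw_rdma_build_fastreg() then builds and
 * posts the registration WR for this mapping.
 */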
if (!ibmr->mapping.m_sg.dma_len)
ret = rds_iw_rdma_fastreg_inv(ibmr);
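/*
 * Freeing is lazy: an MR with no mapped pages (dma_len == 0) needs
 * no work; otherwise its rkey is invalidated before the MR is put
 * back on the pool's lists.
 */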
spin_unlock_irqrestore(&pool->list_lock, flags);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list,
				unsigned int *unpinned)
unsigned int ncleaned = 0;
while (!list_empty(unmap_list)) {
*unpinned += mapping->m_sg.len;
list_move(&mapping->m_list, &laundered);
spin_unlock_irqrestore(&pool->list_lock, flags);
list_splice_init(&laundered, unmap_list);
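/*
 * Invalidated mappings are parked on the private laundered list
 * while the batch completes, then spliced back to unmap_list for the
 * caller; the function returns the number of mappings cleaned.
 */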