33 #include <linux/kernel.h>
34 #include <linux/slab.h>
42 #define CLEAN_LIST_BUSY_BIT 0
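/*
 * Presumably the bit tested in the per-CPU clean_list_grace flags: set while
 * a CPU is pulling an entry off pool->clean_list, and waited on by
 * wait_clean_list_grace() below before a flush relinks that list.
 */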
87 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
98 if (i_ipaddr->ipaddr == ipaddr) {
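/*
 * The comparison above is presumably from rds_ib_get_device(), which walks
 * each device's ipaddr_list under rcu_read_lock() looking for the device
 * that owns the given address.
 */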
120 spin_lock_irq(&rds_ibdev->spinlock);
122 spin_unlock_irq(&rds_ibdev->spinlock);
133 spin_lock_irq(&rds_ibdev->spinlock);
135 if (i_ipaddr->ipaddr == ipaddr) {
136 list_del_rcu(&i_ipaddr->list);
141 spin_unlock_irq(&rds_ibdev->spinlock);
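/*
 * rds_ibdev->spinlock appears to serialize writers of the per-device
 * ipaddr_list; lookups are RCU readers, hence list_del_rcu() here with the
 * actual free of the removed entry deferred past a grace period.
 */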
153 rds_ibdev_old = rds_ib_get_device(ipaddr);
155 rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
159 return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
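/*
 * Presumably rds_ib_update_ipaddr(): if another device currently owns the
 * address, remove it there first, then register it on this rds_ibdev.
 */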
188 spin_lock_irq(&rds_ibdev->spinlock);
191 spin_unlock_irq(&rds_ibdev->spinlock);
257 rds_ib_flush_mr_pool(pool, 1, NULL);
281 static inline void wait_clean_list_grace(void)
287 flag = &per_cpu(clean_list_grace, cpu);
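/*
 * free_all == 1 above presumably asks the flush to drain the clean list as
 * well (pool teardown). wait_clean_list_grace() spins until no CPU still has
 * CLEAN_LIST_BUSY_BIT set in its clean_list_grace flag, so the flusher can
 * safely splice pool->clean_list while allocators are running.
 */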
297 int err = 0, iter = 0;
303 ibmr = rds_ib_reuse_fmr(pool);
328 rds_ib_flush_mr_pool(pool, 0, &ibmr);
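/*
 * Allocation path (presumably rds_ib_alloc_fmr()): try to reuse a clean FMR
 * first; if none is available the err/iter loop falls back to flushing the
 * pool, which can hand an MR straight back through &ibmr.
 */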
339 memset(ibmr, 0, sizeof(*ibmr));
347 if (IS_ERR(ibmr->fmr)) {
348 err = PTR_ERR(ibmr->fmr);
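/*
 * ibmr->fmr presumably comes from an ib_alloc_fmr() call just above; on
 * failure the ERR_PTR is unwrapped with PTR_ERR() and the ibmr is released.
 */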
379 sg_dma_len = ib_dma_map_sg(dev, sg, nents,
390 unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
400 if (i < sg_dma_len - 1)
420 unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
421 u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
424 dma_pages[page_cnt++] =
428 ret = ib_map_phys_fmr(ibmr->fmr,
429 dma_pages, page_cnt, io_addr);
435 rds_ib_teardown_mr(ibmr);
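/*
 * Mapping path (presumably rds_ib_map_fmr()): ib_dma_map_sg() yields
 * sg_dma_len DMA segments; the i < sg_dma_len - 1 check rejects a segment
 * that ends off a page boundary unless it is the last one, the segments are
 * flattened page by page into dma_pages[], and ib_map_phys_fmr() binds that
 * page list to the FMR at io_addr. On failure the half-built MR is torn down.
 */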
468 static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
473 ib_dma_unmap_sg(rds_ibdev->dev,
483 for (i = 0; i < ibmr->sg_len; ++i) {
499 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
501 unsigned int pinned = ibmr->sg_len;
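/*
 * Teardown presumably unmaps the scatterlist from the device and walks the
 * ibmr->sg_len entries to release the pinned user pages; the wrapper keeps
 * the pinned count so the pool's pinned-page accounting can be adjusted.
 */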
503 __rds_ib_teardown_mr(ibmr);
512 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
532 node = llist_del_all(llist);
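/*
 * llist_del_all() atomically detaches an entire lock-free llist; the
 * surrounding helper (presumably llist_append_to_list()) turns those nodes
 * into an ordinary list_head list so the flush can walk it, while
 * rds_ib_flush_goal() decides how many MRs should actually be destroyed
 * rather than recycled.
 */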
571 int free_all, struct rds_ib_mr **ibmr_ret)
578 unsigned long unpinned = 0;
579 unsigned int nfreed = 0, ncleaned = 0, free_goal;
587 ibmr = rds_ib_reuse_fmr(pool);
599 ibmr = rds_ib_reuse_fmr(pool);
611 ibmr = rds_ib_reuse_fmr(pool);
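/*
 * Inside rds_ib_flush_mr_pool(): when the caller passed ibmr_ret, the flush
 * apparently keeps retrying rds_ib_reuse_fmr() (including while waiting for
 * a concurrent flush to finish) so a waiting allocator can be handed a clean
 * MR without doing the full flush itself.
 */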
621 llist_append_to_list(&pool->drop_list, &unmap_list);
622 llist_append_to_list(&pool->free_list, &unmap_list);
624 llist_append_to_list(&pool->clean_list, &unmap_list);
626 free_goal = rds_ib_flush_goal(pool, free_all);
628 if (list_empty(&unmap_list))
633 list_add(&ibmr->fmr->list, &fmr_list);
641 unpinned += ibmr->sg_len;
642 __rds_ib_teardown_mr(ibmr);
643 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
653 if (!list_empty(&unmap_list)) {
663 wait_clean_list_grace();
665 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
670 if (clean_nodes->next)
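/*
 * Flush core: drop_list and free_list (plus clean_list when free_all is set)
 * are spliced onto unmap_list, each FMR is collected on fmr_list, presumably
 * for one batched ib_unmap_fmr(), and its pages are released. An MR is
 * destroyed while the free goal has not been met, or unconditionally once
 * its remap_count reaches pool->fmr_attr.max_maps; the survivors are pushed
 * back onto clean_list as llist nodes after wait_clean_list_grace().
 */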
691 rds_ib_flush_mr_pool(pool, 0, NULL);
700 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
718 rds_ib_flush_mr_pool(pool, 0, NULL);
738 rds_ib_flush_mr_pool(pool, 0, NULL);
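/*
 * rds_ib_free_mr() presumably queues the MR on the pool's free_list or
 * drop_list and only flushes synchronously when the caller asked for
 * invalidation; the other synchronous call here looks like a flush-all path
 * draining the pool.
 */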
761 ibmr = rds_ib_alloc_fmr(rds_ibdev);
765 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
767 *key_ret = ibmr->fmr->rkey;
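/*
 * End of rds_ib_get_mr(), presumably: allocate (or reuse) an FMR, map the
 * caller's scatterlist onto it, and return fmr->rkey as the key the remote
 * peer will use for RDMA to this memory.
 */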