#include <linux/slab.h>
#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size) - 1) / (chunk_size))
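/*
 * Worked example (explanatory note, not part of the original source):
 * NUM_CHUNKS rounds up, so covering 0x5001 bytes with 0x1000-byte chunks
 * gives NUM_CHUNKS(0x5001, 0x1000) == 6, while the exact multiple
 * NUM_CHUNKS(0x5000, 0x1000) == 5 needs no extra chunk.
 */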
#define MAX_RPAGES 512

#define EHCA_SECTSHIFT         SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
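/*
 * Explanatory note (not part of the original source): the bus map built
 * from these constants is a three-level table indexed by memory-section
 * number.  With EHCA_DIR_INDEX_SHIFT = 13, bits [12:0] of the section
 * number select the ent slot, bits [25:13] the dir slot and bits [38:26]
 * the top slot, so each level holds EHCA_MAP_ENTRIES = 8192 entries and
 * EHCA_INDEX_MASK = 0x1fff extracts one 13-bit index.
 */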
static unsigned long ehca_mr_len;
#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24
static u64 ehca_map_vaddr(void *caddr);

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);

	return (log - 12) / 4;
}
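/*
 * Explanatory note (not part of the original source): with the
 * EHCA_MR_PGSHIFT* values above, the encoding works out to
 * 4K (shift 12) -> 0, 64K (shift 16) -> 1, 1M (shift 20) -> 2 and
 * 16M (shift 24) -> 3, i.e. one code per supported hardware page size.
 */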
static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
static struct ehca_mr *ehca_mr_new(void)

static void ehca_mr_delete(struct ehca_mr *me)

static struct ehca_mw *ehca_mw_new(void)

static void ehca_mw_delete(struct ehca_mw *me)
	e_maxmr = ehca_mr_new();
		goto get_dma_mr_exit0;
				   mr_access_flags, e_pd,
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		goto get_dma_mr_exit0;
		 PTR_ERR(ib_mr), pd, mr_access_flags);
	if ((num_phys_buf <= 0) || !phys_buf_array) {
		 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		goto reg_phys_mr_exit0;
	    !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		goto reg_phys_mr_exit0;
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	    (((u64)iova_start + size) < (u64)iova_start)) {
		goto reg_phys_mr_exit0;
	e_mr = ehca_mr_new();
		goto reg_phys_mr_exit0;
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit1;
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
	memset(&pginfo, 0, sizeof(pginfo));
	ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit1;
	ehca_mr_delete(e_mr);
		 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
		 PTR_ERR(ib_mr), pd, phys_buf_array,
		 num_phys_buf, mr_access_flags, iova_start);
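/*
 * Explanatory note (not part of the original source): num_hwpages above
 * must cover [iova_start, iova_start + size) with aligned hardware
 * pages, so the offset of iova_start within its first hardware page is
 * added before rounding up.  E.g. with hw_pgsize = 64K,
 * iova_start % hw_pgsize = 0xF000 and size = 0x2000, the sum 0x11000
 * yields two hardware pages even though size alone fits in one.
 */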
			       u64 virt, int mr_access_flags,
	    !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		goto reg_user_mr_exit0;
	if (length == 0 || virt + length < virt) {
		 "virt_base=%llx", length, virt);
		goto reg_user_mr_exit0;
	e_mr = ehca_mr_new();
		goto reg_user_mr_exit0;
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
		 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		goto reg_user_mr_exit2;
	if (e_mr->umem->hugetlb) {
		page_shift = (fls64(length - 1) + 3) & ~3;
	hwpage_size = 1UL << page_shift;
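/*
 * Explanatory note (not part of the original source): for hugetlb
 * regions the expression above rounds the MSB position of (length - 1)
 * up to the next multiple of 4, picking the smallest supported hardware
 * page size (4K, 64K, 1M, 16M) that covers the whole region in one
 * shift class; e.g. length = 5M gives fls64(length - 1) = 23, rounded
 * up to page_shift = 24, i.e. 16M hardware pages.
 */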
reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	memset(&pginfo, 0, sizeof(pginfo));
		(&e_mr->umem->chunk_list),
	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  "with hwpage_size=%llx", hwpage_size);
			  "kpage_size=%lx", PAGE_SIZE);
		goto reg_user_mr_fallback;
	ib_mr = ERR_PTR(ret);
	goto reg_user_mr_exit2;
	ehca_mr_delete(e_mr);
		 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
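/*
 * Explanatory note (not part of the original source): the
 * reg_user_mr_fallback path above retries a failed registration with
 * hwpage_size = PAGE_SIZE after the optimistically chosen larger
 * hardware page size was rejected (the two messages quote both sizes);
 * only a failure at kernel page size is returned to the caller.
 */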
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
469 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
471 goto rereg_phys_mr_exit0;
477 "mr_rereg_mask=%x", pd, mr_rereg_mask);
479 goto rereg_phys_mr_exit0;
485 (mr_rereg_mask == 0)) {
487 goto rereg_phys_mr_exit0;
491 if (e_mr == shca->
maxmr) {
494 "shca->maxmr=%p mr->lkey=%x",
497 goto rereg_phys_mr_exit0;
499 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
502 "flags=%x", mr, e_mr->
flags);
504 goto rereg_phys_mr_exit0;
506 if (!phys_buf_array || num_phys_buf <= 0) {
508 " phys_buf_array=%p num_phys_buf=%x",
509 mr_rereg_mask, phys_buf_array, num_phys_buf);
511 goto rereg_phys_mr_exit0;
518 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
524 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
526 goto rereg_phys_mr_exit0;
	new_start = e_mr->start;
	new_size = e_mr->size;

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);
		new_start = iova_start;
				  num_phys_buf, iova_start,
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			 "iova_start=%p", new_size, iova_start);
			goto rereg_phys_mr_exit1;
			 new_size, PAGE_SIZE);
			 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
		goto rereg_phys_mr_exit1;
	if (mr_rereg_mask & IB_MR_REREG_PD)
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
		 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
		 ret, mr, mr_rereg_mask, pd, phys_buf_array,
		 num_phys_buf, mr_access_flags, iova_start);
	unsigned long sl_flags;
		 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
	if (h_ret != H_SUCCESS) {
		 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
	mr_attr->pd = mr->pd;
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
		 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
	} else if (e_mr == shca->maxmr) {
		 "shca->maxmr=%p mr->lkey=%x",
	if (h_ret != H_SUCCESS) {
		 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
	ehca_mr_delete(e_mr);
	e_mw = ehca_mw_new();
			     e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		 "shca=%p hca_hndl=%llx mw=%p",
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	ehca_mw_delete(e_mw);
	ehca_gen_err("bind MW currently not supported by HCAD");
	if (h_ret != H_SUCCESS) {
		 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
	ehca_mw_delete(e_mw);
	u32 tmp_lkey, tmp_rkey;
	    !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
		 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	e_fmr = ehca_mr_new();
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	memset(&pginfo, 0, sizeof(pginfo));
			  mr_access_flags, e_pd, &pginfo,
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	ehca_mr_delete(e_fmr);
	u32 tmp_lkey, tmp_rkey;
		 e_fmr, e_fmr->flags);
		goto map_phys_fmr_exit0;
		goto map_phys_fmr_exit0;
		goto map_phys_fmr_exit0;
		 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
	memset(&pginfo, 0, sizeof(pginfo));
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
		goto map_phys_fmr_exit0;
		 "iova=%llx", ret, fmr, page_list, list_len, iova);
	u32 unmap_fmr_cnt = 0;
		if ((shca != prev_shca) && prev_shca) {
			 "prev_shca=%p e_fmr=%p",
			 shca, prev_shca, e_fmr);
			goto unmap_fmr_exit0;
			 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			goto unmap_fmr_exit0;
			 "stop rest, e_fmr=%p num_fmr=%x "
			 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
			 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
	ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
		     ret, fmr_list, num_fmr, unmap_fmr_cnt);
		 e_fmr, e_fmr->flags);
	if (h_ret != H_SUCCESS) {
		 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
		goto free_fmr_exit0;
	ehca_mr_delete(e_fmr);
static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,

		hipz_acl |= 0x00000001;
				 (u64)iova_start, size, hipz_acl,
				 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		goto ehca_reg_mr_exit0;
		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
		goto ehca_reg_mr_exit1;
	e_mr->start = iova_start;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	if (h_ret != H_SUCCESS) {
		 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
		 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
		 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
		 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
		 "num_kpages=%llx num_hwpages=%llx",
		 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
		goto ehca_reg_mr_rpages_exit0;
			 "bad rc, ret=%i rnum=%x kpage=%p",
			goto ehca_reg_mr_rpages_exit1;
		rpage = __pa(kpage);
			goto ehca_reg_mr_rpages_exit1;
			if (h_ret != H_SUCCESS) {
				 "hipz_reg_rpage_mr failed, h_ret=%lli "
				 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
				 " lkey=%x", h_ret, e_mr, i,
		} else if (h_ret != H_PAGE_REGISTERED) {
			 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
			 "mr_hndl=%llx", h_ret, e_mr, i,
ehca_reg_mr_rpages_exit1:
ehca_reg_mr_rpages_exit0:
		 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
		goto ehca_rereg_mr_rereg1_exit0;
	pginfo_save = *pginfo;
		 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
		 "kpage=%p", e_mr, pginfo, pginfo->type,
		goto ehca_rereg_mr_rereg1_exit1;
	rpage = __pa(kpage);
		goto ehca_rereg_mr_rereg1_exit1;
				    (u64)iova_start, size, hipz_acl,
				    e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
			 "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
		 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
		e_mr->start = iova_start;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
ehca_rereg_mr_rereg1_exit1:
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
		 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
	int rereg_1_hcall = 1;
	int rereg_3_hcall = 0;
		 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
	if (rereg_1_hcall) {
				   acl, e_pd, pginfo, lkey, rkey);
			goto ehca_rereg_mr_exit0;
	if (rereg_3_hcall) {
		if (h_ret != H_SUCCESS) {
			 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
			goto ehca_rereg_mr_exit0;
		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
		       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
ehca_rereg_mr_exit0:
		 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
		 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
		 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
		 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
		 rereg_1_hcall, rereg_3_hcall);
	u32 tmp_lkey, tmp_rkey;
			      0, 0, e_pd->fw_pd, 0, &hipzout);
	if (h_ret == H_SUCCESS) {
		tmp_lkey = hipzout.lkey;
		tmp_rkey = hipzout.rkey;
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
		if (h_ret != H_SUCCESS) {
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			goto ehca_unmap_one_fmr_exit0;
	e_fmr->acl = save_fmr.acl;
	memset(&pginfo, 0, sizeof(pginfo));
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
	       sizeof(struct ehca_mr) - offset);
ehca_unmap_one_fmr_exit0:
				(u64)iova_start, hipz_acl, e_pd->fw_pd,
	if (h_ret != H_SUCCESS) {
		 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
		 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
		 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
		goto ehca_reg_smr_exit0;
	e_newmr->start = iova_start;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
		 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
		 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
static inline void *ehca_calc_sectbase(int top, int dir, int idx)
#define ehca_bmap_valid(entry) \
	((u64)(entry) != (u64)EHCA_INVAL_ADDR)
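/*
 * Explanatory note (not part of the original source): bus-map entries
 * start out as the all-ones pattern EHCA_INVAL_ADDR, so
 * ehca_bmap_valid() simply tests whether ehca_update_busmap() has
 * assigned the section a bus offset yet.
 */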
static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
	unsigned long page = 0;
	void *sectbase = ehca_calc_sectbase(top, dir, idx);
	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
			 "hwpage_size does not fit to "
			 "section start address");
	while (page < page_count) {
		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
	u64 hret = H_SUCCESS;
		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))

static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
	u64 hret = H_SUCCESS;
		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
		goto ehca_reg_internal_maxmr_exit0;
	e_mr = ehca_mr_new();
		goto ehca_reg_internal_maxmr_exit0;
	size_maxmr = ehca_mr_len;
	ib_pbuf.size = size_maxmr;
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;
	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
		 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
		 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
		 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
		 ret, shca, e_pd, e_maxmr);
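/*
 * Explanatory note (not part of the original source): the internal
 * max-MR built above describes all registered kernel memory as a single
 * physical buffer of ehca_mr_len bytes, registered with the largest
 * hardware page size the HCA supports; ehca_get_dma_mr() hands out an
 * MR derived from it for DMA on the whole address space.
 */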
				(u64)iova_start, hipz_acl, e_pd->fw_pd,
	if (h_ret != H_SUCCESS) {
		 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
	e_newmr->start = iova_start;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
		goto ehca_dereg_internal_maxmr_exit0;
	e_maxmr = shca->maxmr;
		 "ret=%i e_maxmr=%p shca=%p lkey=%x",
		 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
ehca_dereg_internal_maxmr_exit0:
		 ret, shca, shca->maxmr);
	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%llx pbuf->size=%llx",
			     iova_start, pbuf->addr, pbuf->size);
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
	for (i = 0; i < num_phys_buf; i++) {
		    (i < (num_phys_buf - 1)) &&
		size_count += pbuf->size;
1830 "e_fmr->fmr_max_pages=%x fmr=%p",
1839 ehca_gen_err(
"bad page, i=%x *page=%llx page=%p fmr=%p "
1840 "fmr_page_size=%x", i, *page, page, e_fmr,
	chunk = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
				 "chunk->page_list[i]=%llx "
				 "i=%x next_hwpage=%llx",
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->u.usr.next_nmap)++;
			if (j >= number)
				break;
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
			pginfo->u.usr.next_nmap = 0;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
		} else if (j >= number)
	pginfo->u.usr.next_chunk =
		(&(pginfo->u.usr.region->chunk_list)),
					    int start_idx, int end_idx,
	for (t = start_idx; t <= end_idx; t++) {
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
				 "prev_pgaddr=%llx page_list_i=%x",
				 pgaddr, *prev_pgaddr, t);
		*prev_pgaddr = pgaddr;
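/*
 * Explanatory note (not part of the original source):
 * ehca_check_kpages_per_ate() walks the kernel pages meant to share one
 * large hardware page and fails unless each page starts exactly
 * PAGE_SIZE after its predecessor, i.e. unless the range is physically
 * contiguous.
 */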
	u64 pgaddr, prev_pgaddr;
	int nr_kpages = kpages_per_hwpage;
	chunk = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
						 "invalid alignment "
					 *kpage, pgaddr, val);
				prev_pgaddr = pgaddr;
				pginfo->u.usr.next_nmap++;
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->nmap - 1, &prev_pgaddr);
				if (ret)
					return ret;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
			if (ret)
				return ret;
			pginfo->u.usr.next_nmap += nr_kpages;
			nr_kpages = kpages_per_hwpage;
			if (j >= number)
				break;
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
			pginfo->u.usr.next_nmap = 0;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
		} else if (j >= number)
	pginfo->u.usr.next_chunk =
		(&(pginfo->u.usr.region->chunk_list)),
	u64 num_hw, offs_hw;
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
			 "kpage_cnt=%llx num_kpages=%llx "
			 "num_hwpages=%llx i=%x",
			if (!(*kpage) && pbuf->addr) {
				 "next_hwpage=%llx", pbuf->addr,
			if (i >= number)
				break;
			(pginfo->u.phy.next_buf)++;
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
			 "next_listelem=%llx next_hwpage=%llx",
			 pginfo->u.fmr.next_listelem,
		    (pginfo->u.fmr.fmr_pgsize /
			(pginfo->u.fmr.next_listelem)++;
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			for (j = 1; j < cnt_per_hwpage; j++) {
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					 "found prev=%llx p=%llx "
					 "idx=%x", prev, p, i + j);
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
	switch (pginfo->type) {
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
	if ((size == ehca_mr_len) &&

	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
		ehca_top_bmap->dir[dir] =
		if (!ehca_top_bmap->dir[dir])

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
		if (!ehca_bmap->top[top])
	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}
	kfree(ehca_bmap->top[top]->dir[dir]);
static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
	unsigned long i, start_section, end_section;
	for (i = start_section; i < end_section; i++) {
		ret = ehca_init_bmap(ehca_bmap, top, dir);
		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
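/*
 * Explanatory note (not part of the original source): each memory
 * section is assigned the next EHCA_SECTSIZE-sized slot in the flat bus
 * address space, with ehca_mr_len advancing by one section per entry;
 * this running total is also why the max-MR above is sized as
 * ehca_mr_len.  Section i is found again via
 * top = i >> EHCA_TOP_INDEX_SHIFT,
 * dir = (i >> EHCA_DIR_INDEX_SHIFT) & EHCA_INDEX_MASK and
 * idx = i & EHCA_INDEX_MASK.
 */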
static int ehca_is_hugepage(unsigned long pfn)
static int ehca_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
	unsigned long pfn, start_pfn, end_pfn, nr_pages;
		return ehca_update_busmap(initial_pfn, total_nr_pages);
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	while (pfn < end_pfn) {
		if (ehca_is_hugepage(pfn)) {
			nr_pages = pfn - start_pfn;
			ret = ehca_update_busmap(start_pfn, nr_pages);
	nr_pages = pfn - start_pfn;
	return ehca_update_busmap(start_pfn, nr_pages);
				    ehca_create_busmap_callback);
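/*
 * Explanatory note (not part of the original source):
 * ehca_create_busmap_callback() splits each System-RAM range at the
 * points where the scan toggles between 16G-hugepage-backed and
 * ordinary pfns, flushing the range accumulated so far into the bus
 * map so that hugepage-backed and regular sections are registered
 * separately.
 */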
static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
	if (hret == H_SUCCESS)
		 "h_ret=%lli e_mr=%p top=%x lkey=%x "
		 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
static u64 ehca_map_vaddr(void *caddr)
	unsigned long abs_addr, offset;
	abs_addr = __pa(caddr);
	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
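/*
 * Explanatory note (not part of the original source): ehca_map_vaddr()
 * converts a kernel virtual address to its bus address by looking up
 * the section's slot in ehca_bmap and adding the offset within the
 * section, roughly entry + (__pa(caddr) & (EHCA_SECTSIZE - 1)); an
 * unmapped section yields EHCA_INVAL_ADDR, which the DMA ops below
 * report through ehca_dma_mapping_error().
 */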
	return ehca_map_vaddr(cpu_addr);
static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  unsigned long offset, size_t size,
	if (offset + size > PAGE_SIZE)
	if (!ehca_dma_mapping_error(dev, addr))

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
		addr = ehca_map_vaddr(sg_virt(sg));
		if (ehca_dma_mapping_error(dev, addr))
		sg->dma_length = sg->length;
static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,

static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
	dma_addr = ehca_map_vaddr(addr);
	if (ehca_dma_mapping_error(dev, dma_addr)) {

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
				   void *cpu_addr, u64 dma_handle)
	if (cpu_addr && size)
	.mapping_error          = ehca_dma_mapping_error,
	.map_single             = ehca_dma_map_single,
	.unmap_single           = ehca_dma_unmap_single,
	.map_page               = ehca_dma_map_page,
	.unmap_page             = ehca_dma_unmap_page,
	.map_sg                 = ehca_dma_map_sg,
	.unmap_sg               = ehca_dma_unmap_sg,
	.dma_address            = ehca_dma_address,
	.dma_len                = ehca_dma_len,
	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
	.sync_single_for_device = ehca_dma_sync_single_for_device,
	.alloc_coherent         = ehca_dma_alloc_coherent,
	.free_coherent          = ehca_dma_free_coherent,
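/*
 * Usage sketch (assumption, not shown in this file): this table matches
 * struct ib_dma_mapping_ops and is expected to be hooked up during
 * device initialisation, roughly
 *
 *	shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
 *
 * after which the IB core's ib_dma_*() helpers route through these
 * bus-map-aware implementations instead of the generic DMA API.
 */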