#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
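/*
 * Inline ULP_TX writes to adapter memory are padded to 32-byte units and
 * carry at most 96 bytes of payload each; write_adapter_mem() below splits a
 * transfer into num_wqe such work requests and waits for the final one to
 * complete.
 */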
        u8 wr_len, *to_dp, *from_dp;
        int copy_len, num_wqe, i, ret = 0;

        PDBG("%s addr 0x%x len %u\n", __func__, addr, len);

        c4iw_init_wr_wait(&wr_wait);
        for (i = 0; i < num_wqe; i++) {
                wr_len = roundup(sizeof *req + sizeof *sc +
                                 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
                req = (struct ulp_mem_io *)__skb_put(skb, wr_len);

                if (i == (num_wqe-1)) {

                to_dp = (u8 *)(sc + 1);
                        memcpy(to_dp, from_dp, copy_len);
                        memset(to_dp, 0, copy_len);
                        memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                               (copy_len % T4_ULPTX_MIN_IO));

        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
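/*
 * write_tpt_entry() builds (or, when reset_tpt_entry is set, clears) the TPT
 * entry that backs a STAG and pushes it to adapter memory through
 * write_adapter_mem(); the STAG index is the upper 24 bits of the stag.
 */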
                           int bind_enabled, u32 zbva, u64 to,
        if (c4iw_fatal_error(rdev))
                return -EIO;
        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

                rdev->stats.stag.cur += 32;
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __func__, stag_state, type, pdid, stag_idx);

                memset(&tpt, 0, sizeof(tpt));
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
                                sizeof(tpt), &tpt);
        if (reset_tpt_entry) {
                rdev->stats.stag.cur -= 32;
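/*
 * write_pbl() copies a page list into the adapter's PBL region: pbl_addr is
 * in 32-byte units (hence >> 5) and each entry is a 64-bit address (hence
 * pbl_size << 3 bytes).
 */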
                     u32 pbl_addr, u32 pbl_size)

        PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
             __func__, pbl_addr, rdev->lldi.vr->pbl.start,
             pbl_size);

        err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
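/* dereg_mem(): clear (reset) the TPT entry that backs a STAG. */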
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
                               pbl_size, pbl_addr);
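/*
 * Thin wrappers around write_tpt_entry(): allocate_window() programs a
 * memory-window STAG (FW_RI_STAG_MW), deallocate_window() clears one, and
 * allocate_stag() reserves a non-shared MR STAG (FW_RI_STAG_NSMR) bound to a
 * PBL for later fast registration.
 */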
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)

        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
                               0UL, 0, 0, 0, 0);
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)

        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
                               0);
                         u32 pbl_size, u32 pbl_addr)

        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
                               0UL, 0, 0, pbl_size, pbl_addr);
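/*
 * finish_mem_reg() records the new stag in the MR and inserts the MR into
 * the device's mmidr table under mmid = stag >> 8.
 */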
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)

        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
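/*
 * register_mem()/reregister_mem() write the TPT entry for an MR from
 * mhp->attr and then publish it via finish_mem_reg(); if that fails, the TPT
 * entry is torn back down with dereg_mem().
 */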
                        struct c4iw_mr *mhp, int shift)

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        ret = finish_mem_reg(mhp, stag);

                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);
                          struct c4iw_mr *mhp, int shift, int npages)

        if (npages > mhp->attr.pbl_size)
                return -ENOMEM;

        stag = mhp->attr.stag;
        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr);
        ret = finish_mem_reg(mhp, stag);

                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr);
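/*
 * alloc_pbl() reserves PBL space for npages entries and records the size in
 * mhp->attr.pbl_size; it fails with -ENOMEM if no PBL address was obtained.
 */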
static int alloc_pbl(struct c4iw_mr *mhp, int npages)

        if (!mhp->attr.pbl_addr)
                return -ENOMEM;

        mhp->attr.pbl_size = npages;
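/*
 * build_phys_page_list() validates the caller's physical buffer list, picks
 * the largest page shift that covers every buffer boundary, and emits a
 * big-endian page list along with the total size and page count.
 */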
                                int num_phys_buf, u64 *iova_start,
                                u64 *total_size, int *npages,
                                int *shift, __be64 **page_list)

        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return -EINVAL;

                *total_size += buffer_list[i].size;

                        mask |= buffer_list[i].addr;

                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                        mask |= (buffer_list[i].addr + buffer_list[i].size +
                                 PAGE_SIZE - 1) & PAGE_MASK;
        if (*total_size > 0xFFFFFFFFULL)
                return -ENOMEM;

        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
                if ((1ULL << *shift) & mask)
                        break;

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;

        for (i = 0; i < num_phys_buf; ++i)
                *npages += (buffer_list[i].size +
                            (1ULL << *shift) - 1) >> *shift;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
                     ++j)
                        (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
                                ((u64) j << *shift));
        PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
             __func__, (unsigned long long)*iova_start,
             (unsigned long long)mask, *shift, (unsigned long long)*total_size,
             *npages);
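/*
 * c4iw_reregister_phys_mem() rewrites an existing MR: it works on a private
 * copy (mh), reprograms the TPT entry via reregister_mem(), and only then
 * folds the requested PD/access/translation changes back into *mhp.
 */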
                             int num_phys_buf, int acc, u64 *iova_start)

        PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

        mhp = to_c4iw_mr(mr);
        php = to_c4iw_pd(mr->pd);

        memcpy(&mh, mhp, sizeof *mhp);

                php = to_c4iw_pd(pd);
                mh.attr.perms = c4iw_ib_to_tpt_access(acc);

                                           &total_size, &npages,

        ret = reregister_mem(rhp, php, &mh, shift, npages);
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mhp->attr.pdid = php->pdid;
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
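/*
 * c4iw_register_phys_mem() registers a physical buffer list: build the page
 * list, reserve a PBL and write it with write_pbl(), fill in mhp->attr, and
 * program the TPT entry through register_mem().
 */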
                                     int num_phys_buf, int acc, u64 *iova_start)

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);
        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {

                                   &total_size, &npages, &shift,

        ret = alloc_pbl(mhp, npages);
        ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
                        npages);
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
        ret = register_mem(rhp, php, mhp, shift);

                          mhp->attr.pbl_size << 3);
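/*
 * c4iw_get_dma_mr() registers an MR covering all of memory (no PBL, length
 * ~0UL); the TPT entry is written directly with the PD and access rights.
 */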
        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_c4iw_pd(pd);

        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.pbl_size = 0;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
        ret = finish_mem_reg(mhp, stag);

        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
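/*
 * c4iw_reg_user_mr() registers a userspace region: pin it (mhp->umem), walk
 * the umem chunks writing the PBL one page of entries at a time with
 * write_pbl(), then program the TPT via register_mem().
 */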
        PDBG("%s ib_pd %p\n", __func__, pd);

        if ((length + start) < start)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);

        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);

        shift = ffs(mhp->umem->page_size) - 1;

        err = alloc_pbl(mhp, n);
                for (j = 0; j < chunk->nmap; ++j) {
                        for (k = 0; k < len; ++k) {
                                        mhp->umem->page_size * k);
                                        err = write_pbl(&mhp->rhp->rdev,
                                                        pages,
                                                        mhp->attr.pbl_addr + (n << 3), i);

                err = write_pbl(&mhp->rhp->rdev, pages,
                                mhp->attr.pbl_addr + (n << 3), i);
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;

        err = register_mem(rhp, php, mhp, shift);

                          mhp->attr.pbl_size << 3);
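/*
 * c4iw_alloc_mw() allocates a memory window: allocate_window() programs the
 * MW STAG and the new mhp is tracked in the mmidr table.
 */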
        php = to_c4iw_pd(pd);
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);

        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                deallocate_window(&rhp->rdev, mhp->attr.stag);

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        /*
         * Memory-window teardown: drop the window from the mmidr table and
         * clear its STAG via deallocate_window().
         */
        mhp = to_c4iw_mw(mw);
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        deallocate_window(&rhp->rdev, mhp->attr.stag);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
        /*
         * Fast-register MR setup: reserve a PBL of pbl_depth entries, then
         * allocate_stag() programs an NSMR STAG that points at it, and the
         * MR is tracked in the mmidr table.
         */
        php = to_c4iw_pd(pd);

        ret = alloc_pbl(mhp, pbl_depth);

        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
                            mhp->attr.pbl_size, mhp->attr.pbl_addr);

        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);

                          mhp->attr.pbl_size << 3);
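/*
 * Fast-register page-list allocation: one buffer holds the *c4pl header plus
 * page_list_len u64 entries, and ibpl.page_list points just past the header.
 */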
        int size = sizeof *c4pl + page_list_len * sizeof(u64);

        c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
        c4pl->ibpl.max_page_list_len = page_list_len;
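/*
 * c4iw_dereg_mr() is the common MR destroy path: remove the mmid handle,
 * clear the TPT entry with dereg_mem(), free the PBL and any kernel va or
 * umem backing, then free the mhp.
 */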
        PDBG("%s ib_mr %p\n", __func__, ib_mr);

        mhp = to_c4iw_mr(ib_mr);
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr);
        if (mhp->attr.pbl_size)
                c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                                  mhp->attr.pbl_size << 3);
                kfree((void *) (unsigned long) mhp->kva);

        PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);