#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
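
/*
 * nes_alloc_mw - allocate a memory window: reserve an STag index from the
 * adapter's allocated_mrs bitmap, build the STag from the index and key
 * byte, and issue an allocate-STag CQP request, backing the resource out on
 * any failure or CQP timeout.
 */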
static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
	struct nes_pd *nespd = to_nespd(ibpd);
	u32 next_stag_index = 0;

	stag_key = (u8)next_stag_index;
	next_stag_index >>= 8;
	next_stag_index %= nesadapter->max_mr;
	ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);

	stag = stag_index << 8;
	stag += (u32)stag_key;

	if (cqp_request == NULL) {
		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
		return ERR_PTR(-ETIME);
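
/*
 * nes_dealloc_mw - destroy a memory window by issuing a deallocate-STag CQP
 * request for the window's rkey and releasing its MR resource index.
 */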
static int nes_dealloc_mw(struct ib_mw *ibmw)
{
	struct nes_mr *nesmr = to_nesmw(ibmw);

	if (cqp_request == NULL) {
	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
			(ibmw->rkey & 0x0fffff00) >> 8);
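
/*
 * nes_bind_mw - post a bind-MW WQE on the QP's send queue: check for SQ
 * overflow under the QP lock, fill the WQE from the ib_mw_bind request and
 * ring the SQ doorbell for this QP.
 */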
static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
		struct ib_mw_bind *ibmw_bind)
{
	struct nes_qp *nesqp = to_nesqp(ibqp);
	unsigned long flags = 0;

	head = nesqp->hwqp.sq_head;
	qsize = nesqp->hwqp.sq_tail;

	if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
		spin_unlock_irqrestore(&nesqp->lock, flags);

	nes_fill_init_qp_wqe(wqe, nesqp, head);
	u64temp = ibmw_bind->wr_id;
	u64temp = (u64)ibmw_bind->addr;
		(1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
	spin_unlock_irqrestore(&nesqp->lock, flags);
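
/*
 * alloc_fast_reg_mr - issue the allocate-STag CQP request that backs a
 * fast-register MR, reserving PBL space under pbl_lock and checking the
 * CQP completion's major/minor codes.
 */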
static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
		u32 stag, u32 page_count)
{
	if (cqp_request == NULL) {
		"region_length = %llu\n",
		page_count, region_length);
	cqp_wqe = &cqp_request->cqp_wqe;
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		"wait_event_timeout ret = %u, CQP Major:Minor codes = "
		"0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
	if (!ret || major_code) {
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
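
/*
 * nes_alloc_fast_reg_mr - reserve an MR STag for a fast-register MR and
 * program it via alloc_fast_reg_mr() for up to max_page_list_len pages.
 */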
static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd,
		int max_page_list_len)
{
	struct nes_pd *nespd = to_nespd(ibpd);

		return ERR_PTR(-E2BIG);

	stag_key = (u8)next_stag_index;
	next_stag_index >>= 8;
	next_stag_index %= nesadapter->max_mr;

	err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
			nesadapter->max_mr, &stag_index,
		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);

	stag = stag_index << 8;
	stag += (u32)stag_key;

	ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);

		return ERR_PTR(-E2BIG);

	pifrpl = &pnesfrpl->ibfrpl;
			page_list_len * sizeof(u64),
		"ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
		"pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl,
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);

	memset(props, 0, sizeof(*props));
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);

	memset(props, 0, sizeof(*props));

	if (netdev->mtu >= 4096)
	else if (netdev->mtu >= 2048)
	else if (netdev->mtu >= 1024)
	else if (netdev->mtu >= 512)

	if (netif_queue_stopped(netdev))
static int nes_query_gid(struct ib_device *ibdev, u8 port,
		int index, union ib_gid *gid)
{
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);

		printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",

	memset(&uresp, 0, sizeof uresp);

	uresp.max_qps = nesibdev->max_qp;
	uresp.max_pds = nesibdev->max_pd;
	uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
	uresp.virtwq = nesadapter->virtwq;

	nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);

	nes_ucontext->nesdev = nesdev;
		((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
	struct nes_ucontext *nes_ucontext = to_nesucontext(context);

	struct nes_ucontext *nes_ucontext;

	nes_ucontext = to_nesucontext(context);
		PAGE_SIZE-1) & (~(PAGE_SIZE-1));
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
	struct nes_ucontext *nesucontext;

	nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
			nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,

	err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
		nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);

		nespd, nesvnic->nesibdev->ibdev.name);

		nesucontext = to_nesucontext(context);
			nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
		uresp.pd_id = nespd->pd_id;
			nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
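
/*
 * nes_dealloc_pd - free a protection domain: for user PDs, release the
 * doorbell region tracked in the owning ucontext, then return the PD number
 * to the adapter's allocated_pds bitmap.
 */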
static int nes_dealloc_pd(struct ib_pd *ibpd)
{
	struct nes_ucontext *nesucontext;
	struct nes_pd *nespd = to_nespd(ibpd);

		nesucontext = to_nesucontext(ibpd->uobject->context);

		nespd->pd_id, nespd);
static int nes_destroy_ah(struct ib_ah *ah)
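
/*
 * nes_get_encoded_size - map a requested WQ depth to the hardware's encoded
 * size field, rounding *size up to the next supported depth as a side
 * effect; returns 0 for unsupported sizes.
 */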
static inline u8 nes_get_encoded_size(int *size)
{
	} else if (*size <= 128) {
	} else if (*size <= 512) {
	return (encoded_size);
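
/*
 * nes_setup_virt_qp - set up SQ/RQ for a userspace QP from the PBL supplied
 * by the user library, copying the per-queue PBL entries and allocating Q2
 * from the adapter's PBL pool under pbl_lock.
 */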
static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
		struct nes_vnic *nesvnic, int sq_size, int rq_size)
{
	pbl_entries = nespbl->pbl_size >> 3;
	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%lx\n",

	rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
	sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;

	if (!nesqp->hwqp.sq_vbase) {

	pbl += sq_pbl_entries;

			nesqp->hwqp.sq_vbase, (unsigned long) nesqp->hwqp.sq_pbase,
			nesqp->hwqp.rq_vbase, (unsigned long) nesqp->hwqp.rq_pbase);

		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);

	tpbl = pblbuffer + 16;

	while (sq_pbl_entries--)
	while (rq_pbl_entries--)

			&nesqp->hwqp.q2_pbase);
	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
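
/*
 * nes_setup_mmap_qp - allocate a single DMA-coherent buffer for a kernel or
 * mmap'd QP and carve the SQ, RQ and Q2 rings out of it back to back.
 */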
static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
		int sq_size, int rq_size)
{
			&nesqp->hwqp.sq_pbase);

		"host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",

	nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
	nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
static inline void nes_free_qp_mem(struct nes_device *nesdev,
		struct nes_qp *nesqp, int virt_wqs)
{
	unsigned long flags;

			nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);

		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
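
/*
 * nes_create_qp - create an iWARP QP: round the requested SQ/RQ depths to
 * encoded sizes, reserve a QP number, set up mmap'd or user (virtual) work
 * queues, program the QP context through a CQP request, and report the
 * actual sizes back to userspace via ib_copy_to_udata().
 */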
static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
		struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
{
	struct nes_pd *nespd = to_nespd(ibpd);
	struct nes_ucontext *nes_ucontext;
	unsigned long flags;

		init_attr->cap.max_inline_data = 0;
		init_attr->cap.max_inline_data = 64;

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	sq_encoded_size = nes_get_encoded_size(&sq_size);
	rq_encoded_size = nes_get_encoded_size(&rq_size);
	if ((!sq_encoded_size) || (!rq_encoded_size)) {

	init_attr->cap.max_send_wr = sq_size - 2;
	init_attr->cap.max_recv_wr = rq_size - 1;

	ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
		return ERR_PTR(ret);

		nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);

	u64nesqp = (unsigned long)mem;
	u64nesqp &= ~u64temp;
	nesqp = (struct nes_qp *)(unsigned long)u64nesqp;

		nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);

		if (req.user_wqe_buffers) {
			nes_ucontext = to_nesucontext(ibpd->uobject->context);
				if (nespbl->user_base == (unsigned long)req.user_wqe_buffers) {
					(long long unsigned int)req.user_wqe_buffers);
				nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
			nes_ucontext = to_nesucontext(ibpd->uobject->context);
					"db index > max user regions, failing create QP\n");
				nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
			nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);

		err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
				nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
				"error getting qp mem code = %d\n", err);
			nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);

		nesqp->hwqp.sq_encoded_size = sq_encoded_size;
		nesqp->hwqp.sq_head = 1;
		nesqp->hwqp.rq_encoded_size = rq_encoded_size;
		nesqp->hwqp.qp_id = qp_num;
		nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;

		nescq = to_nescq(init_attr->send_cq);
		nescq = to_nescq(init_attr->recv_cq);

				((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
		u64temp = (u64)nesqp->hwqp.sq_pbase;
		u64temp = (u64)nesqp->hwqp.sq_pbase;
		u64temp = (u64)nesqp->hwqp.rq_pbase;

			spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		u64temp = (u64)nesqp->hwqp.q2_pbase;

		if (cqp_request == NULL) {
			nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
			nes_free_qp_mem(nesdev, nesqp, virt_wqs);
		cqp_wqe = &cqp_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

			" nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
			nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
			nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
			nes_free_qp_mem(nesdev, nesqp, virt_wqs);
				return ERR_PTR(-ETIME);
				return ERR_PTR(-EIO);

			uresp.actual_sq_size = sq_size;
			uresp.actual_rq_size = rq_size;
			uresp.qp_id = nesqp->hwqp.qp_id;
			if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
				nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
				nes_free_qp_mem(nesdev, nesqp, virt_wqs);

		nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));

	return &nesqp->ibqp;
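
/*
 * nes_clean_cq - walk the CQ ring under its lock and drop any pending
 * completion entries that still reference the QP being destroyed.
 */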
static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
{
	unsigned long flags = 0;

	cq_head = nescq->hw_cq.cq_head;

		u64temp = (((u64)hi) << 32) | ((u64)lo);
		if (u64temp == (u64)(unsigned long)nesqp) {
		if (++cq_head >= nescq->hw_cq.cq_size)

	spin_unlock_irqrestore(&nescq->lock, flags);
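
/*
 * nes_destroy_qp - tear down a QP: signal a disconnect to any attached
 * cm_id, release the user context's WQ mapping, and scrub the send and
 * receive CQs of entries still pointing at this QP.
 */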
static int nes_destroy_qp(struct ib_qp *ibqp)
{
	struct nes_qp *nesqp = to_nesqp(ibqp);
	struct nes_ucontext *nes_ucontext;

		cm_id = nesqp->cm_id;

		cm_event.private_data = NULL;
		cm_event.private_data_len = 0;

			"QP%u. cm_id = %p, refcount = %u. \n",

		nes_ucontext = to_nesucontext(ibqp->uobject->context);

			nes_clean_cq(nesqp, nesqp->nesscq);
			nes_clean_cq(nesqp, nesqp->nesrcq);
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
	struct nes_ucontext *nes_ucontext = NULL;
	u32 pbl_entries = 1;
	unsigned long flags;

	if (entries > nesadapter->max_cqe)

	err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
		return ERR_PTR(err);

		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);

	nescq->hw_cq.cq_size = max(entries + 1, 5);
	nescq->hw_cq.cq_number = cq_num;
	nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;

		nes_ucontext = to_nesucontext(context);
			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);

		if (nes_ucontext->mcrqf) {
			if (nes_ucontext->mcrqf & 0x80000000)
				nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
			else if (nes_ucontext->mcrqf & 0x40000000)
				nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;

			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
			(unsigned long)req.user_cq_buffer, entries);
			if (nespbl->user_base == (unsigned long)req.user_cq_buffer) {
			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
		pbl_entries = nespbl->pbl_size >> 3;

		nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n",
				&nescq->hw_cq.cq_pbase);
			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);

	nescq->hw_cq.cq_head = 0;
			nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase,

	if (cqp_request == NULL) {
				nescq->hw_cq.cq_pbase);
		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
	cqp_wqe = &cqp_request->cqp_wqe;

	if (pbl_entries != 1) {
		if (pbl_entries > 32) {
				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
					nescq->hw_cq.cq_pbase);
				nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
					nescq->hw_cq.cq_pbase);
				nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);

	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	if (pbl_entries != 1)
	u64temp = (u64)nescq->hw_cq.cq_pbase;
	u64temp = (u64)(unsigned long)&nescq->hw_cq;
			nescq->hw_cq.cq_number);
			nescq->hw_cq.cq_number, ret);
				nescq->hw_cq.cq_pbase);
		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
		return ERR_PTR(-EIO);

		resp.mmap_db_index = 0;
		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);

	return &nescq->ibcq;
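
/*
 * nes_destroy_cq - destroy a CQ through a CQP request, free its PBL or
 * DMA-coherent ring memory, and return the CQ number to the adapter.
 */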
static int nes_destroy_cq(struct ib_cq *ib_cq)
{
	unsigned long flags;

	nescq = to_nescq(ib_cq);
	nesvnic = to_nesvnic(ib_cq->device);
	nesdev = nesvnic->nesdev;

	if (cqp_request == NULL) {
	cqp_wqe = &cqp_request->cqp_wqe;

		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
			nescq->hw_cq.cq_number);
			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
			nescq->hw_cq.cq_number);
			nescq->hw_cq.cq_number);
			nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
	if (pbl_count_4k == 1) {
		for (i = 0; i < 16; i++) {
		for (i = 3; i >= 0; i--) {
			for (k = 1; k < 16; k++) {
		u16 residual_page_count_4k, int acc, u64 *iova_start,
		u16 *actual_pbl_cnt, u8 *used_4k_pbls)
{
	unsigned long flags;
	u16 pbl_count_256 = 0;
	u8 use_256_pbls = 0;
	u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;

	if (cqp_request == NULL) {
	cqp_wqe = &cqp_request->cqp_wqe;

		pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
		pbl_count_256 = (pg_cnt + 31) / 32;
		if (pbl_count_256 <= nesadapter->free_256pbl)
		else if (pbl_count_4k <= nesadapter->free_4kpbl)
	} else if (pg_cnt <= 2048) {
		if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
		} else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
		} else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
			if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)

		pbl_count = pbl_count_256;
		nesadapter->free_256pbl -= pbl_count + use_two_level;
	} else if (use_4k_pbls) {
		pbl_count = pbl_count_4k;
		nesadapter->free_4kpbl -= pbl_count + use_two_level;
	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);

	if (use_256_pbls && use_two_level) {
		if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k) == 1) {
			root_vpbl = &new_root;
			nesadapter->free_256pbl += pbl_count_256 + use_two_level;
			if (pbl_count_4k == 1)
			pbl_count = pbl_count_4k;
			if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
				nesadapter->free_4kpbl -= pbl_count + use_two_level;
			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		if (use_4k_pbls == 0)

	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	if (pbl_count == 0) {
			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
	if ((!ret || major_code) && pbl_count != 0) {
			nesadapter->free_256pbl += pbl_count + use_two_level;
		else if (use_4k_pbls)
			nesadapter->free_4kpbl += pbl_count + use_two_level;
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
	else if (major_code)

	*actual_pbl_cnt = pbl_count + use_two_level;
	*used_4k_pbls = use_4k_pbls;
		struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
	struct nes_pd *nespd = to_nespd(ib_pd);
	u32 next_stag_index = 0;
	u32 root_pbl_index = 0;
	u32 cur_pbl_index = 0;

	vpbl.pbl_vbase = NULL;

	stag_key = (u8)next_stag_index;
	next_stag_index >>= 8;
	next_stag_index %= nesadapter->max_mr;
	if (num_phys_buf > (1024*512)) {
		return ERR_PTR(-E2BIG);

		return ERR_PTR(err);

		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);

	for (i = 0; i < num_phys_buf; i++) {

		if ((i & 0x01FF) == 0) {
			if (root_pbl_index == 1) {
					nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
					nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
					vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase);
			if (!vpbl.pbl_vbase) {
				nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
			if (1 <= root_pbl_index) {
				root_vpbl.pbl_vbase[root_pbl_index].pa_low =
				root_vpbl.pbl_vbase[root_pbl_index].pa_high =
				root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;

		mask = !buffer_list[i].size;
		mask |= buffer_list[i].addr;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
			nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);

		region_length += buffer_list[i].size;
		if ((i != 0) && (single_page)) {
			if ((buffer_list[i-1].addr + PAGE_SIZE) != buffer_list[i].addr)
			vpbl.pbl_vbase[cur_pbl_index++].pa_high =

	stag = stag_index << 8;
	stag += (u32)stag_key;

			" length = 0x%016lX, index = 0x%08X\n",
			stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);

	if (root_pbl_index == 1) {
		pbl_count = root_pbl_index;

	ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,

	ibmr = &nesmr->ibmr;

	if (root_pbl_index == 1) {
		for (i = 0; i < root_pbl_index; i++) {
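
/*
 * nes_get_dma_mr - register a DMA MR covering the whole address space by
 * passing a single 0xffffffffff-byte physical buffer to nes_reg_phys_mr().
 */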
static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
{
	bl.size = (u64)0xffffffffffULL;

	return nes_reg_phys_mr(pd, &bl, 1, acc, &kva);
	struct nes_pd *nespd = to_nespd(pd);
	struct nes_ucontext *nes_ucontext;
	int nmap_index, page_index;
	u32 next_stag_index;
	u32 root_pbl_index = 0;
	u32 cur_pbl_index = 0;

	if (IS_ERR(region)) {
		return (struct ib_mr *)region;

			" offset = %u, page size = %u.\n",
			(unsigned long int)start, (unsigned long int)virt, (u32)length,

	skip_pages = ((u32)region->offset) >> 12;

	if (ib_copy_from_udata(&req, udata, sizeof(req))) {

	switch (req.reg_type) {
		vpbl.pbl_vbase = NULL;

		stag_key = (u8)next_stag_index;
		driver_key = next_stag_index & 0x70000000;
		next_stag_index >>= 8;
		next_stag_index %= nesadapter->max_mr;

		err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
			return ERR_PTR(err);

			nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);

			for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
					nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
					goto reg_user_mr_err;
					goto reg_user_mr_err;

				region_length -= skip_pages << 12;
				for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
					if ((page_count != 0) && (page_count << 12) - (region->offset & (4096 - 1)) >= region->length)
					if ((page_count & 0x01FF) == 0) {
						if (page_count >= 1024 * 512) {
							nes_free_resource(nesadapter,
							ibmr = ERR_PTR(-E2BIG);
							goto reg_user_mr_err;
						if (root_pbl_index == 1) {
								goto reg_user_mr_err;
								goto reg_user_mr_err;
							vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
						if (!vpbl.pbl_vbase) {
							nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
							goto reg_user_mr_err;
						if (1 <= root_pbl_index) {
							root_vpbl.pbl_vbase[root_pbl_index].pa_low =
							root_vpbl.pbl_vbase[root_pbl_index].pa_high =
							root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;

					if (page_count != 0) {
						if ((last_dma_addr + 4096) !=
							last_dma_addr = first_dma_addr;

					vpbl.pbl_vbase[cur_pbl_index].pa_low =
							(page_index*4096)));
					vpbl.pbl_vbase[cur_pbl_index].pa_high =
							(page_index*4096))) >> 32)));

			" stag_key=0x%08x\n",
			stag_index, driver_key, stag_key);
		stag = stag_index << 8;
		stag += (u32)stag_key;

		if (root_pbl_index == 1) {
			pbl_count = root_pbl_index;

			" index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
			stag, (unsigned int)iova_start,
			(unsigned int)region_length, stag_index,
			(unsigned long long)region->length, pbl_count);
		ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
				first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,

		ibmr = &nesmr->ibmr;

		if (root_pbl_index == 1) {
			for (page_index = 0; page_index < root_pbl_index; page_index++) {
					root_vpbl.leaf_vpbl[page_index].pbl_vbase,
					root_vpbl.leaf_vpbl[page_index].pbl_pbase);

		nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);

		nes_ucontext = to_nesucontext(pd->uobject->context);
		pbl_depth = region->length >> 12;
		pbl_depth += (region->length & (4096-1)) ? 1 : 0;

			" pbl_vbase=%p user_base=0x%lx\n",

		for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
			for (page_index = 0; page_index < chunk_pages; page_index++) {
					(page_index*4096)));
					(page_index*4096))) >> 32);
					(unsigned long long)*pbl,

		nesmr->ibmr.rkey = -1;
		nesmr->ibmr.lkey = -1;
		return &nesmr->ibmr;
	struct nes_mr *nesmr = to_nesmr(ib_mr);
	unsigned long flags;

	if (cqp_request == NULL) {
	cqp_wqe = &cqp_request->cqp_wqe;

	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
			" CQP Major:Minor codes = 0x%04X:0x%04X\n",
				" ib_mr=%p, rkey = 0x%08X\n",
				ib_mr, ib_mr->rkey);
	} else if (major_code) {
				" to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
				major_code, minor_code, ib_mr, ib_mr->rkey);
				"exceeded the max(%u)\n",
				"exceeded the max(%u)\n",
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
			(ib_mr->rkey & 0x0fffff00) >> 8);
	return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);

	return sprintf(buf, "%u.%u\n",
			(nesvnic->nesdev->nesadapter->firmware_version >> 16),
			(nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));

	return sprintf(buf, "NES020\n");

	return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
	struct nes_qp *nesqp = to_nesqp(ibqp);

	attr->cap.max_send_wr = nesqp->hwqp.sq_size;
	attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
	attr->cap.max_recv_sge = 1;
		attr->cap.max_inline_data = 0;
		attr->cap.max_inline_data = 64;

	init_attr->srq = nesqp->ibqp.srq = nesqp->ibqp.srq;
	init_attr->cap = attr->cap;
		u32 next_iwarp_state, u32 termlen, u32 wait_completion)
{
	if (cqp_request == NULL) {
	if (wait_completion) {
	cqp_wqe = &cqp_request->cqp_wqe;

	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

	if (wait_completion) {
				"CQP Major:Minor codes = 0x%04X:0x%04X.\n",
				"CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n",
		else if (major_code)
		int attr_mask, struct ib_udata *udata)
{
	struct nes_qp *nesqp = to_nesqp(ibqp);
	u32 next_iwarp_state = 0;
	unsigned long qplockflags;
	u16 original_last_aeq;
	u8 issue_modify_qp = 0;

			" iwarp_state=0x%X, refcount=%d\n",
			" QP Access Flags=0x%X, attr_mask = 0x%0x\n",

				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
				issue_modify_qp = 1;
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
				issue_modify_qp = 1;
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
						nesqp->hwqp.qp_id);
					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
				if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS)
				issue_modify_qp = 1;
				issue_modify_qp = 1;
						nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
						" ignored due to current iWARP state\n",
					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
						" already done based on hw state.\n",
					issue_modify_qp = 0;
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
			issue_modify_qp = 1;
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
			issue_modify_qp = 1;
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
			issue_modify_qp = 1;
			issue_modify_qp = 1;
			issue_modify_qp = 1;
			issue_modify_qp = 1;
			issue_modify_qp = 1;

	original_last_aeq = nesqp->last_aeq;
	spin_unlock_irqrestore(&nesqp->lock, qplockflags);

	if (issue_modify_qp) {
					" failed for QP%u.\n",
					next_iwarp_state, nesqp->hwqp.qp_id);
					" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
					original_last_aeq, nesqp->last_aeq);
					" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
					original_last_aeq, nesqp->last_aeq);
				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
						" need ae to finish up, original_last_aeq = 0x%04X."
						" last_aeq = 0x%04X, scheduling timer.\n",
						original_last_aeq, nesqp->last_aeq);
					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
						" need ae to finish up, original_last_aeq = 0x%04X."
						" last_aeq = 0x%04X.\n",
						original_last_aeq, nesqp->last_aeq);
					" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
					original_last_aeq, nesqp->last_aeq);
					" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
					original_last_aeq, nesqp->last_aeq);
static int nes_process_mad(struct ib_device *ibdev, int mad_flags,

	int total_payload_length = 0;

	for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
			ib_wr->sg_list[sge_index].addr);
			ib_wr->sg_list[sge_index].length);
			(ib_wr->sg_list[sge_index].lkey));
		total_payload_length += ib_wr->sg_list[sge_index].length;

			total_payload_length);
			total_payload_length);
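
/*
 * nes_post_send - post a chain of send WRs: for each WR check for SQ
 * overflow, build the WQE (copying payloads of 64 bytes or less inline when
 * NES_DRV_OPT_NO_INLINE_DATA is clear), handle RDMA and fast-register
 * opcodes, then ring the SQ doorbell with the number of WQEs posted.
 */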
static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		struct ib_send_wr **bad_wr)
{
	unsigned long flags = 0;
	struct nes_qp *nesqp = to_nesqp(ibqp);
	u32 qsize = nesqp->hwqp.sq_size;

	head = nesqp->hwqp.sq_head;

		if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {

		nes_fill_init_qp_wqe(wqe, nesqp, head);

					((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
					(ib_wr->sg_list[0].length <= 64)) {
					(void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
				fill_wqe_sg_send(wqe, ib_wr, 1);

				ib_wr->wr.rdma.remote_addr);
					((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
					(ib_wr->sg_list[0].length <= 64)) {
					(void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
				fill_wqe_sg_send(wqe, ib_wr, 1);

				ib_wr->wr.rdma.remote_addr);

			u64 *src_page_list = pnesfrpl->ibfrpl.page_list;

			} else if (ib_wr->wr.fast_reg.page_shift == 21) {

				" ib_wr=%u, max=1\n", ib_wr->num_sge);

			if (flags & IB_ACCESS_LOCAL_WRITE)
			if (flags & IB_ACCESS_REMOTE_WRITE)
			if (flags & IB_ACCESS_REMOTE_READ)
			if (flags & IB_ACCESS_MW_BIND)

					pnesfrpl->ibfrpl.max_page_list_len) {
					" ib_wr=%p, value=%u, max=%u\n",
					pnesfrpl->ibfrpl.max_page_list_len);

			for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)

				"length: %d, rkey: %0x, pgl_paddr: %llx, "
				"page_list_len: %u, wqe_misc: %x\n",
				(unsigned long long) ib_wr->wr.fast_reg.iova_start,

		ib_wr = ib_wr->next;

	counter = min(wqe_count, ((u32)255));
		(counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);

	spin_unlock_irqrestore(&nesqp->lock, flags);
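
/*
 * nes_post_recv - post receive WRs: check for RQ overflow, copy each WR's
 * scatter list and total length into the RQ WQE, then ring the RQ doorbell
 * with the number of WQEs posted.
 */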
static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
		struct ib_recv_wr **bad_wr)
{
	unsigned long flags = 0;
	struct nes_qp *nesqp = to_nesqp(ibqp);
	u32 qsize = nesqp->hwqp.rq_size;
	u32 total_payload_length;

	head = nesqp->hwqp.rq_head;

		if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {

		nes_fill_init_qp_wqe(wqe, nesqp, head);

		total_payload_length = 0;
		for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
				ib_wr->sg_list[sge_index].addr);
				ib_wr->sg_list[sge_index].length);
				ib_wr->sg_list[sge_index].lkey);
			total_payload_length += ib_wr->sg_list[sge_index].length;
			total_payload_length);

		ib_wr = ib_wr->next;

	counter = min(wqe_count, ((u32)255));

	spin_unlock_irqrestore(&nesqp->lock, flags);
	unsigned long flags = 0;
	struct nes_cq *nescq = to_nescq(ibcq);
	u32 move_cq_head = 1;

	head = nescq->hw_cq.cq_head;
	cq_size = nescq->hw_cq.cq_size;

	while (cqe_count < num_entries) {
		if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
				NES_CQE_VALID) == 0)

		wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
		nesqp = (struct nes_qp *)(unsigned long)u64temp;
		memset(entry, 0, sizeof *entry);

			entry->status = err_code & 0x0000ffff;
		entry->qp = &nesqp->ibqp;

				nesqp->hwqp.sq_tail++;
				nesqp->hwqp.sq_tail = (wqe_index + 1) & (nesqp->hwqp.sq_size - 1);
				wq_tail = nesqp->hwqp.sq_tail;
				nesqp->hwqp.rq_tail = (wqe_index + 1) & (nesqp->hwqp.rq_size - 1);
				wq_tail = nesqp->hwqp.rq_tail;

		entry->wr_id = wrid;

		if (++head >= cq_size)

				" are pending %u of %u.\n",
				wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;

			cqe_count, nescq->hw_cq.cq_number);

	spin_unlock_irqrestore(&nescq->lock, flags);
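
/*
 * nes_req_notify_cq - arm the CQ for the requested notification type by
 * writing the CQ number plus arm bits to the device doorbell register.
 */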
static int nes_req_notify_cq(struct ib_cq *ibcq,
		enum ib_cq_notify_flags notify_flags)
{
	struct nes_cq *nescq = to_nescq(ibcq);

			nescq->hw_cq.cq_number);

	cq_arm = nescq->hw_cq.cq_number;
	struct nes_vnic *nesvnic = netdev_priv(netdev);

	if (nesibdev == NULL) {

	nesibdev->ibdev.uverbs_cmd_mask =

	nesibdev->ibdev.phys_port_cnt = 1;
	nesibdev->ibdev.num_comp_vectors = 1;
	nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
	nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
	nesibdev->ibdev.query_device = nes_query_device;
	nesibdev->ibdev.query_port = nes_query_port;
	nesibdev->ibdev.query_pkey = nes_query_pkey;
	nesibdev->ibdev.query_gid = nes_query_gid;
	nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
	nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext;
	nesibdev->ibdev.mmap = nes_mmap;
	nesibdev->ibdev.alloc_pd = nes_alloc_pd;
	nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
	nesibdev->ibdev.create_ah = nes_create_ah;
	nesibdev->ibdev.destroy_ah = nes_destroy_ah;
	nesibdev->ibdev.create_qp = nes_create_qp;
	nesibdev->ibdev.query_qp = nes_query_qp;
	nesibdev->ibdev.destroy_qp = nes_destroy_qp;
	nesibdev->ibdev.create_cq = nes_create_cq;
	nesibdev->ibdev.destroy_cq = nes_destroy_cq;
	nesibdev->ibdev.poll_cq = nes_poll_cq;
	nesibdev->ibdev.get_dma_mr = nes_get_dma_mr;
	nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr;
	nesibdev->ibdev.reg_user_mr = nes_reg_user_mr;
	nesibdev->ibdev.dereg_mr = nes_dereg_mr;
	nesibdev->ibdev.alloc_mw = nes_alloc_mw;
	nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
	nesibdev->ibdev.bind_mw = nes_bind_mw;

	nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
	nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
	nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;

	nesibdev->ibdev.attach_mcast = nes_multicast_attach;
	nesibdev->ibdev.detach_mcast = nes_multicast_detach;
	nesibdev->ibdev.process_mad = nes_process_mad;

	nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
	nesibdev->ibdev.post_send = nes_post_send;
	nesibdev->ibdev.post_recv = nes_post_recv;
static void nes_handle_delayed_event(unsigned long data)
{
	struct nes_vnic *nesvnic = (void *) data;

	event.device = &nesvnic->nesibdev->ibdev;

	nesvnic->event_timer.function = nes_handle_delayed_event;
	if (nesibdev == NULL)

	nes_unregister_ofa_device(nesibdev);

	for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
			nes_dev_attributes[i]);
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
{
	for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {