#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
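/*
 * iWARP verbs provider for the Chelsio T3 RNIC (cxgb3): implements the
 * struct ib_device entry points (ucontext, CQ, QP, PD, MR/MW, mmap) and
 * registers them with the RDMA core.
 */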
static int iwch_ah_destroy(struct ib_ah *ah)
static int iwch_process_mad(struct ib_device *ibdev,
        PDBG("%s context %p\n", __func__, context);

        struct iwch_dev *rhp = to_iwch_dev(ibdev);

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        INIT_LIST_HEAD(&context->mmaps);
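/* Tear down a CQ: drop its cqid from the device's cqidr handle table. */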
static int iwch_destroy_cq(struct ib_cq *ib_cq)

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_iwch_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
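/*
 * CQ creation: round the CQ up to a power-of-two size, publish the cqid
 * in the cqidr table, and, for user CQs, return the mmap key and size
 * through udata so libcxgb3 can map the queue memory.
 */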
        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        rhp = to_iwch_dev(ibdev);
        ucontext = to_iwch_ucontext(ib_context);
        if (!t3a_device(rhp)) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
        if (t3a_device(rhp)) {
        chp->cq.size_log2 = ilog2(entries);
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
                iwch_destroy_cq(&chp->ibcq);
        uresp.cqid = chp->cq.cqid;
        uresp.size_log2 = chp->cq.size_log2;
        uresp.key = ucontext->key;
        if (udata->outlen < sizeof(uresp)) {
                       "downlevel libcxgb3 (non-fatal).\n");
        uresp.memsize = mm->len;
        resplen = sizeof(uresp);
        if (ib_copy_to_udata(udata, &uresp, resplen)) {
                iwch_destroy_cq(&chp->ibcq);
        insert_mmap(ucontext, mm);
        PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
             chp->cq.cqid, chp, (1 << chp->cq.size_log2),
             (unsigned long long)chp->cq.dma_addr);
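/*
 * CQ resize: quiesce the attached QPs, allocate the new-size queue, copy
 * the outstanding CQEs across, then resume the QPs.
 */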
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)

        struct iwch_cq *chp = to_iwch_cq(cq);
        struct t3_cq oldcq, newcq;

        PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
        newcq.size_log2 = ilog2(cqe);
        ret = iwch_quiesce_qps(chp);
        memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
        chp->cq.cqid = oldcq.cqid;
        chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
        oldcq.cqid = newcq.cqid;
        ret = iwch_resume_qps(chp);
        chp = to_iwch_cq(ibcq);
        PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
        spin_unlock_irqrestore(&chp->lock, flag);
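/*
 * mmap of queue/doorbell memory: the offset encodes a key previously
 * handed to userspace; addresses falling in the RNIC's user doorbell
 * physical range map device registers, and the VMA is pinned with
 * VM_DONTCOPY | VM_DONTEXPAND.
 */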
        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
        rdev_p = &(to_iwch_dev(context->device)->rdev);
        ucontext = to_iwch_ucontext(context);
        mm = remove_mmap(ucontext, key, len);
        if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
            (addr < (rdev_p->rnic_info.udbell_physbase +
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
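/* Protection domain verbs: allocate and free hardware pdids. */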
static int iwch_deallocate_pd(struct ib_pd *pd)

        php = to_iwch_pd(pd);
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);

        PDBG("%s ibdev %p\n", __func__, ibdev);
        iwch_deallocate_pd(&php->ibpd);
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        PDBG("%s ib_mr %p\n", __func__, ib_mr);
        mhp = to_iwch_mr(ib_mr);
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        kfree((void *)(unsigned long)mhp->kva);
        PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_iwch_pd(pd);
        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
                       &total_size, &npages, &shift, &page_list);
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
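/*
 * Re-registration works on a local copy (mh) first, then commits only the
 * fields selected by mr_rereg_mask (PD, access rights, translation).
 */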
static int iwch_reregister_phys_mem(struct ib_mr *mr,
                                    int acc, u64 *iova_start)

        PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
        mhp = to_iwch_mr(mr);
        php = to_iwch_pd(mr->pd);
        memcpy(&mh, mhp, sizeof(*mhp));
        php = to_iwch_pd(pd);
        mh.attr.perms = iwch_ib_to_tpt_access(acc);
                       &total_size, &npages,
        if (mr_rereg_mask & IB_MR_REREG_PD)
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
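/*
 * User MR registration: pin the region with ib_umem, walk each mapped
 * chunk to build the page list, write the TPT attributes, and return the
 * PBL offset to userspace through udata on non-T3A devices.
 */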
        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_iwch_pd(pd);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
        shift = ffs(mhp->umem->page_size) - 1;
        for (j = 0; j < chunk->nmap; ++j) {
                for (k = 0; k < len; ++k) {
                        mhp->umem->page_size * k);
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        if (udata && !t3a_device(rhp)) {
                uresp.pbl_addr = (mhp->attr.pbl_addr -
                                  rhp->rdev.rnic_info.pbl_base) >> 3;
                PDBG("%s user resp pbl_addr 0x%x\n", __func__,
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        iwch_dereg_mr(&mhp->ibmr);
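/*
 * DMA MR: implemented as a single physical registration covering the
 * 32-bit address space (one buffer of size 0xffffffff) via
 * iwch_register_phys_mem().
 */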
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)

        PDBG("%s ib_pd %p\n", __func__, pd);
        bl.size = 0xffffffff;
        ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
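/* Memory windows: the mmid is derived from the stag/rkey (key >> 8). */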
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)

        php = to_iwch_pd(pd);
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);

static int iwch_dealloc_mw(struct ib_mw *mw)

        mhp = to_iwch_mw(mw);
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)

        php = to_iwch_pd(pd);
        mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);

        page_list = kmalloc(sizeof(*page_list) + page_list_len * sizeof(u64),
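/*
 * QP teardown: remove the qpid handle and release the WQ against the
 * owning ucontext for user QPs, or the device's own uctx for kernel QPs.
 */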
static int iwch_destroy_qp(struct ib_qp *ib_qp)

        qhp = to_iwch_qp(ib_qp);
        remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
        ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
             ib_qp, qhp->wq.qpid, qhp);
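/*
 * QP creation: round the SQ/RQ up to power-of-two sizes, size the
 * combined WQ, publish the qpid in the qpidr table, and return mmap keys
 * for the WQ and doorbell to userspace through udata.
 */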
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,

        int wqsize, sqsize, rqsize;

        PDBG("%s ib_pd %p\n", __func__, pd);
        php = to_iwch_pd(pd);
        schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
        rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
        if (rqsize == attrs->cap.max_recv_wr)
        if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
        PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
             wqsize, sqsize, rqsize);
        qhp->wq.size_log2 = ilog2(wqsize);
        qhp->wq.rq_size_log2 = ilog2(rqsize);
        qhp->wq.sq_size_log2 = ilog2(sqsize);
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize;
        qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
        qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
        qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
        qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
        qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;
        if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
                iwch_destroy_qp(&qhp->ibqp);
        iwch_destroy_qp(&qhp->ibqp);
        uresp.qpid = qhp->wq.qpid;
        uresp.size_log2 = qhp->wq.size_log2;
        uresp.sq_size_log2 = qhp->wq.sq_size_log2;
        uresp.rq_size_log2 = qhp->wq.rq_size_log2;
        uresp.key = ucontext->key;
        uresp.db_key = ucontext->key;
        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                iwch_destroy_qp(&qhp->ibqp);
        mm1->key = uresp.key;
        insert_mmap(ucontext, mm1);
        mm2->key = uresp.db_key;
        insert_mmap(ucontext, mm2);
        qhp->ibqp.qp_num = qhp->wq.qpid;
        PDBG("%s sq_num_entries %d, rq_num_entries %d "
             "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
             __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
             1 << qhp->wq.size_log2, qhp->wq.rq_addr);
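/*
 * Modify QP: translate the IB attribute mask and state into iwch
 * attributes and drive the QP state machine toward attrs.next_state.
 */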
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                             int attr_mask, struct ib_udata *udata)

        PDBG("%s ib_qp %p\n", __func__, ibqp);
        attr_mask &= ~IB_QP_STATE;
        memset(&attrs, 0, sizeof(attrs));
        qhp = to_iwch_qp(ibqp);
        attrs.next_state = iwch_convert_state(attr->qp_state);
        PDBG("%s ib_qp %p\n", __func__, qp);

        PDBG("%s ib_qp %p\n", __func__, qp);

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)

        PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
        return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
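/*
 * Query verbs: iWARP has no real pkeys or GIDs, so the GID is synthesized
 * from the port netdev's MAC address.
 */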
static int iwch_query_pkey(struct ib_device *ibdev,

        PDBG("%s ibdev %p\n", __func__, ibdev);

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
             __func__, ibdev, port, index, gid);
        dev = to_iwch_dev(ibdev);
        BUG_ON(port == 0 || port > 2);
        memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
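/*
 * Parse the ethtool firmware version string ("maj.min.mic") into the
 * packed u64 reported as fw_ver.
 */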
        unsigned fw_maj, fw_min, fw_mic;

        next = info.fw_version + 1;
        sscanf(cp, "%i", &fw_maj);
        sscanf(cp, "%i", &fw_min);
        sscanf(cp, "%i", &fw_mic);
        return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
static int iwch_query_device(struct ib_device *ibdev,

        struct iwch_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        dev = to_iwch_dev(ibdev);
        memset(props, 0, sizeof(*props));
        props->fw_ver = fw_vers_string_to_u64(dev);
static int iwch_query_port(struct ib_device *ibdev,

        struct iwch_dev *dev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        dev = to_iwch_dev(ibdev);
        netdev = dev->rdev.port_info.lldevs[port-1];
        if (netdev->mtu >= 4096)
        else if (netdev->mtu >= 2048)
        else if (netdev->mtu >= 1024)
        else if (netdev->mtu >= 512)
        if (!netif_carrier_ok(netdev))
        inetdev = in_dev_get(netdev);
        if (inetdev->ifa_list)
        in_dev_put(inetdev);
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,

        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);

        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,

        PDBG("%s dev 0x%p\n", __func__, dev);

        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,

        PDBG("%s dev 0x%p\n", __func__, dev);

        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,

        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
                       iwch_dev->rdev.rnic_info.pdev->device);
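/*
 * Protocol statistics: read the hardware MIB and fold each 32-bit hi/lo
 * counter pair into a 64-bit value.
 */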
static int iwch_get_mib(struct ib_device *ibdev,

        struct iwch_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        dev = to_iwch_dev(ibdev);
        memset(stats, 0, sizeof(*stats));
        stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) + m.ipInReceive_lo;
        stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
        stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
        stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
        stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
        stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
        stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
        stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
        stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
        stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
        stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
        stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
        stats->iw.ipReasmFails = (u64) m.ipReasmFails;
        stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
        stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
        stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
        stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
        stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
        stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
        stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
        stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
        stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
        stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
        stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
        stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
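/*
 * Device registration: derive the node GUID from the LL device's MAC,
 * wire up every ib_device method implemented above, attach the iwcm
 * callbacks, and create the sysfs attributes.
 */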
        PDBG("%s iwch_dev %p\n", __func__, dev);
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
        dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
        dev->ibdev.num_comp_vectors = 1;
        dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
        dev->ibdev.query_device = iwch_query_device;
        dev->ibdev.query_port = iwch_query_port;
        dev->ibdev.query_pkey = iwch_query_pkey;
        dev->ibdev.query_gid = iwch_query_gid;
        dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
        dev->ibdev.mmap = iwch_mmap;
        dev->ibdev.alloc_pd = iwch_allocate_pd;
        dev->ibdev.dealloc_pd = iwch_deallocate_pd;
        dev->ibdev.create_ah = iwch_ah_create;
        dev->ibdev.destroy_ah = iwch_ah_destroy;
        dev->ibdev.create_qp = iwch_create_qp;
        dev->ibdev.modify_qp = iwch_ib_modify_qp;
        dev->ibdev.destroy_qp = iwch_destroy_qp;
        dev->ibdev.create_cq = iwch_create_cq;
        dev->ibdev.destroy_cq = iwch_destroy_cq;
        dev->ibdev.resize_cq = iwch_resize_cq;
        dev->ibdev.get_dma_mr = iwch_get_dma_mr;
        dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
        dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
        dev->ibdev.reg_user_mr = iwch_reg_user_mr;
        dev->ibdev.dereg_mr = iwch_dereg_mr;
        dev->ibdev.alloc_mw = iwch_alloc_mw;
        dev->ibdev.dealloc_mw = iwch_dealloc_mw;
        dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
        dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
        dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
        dev->ibdev.attach_mcast = iwch_multicast_attach;
        dev->ibdev.detach_mcast = iwch_multicast_detach;
        dev->ibdev.process_mad = iwch_process_mad;
        dev->ibdev.req_notify_cq = iwch_arm_cq;
        dev->ibdev.get_protocol_stats = iwch_get_mib;

        if (!dev->ibdev.iwcm)
        dev->ibdev.iwcm->get_qp = iwch_get_qp;
        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
                iwch_class_attributes[i]);

        PDBG("%s iwch_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
                iwch_class_attributes[i]);