#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/export.h>
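/*
 * Prepare the common header of a subnet management packet (SMP): the
 * query helpers below all use an IB_MGMT_METHOD_GET MAD to ask the
 * firmware for device, port, P_Key and GID information.
 */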
static void init_query_mad(struct ib_smp *mad)
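/*
 * query_device: device limits live in firmware, so the attributes are
 * fetched with a MAD exchange rather than read from a host-memory
 * structure.
 */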
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
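/*
 * query_port: a PortInfo MAD is issued and the interesting fields (LMC,
 * table lengths, etc.) are unpacked from the raw response buffer; the
 * table sizes come from the limits probed at driver init.
 */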
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        props->lmc          = out_mad->data[34] & 0x7;
        props->gid_tbl_len  = to_mdev(ibdev)->limits.gid_table_len;
        props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
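/*
 * modify_device/modify_port: only a few attributes are mutable.  A port
 * change reads the current PortInfo via mthca_query_port() and then
 * writes back the requested capability bits with a SET_IB command.
 */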
static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
        err = mthca_query_port(ibdev, port, &attr);
        set_ib.set_si_guid = 0;
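/* query_pkey: one P_Key table entry is looked up with a PkeyTable MAD. */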
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
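/*
 * query_gid: building a full GID takes two MADs, a PortInfo query for
 * the subnet prefix followed by a GuidInfo query for the GUID, hence the
 * two init_query_mad() calls below.
 */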
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        init_query_mad(in_mad);
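/*
 * alloc_ucontext: a userspace context gets the QP table size and (on
 * mem-free HCAs) the UAR context size copied back through udata, plus a
 * doorbell page table for doorbells that live in user memory.
 */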
        if (!(to_mdev(ibdev)->active))
                return ERR_PTR(-EAGAIN);
        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
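/* dealloc_ucontext: tear down the user doorbell table, then free the context. */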
static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        kfree(to_mucontext(context));
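/*
 * mmap: the only thing a process may map is its UAR (User Access Region)
 * page, through which doorbells are rung; the mapping must be exactly
 * one page and is made non-cached.
 */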
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;
static int mthca_dealloc_pd(struct ib_pd *pd)
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
static int mthca_ah_destroy(struct ib_ah *ah)
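/*
 * SRQ creation, userspace path: the consumer passes its doorbell index,
 * doorbell page and buffer lkey through udata; the kernel maps the user
 * doorbell before allocating the SRQ and unmaps it again on failure.
 */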
                context = to_mucontext(pd->uobject->context);
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);
                srq->mr.ibmr.lkey = ucmd.lkey;
        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);
static int mthca_destroy_srq(struct ib_srq *srq)
                context = to_mucontext(srq->uobject->context);
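/*
 * QP creation, userspace path: unlike the SRQ case, two doorbells are
 * mapped, one for the send queue and one for the receive queue, and
 * their indices are remembered in the mthca_qp so destroy can unmap
 * them later.
 */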
                context = to_mucontext(pd->uobject->context);
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab,
                                        ucmd.sq_db_index, ucmd.sq_db_page);
                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab,
                                        ucmd.rq_db_index, ucmd.rq_db_page);
                qp->mr.ibmr.lkey = ucmd.lkey;
                qp->sq.db_index  = ucmd.sq_db_index;
                qp->rq.db_index  = ucmd.rq_db_index;
                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);
                context = to_mucontext(pd->uobject->context);
        init_attr->cap.max_send_wr  = qp->sq.max;
        init_attr->cap.max_recv_wr  = qp->rq.max;
        init_attr->cap.max_send_sge = qp->sq.max_gs;
        init_attr->cap.max_recv_sge = qp->rq.max_gs;
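/*
 * destroy_qp: for a userspace QP, both doorbell records mapped at create
 * time are released before the QP itself is freed.
 */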
static int mthca_destroy_qp(struct ib_qp *qp)
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
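/*
 * create_cq: the requested depth is rounded up to the next power of two
 * (the nent loop below), and a userspace CQ maps two doorbells, the
 * consumer-index ("set_ci") doorbell and the arm doorbell.
 */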
        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
        for (nent = 1; nent <= entries; nent <<= 1)
        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);
        spin_lock_irq(&cq->lock);
        spin_unlock_irq(&cq->lock);

        spin_lock_irq(&cq->lock);
        spin_unlock_irq(&cq->lock);

        spin_lock_irq(&cq->lock);
        spin_unlock_irq(&cq->lock);
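/*
 * resize_cq: a resize buffer is allocated and walked through the
 * CQ_RESIZE_ALLOC -> CQ_RESIZE_READY states, with every state change
 * made under cq->lock (the lock/unlock pairs above and below); once the
 * hardware has switched buffers, ibcq->cqe is updated to entries - 1.
 */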
        if (entries < 1 || entries > dev->limits.max_cqes)
        if (entries == ibcq->cqe + 1) {
        ret = mthca_alloc_resize_buf(dev, cq, entries);
        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
        spin_lock_irq(&cq->lock);
        spin_unlock_irq(&cq->lock);
        spin_lock_irq(&cq->lock);
        if (cq->resize_buf->state == CQ_RESIZE_READY) {
        }
        spin_unlock_irq(&cq->lock);
        ibcq->cqe = entries - 1;
static int mthca_destroy_cq(struct ib_cq *cq)
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
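/*
 * Memory region registration. convert_access() translates the generic
 * IB_ACCESS_* bits into the MTHCA_MPT_FLAG_* bits that the hardware's
 * memory protection table expects.
 */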
static inline u32 convert_access(int acc)
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
        err = mthca_mr_alloc_notrans(to_mdev(pd->device), to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int num_phys_buf,
                                       int acc,
                                       u64 *iova_start)
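/*
 * Find the largest page size that fits the buffer list: any low bit set
 * in the XOR of the start address with the iova, in an interior buffer
 * start, or in an interior buffer end limits the usable page shift.
 * OR-ing in 1 << 31 below caps the shift so __ffs always terminates.
 */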
        mask = buffer_list[0].addr ^ *iova_start;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }
        shift = __ffs(mask | 1 << 31);

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);
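/*
 * reg_user_mr: old versions of libmthca do not pass MR attributes in
 * udata; the driver warns once per user context (reg_mr_warned) and
 * falls back to defaults instead of failing the registration.
 */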
                if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
                        mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                   current->comm);
                        mthca_warn(dev, "  Update libmthca to fix this.\n");
                }
                ++to_mucontext(pd->uobject->context)->reg_mr_warned;
        } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return ERR_PTR(-EFAULT);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
        shift = ffs(mr->umem->page_size) - 1;
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
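/*
 * Walk the pinned umem chunk by chunk, expanding each DMA-mapped
 * segment into page-sized MTT entries; entries are flushed to the
 * hardware in batches of write_mtt_size to bound the scratch buffer.
 */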
                for (j = 0; j < chunk->nmap; ++j) {
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        mr->umem->page_size * k;
                                if (i == write_mtt_size) {
        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
                             convert_access(acc), mr);
        return ERR_PTR(err);
static int mthca_dereg_mr(struct ib_mr *mr)
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);
                return ERR_PTR(err);
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
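/*
 * unmap_fmr: all FMRs on the list must belong to the same HCA; the loop
 * below enforces that before the hardware translation tables are
 * flushed.
 */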
static int mthca_unmap_fmr(struct list_head *fmr_list)
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        if (mthca_is_memfree(mdev)) {
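/*
 * sysfs attributes: the firmware version is a packed 64-bit value,
 * shown as major.minor.subminor; hca_type is derived from the PCI
 * device ID.
 */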
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
static int mthca_init_node_data(struct mthca_dev *dev)
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (mthca_is_memfree(dev))
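/*
 * mthca_register_device: fill in the ib_device callback table and the
 * uverbs command mask below, then hand the device to the IB core;
 * mem-free (Arbel-style) HCAs get a few different entry points, hence
 * the mthca_is_memfree() checks.
 */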
        ret = mthca_init_node_data(dev);
        dev->ib_dev.uverbs_cmd_mask =
        dev->ib_dev.num_comp_vectors = 1;
        dev->ib_dev.query_device     = mthca_query_device;
        dev->ib_dev.query_port       = mthca_query_port;
        dev->ib_dev.modify_device    = mthca_modify_device;
        dev->ib_dev.modify_port      = mthca_modify_port;
        dev->ib_dev.query_pkey       = mthca_query_pkey;
        dev->ib_dev.query_gid        = mthca_query_gid;
        dev->ib_dev.alloc_ucontext   = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
        dev->ib_dev.mmap             = mthca_mmap_uar;
        dev->ib_dev.alloc_pd         = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd       = mthca_dealloc_pd;
        dev->ib_dev.create_ah        = mthca_ah_create;
        dev->ib_dev.destroy_ah       = mthca_ah_destroy;
        dev->ib_dev.destroy_srq      = mthca_destroy_srq;
        dev->ib_dev.uverbs_cmd_mask |=
        if (mthca_is_memfree(dev))
        dev->ib_dev.destroy_qp  = mthca_destroy_qp;
        dev->ib_dev.destroy_cq  = mthca_destroy_cq;
        dev->ib_dev.get_dma_mr  = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr    = mthca_dereg_mr;
        dev->ib_dev.alloc_fmr   = mthca_alloc_fmr;
        dev->ib_dev.unmap_fmr   = mthca_unmap_fmr;
        dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
        if (mthca_is_memfree(dev))

        if (mthca_is_memfree(dev)) {
        for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
                ret = device_create_file(&dev->ib_dev.dev,
                                         mthca_dev_attributes[i]);