#include <linux/random.h>

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
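/*
 * Tunnel WR ID layout, as encoded/decoded by the macros above:
 *   low bits     - index of the buffer in the tunnel ring
 *   bits [33:32] - index of the tunnel QP within the PV context (qp[0]/qp[1])
 *   bit  34      - set on receive work requests (MLX4_TUN_WRID_RECV)
 */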
#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
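/*
 * mlx4_MAD_IFC: push a 256-byte MAD to the firmware MAD_IFC command.
 * The MAD is copied into an input mailbox, optional work-completion
 * context (SL, GRH, source LID) is appended after the MAD, and the
 * 256-byte response is copied back out of the output mailbox.
 */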
		 void *in_mad, void *response_mad)

	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	if (mlx4_is_mfunc(dev->dev) &&

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->sl = in_wc->sl << 4;

		memcpy(ext_info->grh, in_grh, 40);

		in_modifier |= in_wc->slid << 16;

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,

	memcpy(response_mad, outmailbox->buf, 256);
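/*
 * update_sm_ah: rebuild the cached address handle for the subnet manager.
 * The cache entry is swapped under sm_lock and later reused by
 * forward_trap() when re-sending traps to the SM.
 */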
	struct ib_ah *new_ah;

	memset(&ah_attr, 0, sizeof ah_attr);

	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	u32 bn, pkey_change_bitmap;

	switch (mad->mad_hdr.attr_id) {

		update_sm_ah(dev, port_num,

		handle_client_rereg_event(dev, port_num);

		handle_lid_change_event(dev, port_num);

		if (!mlx4_is_mfunc(dev->dev)) {

		pkey_change_bitmap = 0;
		for (i = 0; i < 32; i++) {

			    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
				pkey_change_bitmap |= (1 << i);
				dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =

		pr_debug("PKEY Change event: port=%d, block=0x%x, change_bitmap=0x%x\n",
			 port_num, bn, pkey_change_bitmap);

		if (pkey_change_bitmap) {

			if (!dev->sriov.is_going_down)
				__propagate_pkey_ev(dev, port_num, bn,

		if (!mlx4_is_master(dev->dev))

		if (mlx4_is_master(dev->dev) &&
		    !dev->sriov.is_going_down) {
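/*
 * __propagate_pkey_ev: for every slave other than the master, scan the
 * slave's virt2phys pkey mapping and report each virtual index that maps
 * into the changed physical pkey block, forwarding the event to that slave.
 */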
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))

		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))

			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {

					pr_debug("propagate_pkey_ev: slave %d, port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
static void node_desc_override(struct ib_device *dev,

	spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);

	if (IS_ERR(send_buf))

	if ((send_buf->ah = dev->sm_ah[port_num - 1]))

	spin_unlock_irqrestore(&dev->sm_lock, flags);
static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
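/*
 * find_slave_port_pkey_ix: walk the slave's virt2phys pkey mapping for the
 * port, looking for a virtual index whose physical slot holds the requested
 * pkey (ignoring the membership bit).  A full-membership match (bit 15 set)
 * wins immediately; otherwise the first partial-membership match is used.
 */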
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,

	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;

	if (slave == mlx4_master_func_num(dev->dev))

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {

			if (partial_ix == 0xFF)
				partial_ix = pkey_ix;

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
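/*
 * mlx4_ib_send_to_slave: tunnel a MAD that arrived from the wire to the
 * owning slave.  The destination proxy QP number is derived from
 * base_proxy_sqpn + 8 * slave, the pkey index is remapped through
 * find_slave_port_pkey_ix(), and the MAD is posted on the per-slave
 * tunnel QP (qp[0] for SMI traffic, qp[1] for GSI).
 */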
	unsigned tun_tx_ix = 0;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))

		tun_qp = &tun_ctx->qp[0];

		tun_qp = &tun_ctx->qp[1];

	ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);

		tun_pkey_ix = pkey_ix;

		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	memset(&attr, 0, sizeof attr);

	if (tun_qp->tx_ring[tun_tx_ix].ah)

	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,

	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.remote_qpn = dqpn;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
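/*
 * mlx4_ib_demux_mad: decide which slave an incoming MAD belongs to
 * (SA MADs through mlx4_ib_demux_sa_handler(), others default to the
 * master), sanity-check the slave id against sqp_demux, and hand the MAD
 * off through mlx4_ib_send_to_slave().
 */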
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,

	slave = mlx4_master_func_num(dev->dev);

	if (mad->mad_hdr.method & 0x80) {

	switch (mad->mad_hdr.mgmt_class) {

		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,

		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d for slave:%d\n",
				 mad->mad_hdr.mgmt_class, slave);

	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);

		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,

	u16 slid, prev_lid = 0;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",

			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",

			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",

		forward_trap(to_mdev(ibdev), port_num, in_mad);

			prev_lid = pattr.lid;

			    port_num, in_wc, in_grh, in_mad, out_mad);

	if (!out_mad->mad_hdr.status) {

			smp_snoop(ibdev, port_num, in_mad, prev_lid);

		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,

	switch (mode & 0xf) {

		edit_counter(mailbox->buf, (void *)(out_mad->data + 40));

		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);

		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
				        in_grh, in_mad, out_mad);
	if (mad_send_wc->send_buf->context[0])

	for (q = 0; q <= 1; ++q) {

				ret = PTR_ERR(agent);

	for (q = 0; q <= 1; ++q)

		for (q = 0; q <= 1; ++q) {

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)

	if (mlx4_is_master(dev->dev)) {

		if (!dev->sriov.is_going_down) {
static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))

	if (!in_mad || !out_mad) {

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))

		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

				   port_num, NULL, NULL, in_mad, out_mad)) {

		update_sm_ah(dev, port, lid, sl);

		handle_lid_change_event(dev, port);

			if (mlx4_is_master(dev->dev))

						    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);

		handle_client_rereg_event(dev, port);

		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);

		if (!mlx4_is_master(dev->dev))

		else if (!dev->sriov.is_going_down) {

			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);

		pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
			eqe->subtype);
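/*
 * Completion handler for the per-port tunnel CQ: completions are handed
 * off to the demux context's workqueue under sriov.going_down_lock, so
 * that no new work is scheduled once teardown has started.
 */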
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)

	unsigned long flags;

	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

	recv_wr.next = NULL;

	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,

	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)

	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
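/*
 * mlx4_ib_send_to_wire: re-send a MAD on behalf of a slave through the
 * port's real special QP.  The pkey index is translated through the
 * slave's virt2phys table, the requested SGID index is restored into the
 * address handle after it is created, and the force-loopback bit in
 * av.ib.port_pd is cleared before posting.
 */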
	unsigned wire_tx_ix = 0;

	sqp_ctx = dev->sriov.sqps[port-1];

	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))

		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];

	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;

	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;

	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);

	if (sqp->tx_ring[wire_tx_ix].ah)

	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,

	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.pkey_index = wire_pkey_ix;

	ret = ib_post_send(send_qp, &wr, &bad_wr);
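/*
 * mlx4_ib_multiplex_mad: handle a MAD tunneled up from a slave.  The
 * source QP in the work completion must fall inside the proxy QP range
 * owned by that slave, and only the master may source QP0 traffic; after
 * validation the MAD is pushed to the wire, with the slave recorded in
 * the first byte of the transaction ID for later demultiplexing.
 */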
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||

	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		     "belongs to another slave\n", wc->src_qp);

	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
		     "non-master trying to send QP0 packets\n", wc->src_qp);

	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,

	switch (tunnel->mad.mad_hdr.method) {

		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;

			 "class:%d slave:%d\n", *slave_id,
			 tunnel->mad.mad_hdr.mgmt_class, slave);

	switch (tunnel->mad.mad_hdr.mgmt_class) {

		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,

		if (slave != mlx4_master_func_num(dev->dev)) {
			 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);

	    (ah_attr.grh.sgid_index != slave)) {
		 slave, ah_attr.grh.sgid_index);

			    is_proxy_qp0(dev, wc->src_qp, slave) ?

			    &ah_attr, &tunnel->mad);
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)

	tun_qp = &ctx->qp[qp_type];

		if (!tun_qp->ring[i].addr)

		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,

		if (!tun_qp->tx_ring[i].buf.addr)

			ib_dma_map_single(ctx->ib_dev,

		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,

		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,

	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)

	tun_qp = &ctx->qp[qp_type];

		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,

		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
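/*
 * mlx4_ib_tunnel_comp_worker: drain the tunnel CQ.  Receive completions
 * are demultiplexed through mlx4_ib_multiplex_mad() and the ring buffer
 * is reposted; send completions (including errored ones) release the
 * cached address handle and free the tx slot under tx_lock.
 */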
	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {

			mlx4_ib_multiplex_mad(ctx, &wc);
			ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
						     wc.wr_id &
						     (MLX4_NUM_TUNNEL_BUFS - 1));

				pr_err("Failed reposting tunnel buf:%lld\n", wc.wr_id);

			pr_debug("received tunnel send completion: wrid=0x%llx, status=0x%x\n",

				      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);

			spin_unlock(&tun_qp->tx_lock);

			pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",

					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);

				spin_unlock(&tun_qp->tx_lock);

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)

	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;

	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;

		qp_init_attr.init_attr.qp_type = IB_QPT_UD;

		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;

		qp_init_attr.init_attr.qp_type = qp_type;

	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;

	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);

		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);

		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];

		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);

		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);

		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);

		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);

			pr_err(" mlx4_ib_post_pv_buf error (err = %d, i = %d)\n", ret, i);
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)

				      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);

				(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);

				(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);

			mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);

			if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
						   (MLX4_NUM_TUNNEL_BUFS - 1)))
				pr_err("Failed reposting SQP buf:%lld\n", wc.wr_id);

			pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",

					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,

		pr_err("failed allocating pv resource context for port %d, slave %d\n",
		       port, slave);

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)

	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,

	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);

		pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);

		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);

			  NULL, ctx, cq_size, 0);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);

	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);

	if (IS_ERR(ctx->mr)) {
		ret = PTR_ERR(ctx->mr);
		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);

		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);

			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);

		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

		pr_err("Couldn't arm tunnel cq (%d)\n", ret);

	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);

static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,

		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
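/*
 * mlx4_ib_tunnels_update: create or tear down the paravirt resources for
 * one slave on one port.  Every slave gets a tunnel context; the master
 * additionally gets the proxy special-QP context (sriov.sqps[port - 1]).
 */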
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)

		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);

		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);

	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);

	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);

		pr_err("Failed initializing mcg para-virt (%d)\n", ret);

	snprintf(name, sizeof name, "mlx4_ibt%d", port);

		pr_err("Failed to create tunnelling WQ for port %d\n", port);

	snprintf(name, sizeof name, "mlx4_ibud%d", port);

		pr_err("Failed to create up/down WQ for port %d\n", port);

	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	sqp_ctx->qp[0].qp = NULL;
	mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);

	sqp_ctx->qp[1].qp = NULL;
	mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);

		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {

		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)

	if (!mlx4_is_master(dev->dev))

	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
	if (!mlx4_is_mfunc(dev->dev))

	dev->sriov.is_going_down = 0;

	if (mlx4_is_slave(dev->dev)) {

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))

			    dev->dev->caps.sqp_demux);

		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);

		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);

	mlx4_ib_master_tunnels(dev, 1);

		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);

	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))

	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {

			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);

		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);