#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/if_ether.h>

#define MLX4_MAC_VALID		(1ull << 63)
	default:
		return "Unknown";
		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);

	rb_link_node(&res->node, parent, new);
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	default: return "Unknown resource type !!!";
	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list), GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
		       slave_list[i].res_list[t]);

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	if (priv->mfunc.master.res_tracker.slave_list) {

		    dev->caps.function != i)

		kfree(priv->mfunc.master.res_tracker.slave_list);
		priv->mfunc.master.res_tracker.slave_list = NULL;
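/*
 * Mailbox fix-ups applied on behalf of a slave: the byte at offset 35 of
 * the command mailbox (which appears to carry the pkey index) is replaced
 * with a remapped value, and the QP context's mgid_index fields are forced
 * to slave-specific values so a guest function only uses its own GID
 * entries.
 */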
	u8 orig_index = *(u8 *)(inbox->buf + 35);

	port = (sched >> 6 & 1) + 1;

	*(u8 *)(inbox->buf + 35) = new_index;

		qp_ctx->pri_path.mgid_index = 0x80 | slave;

		qp_ctx->pri_path.mgid_index = slave & 0x7F;
		qp_ctx->alt_path.mgid_index = slave & 0x7F;
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, int res_id,

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);

	if (r->owner != slave) {

	spin_unlock_irq(mlx4_tlock(dev));

			  u64 res_id, int *slave)

	spin_lock(mlx4_tlock(dev));
	r = find_res(dev, id, type);

	spin_unlock(mlx4_tlock(dev));

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);

	spin_unlock_irq(mlx4_tlock(dev));
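/*
 * The lookup/locking helpers above are used in a get/put pattern by the
 * command wrappers further down.  Roughly (illustrative sketch, not a
 * verbatim quote of this file):
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... operate on the resource while the tracker entry is held ...
 *	put_res(dev, slave, id, RES_MPT);
 */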
	ret->com.res_id = id;

	ret->com.res_id = id;

	ret->com.res_id = id;

	ret->com.res_id = id;

	ret->com.res_id = id;

static struct res_common *alloc_srq_tr(int id)

	ret->com.res_id = id;

static struct res_common *alloc_counter_tr(int id)

	ret->com.res_id = id;

static struct res_common *alloc_xrcdn_tr(int id)

	ret->com.res_id = id;

	ret->com.res_id = id;

		ret = alloc_qp_tr(id);
		ret = alloc_mpt_tr(id, extra);
		ret = alloc_mtt_tr(id, extra);
		ret = alloc_eq_tr(id);
		ret = alloc_cq_tr(id);
		ret = alloc_srq_tr(id);
		ret = alloc_counter_tr(id);
		ret = alloc_xrcdn_tr(id);
		ret = alloc_fs_rule_tr(id);
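/*
 * add_res_range()/rem_res_range() below register and unregister a run of
 * [base, base + count) IDs of one resource type for a slave: each ID gets
 * a tracker entry from alloc_tr(), the entries are inserted into the
 * per-type rb-tree under the tracker lock, and already inserted nodes are
 * rolled back on failure.
 */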
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);

	for (--i; i >= 0; --i)

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {

		err = res_tracker_insert(root, res_arr[i]);

	spin_unlock_irq(mlx4_tlock(dev));

	for (--i; i >= base; --i)

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
static int remove_qp_ok(struct res_qp *res)

static int remove_mtt_ok(struct res_mtt *res, int order)

			 mtt_states_str(res->com.state),

	else if (res->order != order)

static int remove_mpt_ok(struct res_mpt *res)

static int remove_eq_ok(struct res_eq *res)

static int remove_counter_ok(struct res_counter *res)

static int remove_xrcdn_ok(struct res_xrcdn *res)

static int remove_fs_rule_ok(struct res_fs_rule *res)

static int remove_cq_ok(struct res_cq *res)

static int remove_srq_ok(struct res_srq *res)

		return remove_qp_ok((struct res_qp *)res);
		return remove_cq_ok((struct res_cq *)res);
		return remove_srq_ok((struct res_srq *)res);
		return remove_mpt_ok((struct res_mpt *)res);
		return remove_mtt_ok((struct res_mtt *)res, extra);
		return remove_eq_ok((struct res_eq *)res);
		return remove_counter_ok((struct res_counter *)res);
		return remove_xrcdn_ok((struct res_xrcdn *)res);
		return remove_fs_rule_ok((struct res_fs_rule *)res);
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);

		if (r->owner != slave) {

		err = remove_ok(r, type, extra);

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);

	spin_unlock_irq(mlx4_tlock(dev));
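/*
 * The *_res_start_move_to() helpers below begin a state transition for one
 * tracked resource: under the tracker lock they verify that the caller
 * owns the entry and save the current state in com.from_state.
 * res_end_move() commits the transition after the firmware command
 * succeeds, while res_abort_move() undoes it on failure.
 */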
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,

	spin_lock_irq(mlx4_tlock(dev));

	else if (r->com.owner != slave)

			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",

		r->com.from_state = r->com.state;

	spin_unlock_irq(mlx4_tlock(dev));

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,

	spin_lock_irq(mlx4_tlock(dev));

	else if (r->com.owner != slave)

		r->com.from_state = r->com.state;

	spin_unlock_irq(mlx4_tlock(dev));

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,

	spin_lock_irq(mlx4_tlock(dev));

	else if (r->com.owner != slave)

		r->com.from_state = r->com.state;

	spin_unlock_irq(mlx4_tlock(dev));

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,

	spin_lock_irq(mlx4_tlock(dev));

	else if (r->com.owner != slave)

		r->com.from_state = r->com.state;

	spin_unlock_irq(mlx4_tlock(dev));

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,

	spin_lock_irq(mlx4_tlock(dev));

	else if (r->com.owner != slave)

		r->com.from_state = r->com.state;

	spin_unlock_irq(mlx4_tlock(dev));
static void res_abort_move(struct mlx4_dev *dev, int slave,

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))

	spin_unlock_irq(mlx4_tlock(dev));

static void res_end_move(struct mlx4_dev *dev, int slave,

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))

	spin_unlock_irq(mlx4_tlock(dev));
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
	       (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
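/*
 * Per-type allocation handlers: each *_alloc_res() parses the opcode
 * modifier and the 64-bit in_param (via get_param_l/get_param_h), reserves
 * the requested range with add_res_range(), and returns the allocated ID
 * to the slave through out_param.  The ALLOC_RES command wrapper further
 * down dispatches to them by resource type.
 */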
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	count = get_param_l(&in_param);
	align = get_param_h(&in_param);

	err = add_res_range(dev, slave, base, count, RES_QP, 0);

	set_param_l(out_param, base);

	qpn = get_param_l(&in_param) & 0x7fffff;
	if (valid_reserved(dev, slave, qpn)) {
		err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

	if (!fw_reserved(dev, qpn)) {

		res_abort_move(dev, slave, RES_QP, qpn);

	res_end_move(dev, slave, RES_QP, qpn);
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	order = get_param_l(&in_param);

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);

	set_param_l(out_param, base);
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	id = index & mpt_mask(dev);
	err = add_res_range(dev, slave, id, 1, RES_MPT, index);

	set_param_l(out_param, index);

	index = get_param_l(&in_param);
	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id,

		res_abort_move(dev, slave, RES_MPT, id);

	res_end_move(dev, slave, RES_MPT, id);
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);

	set_param_l(out_param, cqn);
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

	set_param_l(out_param, srqn);
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,

		if (res->mac == mac && res->port == (u8) port) {

static void rem_slave_macs(struct mlx4_dev *dev, int slave)

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

	port = get_param_l(out_param);

	set_param_l(out_param, err);

	err = mac_add_to_slave(dev, slave, mac, port);
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);

	set_param_l(out_param, index);

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

	set_param_l(out_param, xrcdn);
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
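/*
 * The FREE_RES path mirrors the allocation path: each *_free_res() handler
 * removes the slave's tracker entries with rem_res_range() (releasing the
 * underlying object where needed), and the FREE_RES command wrapper below
 * dispatches on resource type just like the ALLOC_RES one above.
 */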
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,

	base = get_param_l(&in_param) & 0x7fffff;
	count = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, count, RES_QP, 0);

	qpn = get_param_l(&in_param) & 0x7fffff;

	if (!fw_reserved(dev, qpn))

	res_end_move(dev, slave, RES_QP, qpn);

	if (valid_reserved(dev, slave, qpn))
		err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,

	index = get_param_l(&in_param);
	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	put_res(dev, slave, id, RES_MPT);

	err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

	index = get_param_l(&in_param);
	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id,

	res_end_move(dev, slave, RES_MPT, id);
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)

	cqn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	srqn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)

	port = get_param_l(out_param);
	mac_del_from_slave(dev, slave, in_param, port);

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,

	int res_start = mtt->com.res_id;

	if (start < res_start || start + size > res_start + res_size)
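/*
 * The command wrappers below (for SW2HW_MPT, RST2INIT_QP, SW2HW_EQ/CQ/SRQ
 * and similar firmware commands, judging by the resources they touch) run
 * on the master on behalf of a slave.  Before a command reaches firmware
 * they look up every referenced resource (MTT range, CQs, SRQ, ...) with
 * get_res()/check_mtt_range() to verify the slave owns it, then either
 * commit the state change with res_end_move() or roll it back with
 * res_abort_move(), dropping the references with put_res().
 */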
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

	phys = mr_phys_mpt(inbox->buf);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base,
			      mr_get_mtt_size(inbox->buf), mtt);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_end_move(dev, slave, RES_MPT, id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_MPT, id);
	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

	res_end_move(dev, slave, RES_MPT, id);

	res_abort_move(dev, slave, RES_MPT, id);

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	put_res(dev, slave, id, RES_MPT);
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

	err = get_res(dev, slave, scqn, RES_CQ, &scq);

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);

	put_res(dev, slave, scqn, RES_CQ);

	put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, scqn, RES_CQ);

	put_res(dev, slave, rcqn, RES_CQ);

	put_res(dev, slave, mtt_base, RES_MTT);

	res_abort_move(dev, slave, RES_QP, qpn);
	if (log_eq_size + 5 < page_shift)

	return 1 << (log_eq_size + 5 - page_shift);

	if (log_cq_size + 5 < page_shift)

	return 1 << (log_cq_size + 5 - page_shift);
	int res_id = (slave << 8) | eqn;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);

	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)

	spin_lock_irq(mlx4_tlock(dev));

		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			mtt->com.from_state = mtt->com.state;

	spin_unlock_irq(mlx4_tlock(dev));
static int verify_qp_parameters(struct mlx4_dev *dev,

	qp_ctx = inbox->buf + 8;

	switch (transition) {

			if (slave != mlx4_master_func_num(dev))
	u64 *pg_list = (u64 *)page_list;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);

	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

			       ((u64 *)page_list + 2));

	put_res(dev, slave, rmtt->com.res_id, RES_MTT);
	int res_id = eqn | (slave << 8);

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);

	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);
	u32 in_modifier = 0;

	if (!priv->mfunc.master.slave_state)

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	if (event_eq->eqn < 0)

	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);

	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	put_res(dev, slave, res_id, RES_EQ);

	put_res(dev, slave, res_id, RES_EQ);
	int res_id = eqn | (slave << 8);

	err = get_res(dev, slave, res_id, RES_EQ, &eq);

	put_res(dev, slave, res_id, RES_EQ);
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_CQ, cqn);

	res_end_move(dev, slave, RES_CQ, cqn);

	res_abort_move(dev, slave, RES_CQ, cqn);
	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	put_res(dev, slave, cqn, RES_CQ);
static int handle_resize(struct mlx4_dev *dev, int slave,

	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

	if (orig_mtt != cq->mtt) {

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

	put_res(dev, slave, cqn, RES_CQ);
	int log_rq_stride = srqc->logstride & 7;

	if (log_srq_size + log_rq_stride + 4 < page_shift)

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_SRQ, srqn);

	res_end_move(dev, slave, RES_SRQ, srqn);

	res_abort_move(dev, slave, RES_SRQ, srqn);
	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	put_res(dev, slave, srqn, RES_SRQ);

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	put_res(dev, slave, srqn, RES_SRQ);

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	put_res(dev, slave, qpn, RES_QP);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);

	adjust_proxy_tun_qkey(dev, vhcr, context);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);

	res_end_move(dev, slave, RES_QP, qpn);

	res_abort_move(dev, slave, RES_QP, qpn);
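/*
 * Multicast and flow-steering bookkeeping: add_mcg_res()/rem_mcg_res()
 * record which multicast GIDs a slave's QP is attached to (under the QP's
 * mcg spinlock), and the attach/detach wrappers undo that bookkeeping if
 * the firmware call fails.  For flow-steering rules the MAC in the rule
 * header is checked against the addresses registered to that slave, and a
 * missing L2 header is synthesized before the rule is passed on.
 */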
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,

	if (find_gid(dev, slave, rqp, gid)) {

	spin_unlock_irq(&rqp->mcg_spl);

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,

	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)

	spin_unlock_irq(&rqp->mcg_spl);

	u8 *gid = inbox->buf;

	u8 steer_type_mask = 2;

	err = get_res(dev, slave, qpn, RES_QP, &rqp);

	err = add_mcg_res(dev, slave, rqp, gid, prot, type);

				block_loopback, prot, type);

	err = rem_mcg_res(dev, slave, rqp, gid, prot, type);

	put_res(dev, slave, qpn, RES_QP);

	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);

	put_res(dev, slave, qpn, RES_QP);
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {

			pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
			       eth_header->eth.dst_mac, slave);

static int add_eth_header(struct mlx4_dev *dev, int slave,

	switch (header_id) {

		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));

		memmove(l4_header, eth_header, sizeof(*l4_header));

		if (port == res->port) {

		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;

	if (dev->caps.steering_mode !=

	rule_header = (struct _rule_hw *)(ctrl + 1);

	switch (header_id) {

		if (validate_eth_header_mac(slave, rule_header, rlist))

		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id))

		pr_err("Corrupted mailbox\n");

		mlx4_err(dev, "Failed to add flow steering resources\n");
	if (dev->caps.steering_mode !=

		mlx4_err(dev, "Failed to remove flow steering resources\n");
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)

static int _move_all_busy(struct mlx4_dev *dev, int slave,

		&priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));

		if (r->owner == slave) {

					 "%s id 0x%llx is busy\n",

	spin_unlock_irq(mlx4_tlock(dev));

static int move_all_busy(struct mlx4_dev *dev, int slave,

	unsigned long begin;

	busy = _move_all_busy(dev, slave, type, 0);

	busy = _move_all_busy(dev, slave, type, 1);
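/*
 * Slave teardown: each rem_slave_*() function below first tries to move
 * every resource of one type still owned by the slave to a busy state
 * (move_all_busy), then walks the tracker, drives each entry back to
 * software ownership with the appropriate firmware command, and erases the
 * tracker entry.  The block at the very end runs them back to back to
 * reclaim everything a slave owns.
 */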
static void rem_slave_qps(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_QP);
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

					if (!valid_reserved(dev, slave, qpn))

						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to"

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_SRQ);
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

					err = mlx4_cmd(dev, in_param, srqn, 1,

						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to"

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_CQ);
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

					err = mlx4_cmd(dev, in_param, cqn, 1,

						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to"

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_MPT);
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

					err = mlx4_cmd(dev, in_param, mptn, 0,

						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to"

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)

		&priv->mfunc.master.res_tracker;

	err = move_all_busy(dev, slave, RES_MTT);
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)

		&priv->mfunc.master.res_tracker;

		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_EQ);
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {

					spin_lock_irq(mlx4_tlock(dev));
					spin_unlock_irq(mlx4_tlock(dev));

					if (IS_ERR(mailbox)) {

					err = mlx4_cmd_box(dev, slave, 0,

						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);

	spin_lock_irq(mlx4_tlock(dev));
	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_counters(struct mlx4_dev *dev, int slave)

		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
		if (counter->com.owner == slave) {
			index = counter->com.res_id;

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)

	err = move_all_busy(dev, slave, RES_XRCD);
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;

	spin_unlock_irq(mlx4_tlock(dev));
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	rem_slave_fs_rule(dev, slave);