#include <linux/slab.h>
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
                                     IB_QP_EN_SQD_ASYNC_NOTIFY)
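/*
 * Translate between the IB verbs encodings of QP state and the eHCA
 * firmware encodings; unknown values are reported as errors.
 */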
        switch (ib_qp_state) {

        switch (ehca_qp_state) {
                ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);

        switch (ib_tostate) {

        switch (ib_fromstate) {

        switch (ib_fromstate) {
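/* Map an IB QP type onto the eHCA service type; negative on unknown types. */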
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
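/*
 * Allocate an ipz queue and register its pages with the firmware one by
 * one via hipz_qp_register_rpage(); all but the last page must return
 * H_PAGE_REGISTERED, the last one must return expected_hret.
 */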
static inline int init_qp_queue(struct ehca_shca *shca,

        int ret, cnt, ipz_rc, nr_q_pages;
                ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
        for (cnt = 0; cnt < nr_q_pages; cnt++) {
                        ehca_err(ib_dev, "ipz_qpageit_get_inc() "
                                 "failed p_vpage= %p", vpage);
                if (cnt == (nr_q_pages - 1)) {
                        if (h_ret != expected_hret) {
                                ehca_err(ib_dev, "hipz_qp_register_rpage() "
                                         "h_ret=%lli", h_ret);
                                ehca_err(ib_dev, "ipz_qpageit_get_inc() "
                                         "should not succeed vpage=%p", vpage);
                        if (h_ret != H_PAGE_REGISTERED) {
                                ehca_err(ib_dev, "hipz_qp_register_rpage() "
                                         "h_ret=%lli", h_ret);

        ipz_qeit_reset(queue);
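/*
 * WQE size in bytes: low-latency QPs use a power-of-two slot size derived
 * from the SGE count, regular QPs are sized up to the end of the SG list.
 */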
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
                return 128 << act_nr_sge;
                               u.nud.sg_list[act_nr_sge]);
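/*
 * For LLQPs, round the requested SGE count up to the next supported value
 * (4, 12, 28, ..., 252); then pick a small queue page size if the whole
 * queue fits into one of the small page categories.
 */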
                                       int req_nr_sge, int is_llqp)
        u32 wqe_size, q_size;
        int act_nr_sge = req_nr_sge;

                for (act_nr_sge = 4; act_nr_sge <= 252;
                     act_nr_sge = 4 + 2 * act_nr_sge)
                        if (act_nr_sge >= req_nr_sge)

                wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
        q_size = wqe_size * (queue->max_wr + 1);

        else if (q_size <= 1024)
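/*
 * QPs in the error state are linked onto per-CQ error lists
 * (sqp_err_list/rqp_err_list); list nodes are added and removed under the
 * CQ spinlock.
 */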
                list = &qp->send_cq->sqp_err_list;
                list = &qp->recv_cq->rqp_err_list;
        if (list_empty(node))

        if (!list_empty(node))
        spin_unlock_irqrestore(&cq->spinlock, flags);
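/* Mark all queue map entries as reported and as not expecting a CQE. */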
        for (i = 0; i < qmap->entries; i++) {
                qmap->map[i].reported = 1;
                qmap->map[i].cqe_req = 0;
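/*
 * Worker shared by ehca_create_qp() and ehca_create_srq(): allocates the
 * ehca_qp structure, requests the queue pair from the firmware and sets up
 * the send and receive queues.
 */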
static struct ehca_qp *internal_create_qp(
        int is_llqp = 0, has_srq = 0, is_user = 0;
        int qp_type, max_send_sge, max_recv_sge, ret;
        u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
                         "use the number_of_qps module parameter.\n");
        memset(&parms, 0, sizeof(parms));
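        /* the high bit of qp_type flags a low-latency QP */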
        if (qp_type & 0x80) {

        if (init_attr->srq) {
        if (is_llqp && has_srq) {

                parms.srq_limit = srq_init_attr->attr.srq_limit;
                if (init_attr->cap.max_recv_sge > 3) {
                                 "supported for SRQ pd=%p max_sge=%x",
                                 pd, init_attr->cap.max_recv_sge);
                if ((init_attr->cap.max_send_wr > 255) ||
                    (init_attr->cap.max_recv_wr > 255)) {
                                 "Invalid Number of max_sq_wr=%x "
                                 "or max_rq_wr=%x for RC LLQP",
                                 init_attr->cap.max_send_wr,
                                 init_attr->cap.max_recv_wr);
                        if (!(init_attr->cap.max_send_sge <= 5
                              && init_attr->cap.max_send_sge >= 1
                              && init_attr->cap.max_recv_sge <= 5
                              && init_attr->cap.max_recv_sge >= 1)) {
                                         "Invalid Number of max_send_sge=%x "
                                         "or max_recv_sge=%x for UD LLQP",
                                         init_attr->cap.max_send_sge,
                                         init_attr->cap.max_recv_sge);
                        } else if (init_attr->cap.max_send_wr > 255) {
                                         "max_send_wr=%x for UD QP_TYPE=%x",
                                         init_attr->cap.max_send_wr, qp_type);
        if (init_attr->cap.max_send_sge > max_sge
            || init_attr->cap.max_recv_sge > max_sge) {
                         "send_sge=%x recv_sge=%x max_sge=%x",
                         init_attr->cap.max_send_sge,
                         init_attr->cap.max_recv_sge, max_sge);
        my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);

                context = pd->uobject->context;
                goto create_qp_exit0;

                goto create_qp_exit0;
        if (my_qp->token > 0x1FFFFFF) {
                goto create_qp_exit1;

                parms.srq_token = my_qp->token;

        parms.servicetype = ibqptype2servicetype(qp_type);
        if (parms.servicetype < 0) {
                goto create_qp_exit1;
        max_send_sge = init_attr->cap.max_send_sge;
        max_recv_sge = init_attr->cap.max_recv_sge;
        if (parms.servicetype == ST_UD && !is_llqp) {
        parms.token = my_qp->token;
        parms.eq_handle = shca->eq.ipz_eq_handle;
        parms.pd = my_pd->fw_pd;
                parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
                parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

        parms.squeue.max_wr = init_attr->cap.max_send_wr;
        parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
        parms.squeue.max_sge = max_send_sge;
        parms.rqueue.max_sge = max_recv_sge;
                ehca_determine_small_queue(
                        &parms.squeue, max_send_sge, is_llqp);
                ehca_determine_small_queue(
                        &parms.rqueue, max_recv_sge, is_llqp);
        if (h_ret != H_SUCCESS) {
                goto create_qp_exit1;

        ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
        my_qp->galpas = parms.galpas;

        swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
        rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
                                    HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
                                    &parms.squeue, swqe_size);
                                 "and pages ret=%i", ret);
                        goto create_qp_exit2;

                        goto create_qp_exit3;

                reset_queue_map(&my_qp->sq_map);

                                    H_SUCCESS, &parms.rqueue, rwqe_size);
                                 "and pages ret=%i", ret);
                        goto create_qp_exit4;

                        goto create_qp_exit5;

                reset_queue_map(&my_qp->rq_map);
        } else if (init_attr->srq && !is_user) {

        my_qp->ib_qp.qp_num = ib_qp_num;

        my_qp->ib_qp.qp_type = qp_type;

        init_attr->cap.max_inline_data = 0;
862 "Could not alloc mod_qp_parm");
863 goto create_qp_exit5;
871 if (h_ret != H_SUCCESS) {
878 goto create_qp_exit6;
886 "Couldn't assign qp to send_cq ret=%i", ret);
887 goto create_qp_exit7;
892 if (context && udata) {
910 if (ib_copy_to_udata(udata, &
resp,
sizeof resp)) {
913 goto create_qp_exit8;
926 if (
HAS_RQ(my_qp) && !is_user)
934 if (
HAS_SQ(my_qp) && !is_user)
        ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
        return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
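/*
 * An SRQ is realized as a receive-only base QP: it is created through
 * internal_create_qp() and then brought into the working state with
 * firmware modify-QP calls.
 */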
        u64 hret, update_mask;

        memset(&qp_init_attr, 0, sizeof(qp_init_attr));
        qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
        qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

        my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
                return (struct ib_srq *)my_qp;

        srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
        srq_init_attr->attr.max_sge = 3;
1006 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->
real_qp_num);
1018 mqpcb, my_qp->
galpas.kernel);
1019 if (hret != H_SUCCESS) {
1021 "ehca_qp=%p qp_num=%x h_ret=%lli",
1032 mqpcb, my_qp->
galpas.kernel);
1033 if (hret != H_SUCCESS) {
1035 "ehca_qp=%p qp_num=%x h_ret=%lli",
1046 mqpcb, my_qp->
galpas.kernel);
1047 if (hret != H_SUCCESS) {
1049 "ehca_qp=%p qp_num=%x h_ret=%lli",
1063 internal_destroy_qp(pd->
device, my_qp, my_qp->
ib_srq.uobject);
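/*
 * After an SQE->RTS transition: locate the send WQE the firmware flagged
 * as bad and count the WQEs queued behind it in *bad_wqe_cnt so that the
 * send queue adder can be adjusted afterwards (see hipz_update_sqa below).
 */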
        void *bad_send_wqe_p, *bad_send_wqe_v;
        int qp_num = my_qp->ib_qp.qp_num;

                                           &bad_send_wqe_p, NULL, 2);
        if (h_ret != H_SUCCESS) {
                         " ehca_qp=%p qp_num=%x h_ret=%lli",
                         my_qp, qp_num, h_ret);

        bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
                 qp_num, bad_send_wqe_p);

        bad_send_wqe_v = __va((u64)bad_send_wqe_p);
                ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
                 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
        wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);

        while (wqe->optype != 0xff && wqe->wqef != 0xff) {
                        ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
                q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
                wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
                *bad_wqe_cnt = (*bad_wqe_cnt)+1;

                 qp_num, (*bad_wqe_cnt)-1);
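/*
 * Count the work requests between the queue map tail and the WQE the
 * firmware reported as current; these are the CQEs still left to poll.
 */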
        unsigned int tail_idx;

        wqe_p = wqe_p & (~(1UL << 63));

        wqe_v = __va(wqe_p);

                ehca_gen_err("Invalid offset for calculating left cqes "
                             "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);

        while (tail_idx != wqe_idx) {
                if (qmap->map[tail_idx].cqe_req)
                tail_idx = next_index(tail_idx, qmap->entries);
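/*
 * On the transition out of the error state, fetch the current WQE
 * pointers from the firmware and determine, under the CQ locks, how many
 * flush CQEs remain to be polled on each queue.
 */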
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
        void *send_wqe_p, *recv_wqe_p;
        unsigned long flags;
        int qp_num = my_qp->ib_qp.qp_num;

                                               &send_wqe_p, &recv_wqe_p, 4);
        if (h_ret != H_SUCCESS) {
                         "failed ehca_qp=%p qp_num=%x h_ret=%lli",
                         my_qp, qp_num, h_ret);

                spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

                spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);

                my_qp->sq_map.left_to_poll = 0;
                my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
                spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

                my_qp->rq_map.left_to_poll = 0;
                my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
                spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);

        if ((my_qp->sq_map.left_to_poll == 0) &&
            (my_qp->rq_map.left_to_poll == 0)) {
                spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

                spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
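/*
 * Worker for ehca_modify_qp(): validates the requested state transition,
 * fills the firmware modify-QP control block accordingly and issues the
 * modify call; smi_reset2init flags the implicit RESET->INIT transition
 * performed for the special QPs.
 */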
static int internal_modify_qp(struct ib_qp *ibqp,
                              int attr_mask, int smi_reset2init)
        int cnt, qp_attr_idx, ret = 0;
        int bad_wqe_cnt = 0;
        int squeue_locked = 0;
        unsigned long flags = 0;

                 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
                                mqpcb, my_qp->galpas.kernel);
        if (h_ret != H_SUCCESS) {
                         "ehca_qp=%p qp_num=%x h_ret=%lli",
                         my_qp, ibqp->qp_num, h_ret);
                goto modify_qp_exit1;

        qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

        if (qp_cur_state == -EINVAL) {
                         "ehca_qp=%p qp_num=%x",
                goto modify_qp_exit1;
        if (smi_reset2init == 0 &&
                int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
                int smirc = internal_modify_qp(
                        ibqp, &smiqp_attr, smiqp_attr_mask, 1);
                                 "ehca_modify_qp() rc=%i", smirc);
                        goto modify_qp_exit1;
1323 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
1324 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
1326 goto modify_qp_exit1;
1329 ehca_dbg(ibqp->
device,
"ehca_qp=%p qp_num=%x current qp_state=%x "
1330 "new qp_state=%x attribute_mask=%x",
1333 qp_new_state = attr_mask & IB_QP_STATE ? attr->
qp_state : qp_cur_state;
        if (!smi_reset2init &&
                         "Invalid qp transition new_state=%x cur_state=%x "
                         "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
                         qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
                goto modify_qp_exit1;

        mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
                         "ehca_qp=%p qp_num=%x",
                         qp_new_state, my_qp, ibqp->qp_num);
                goto modify_qp_exit1;

        statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
        if (statetrans < 0) {
                         "new_qp_state=%x State_xsition=%x ehca_qp=%p "
                         "qp_num=%x", qp_cur_state, qp_new_state,
                         statetrans, my_qp, ibqp->qp_num);
                goto modify_qp_exit1;

        qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

        if (qp_attr_idx < 0) {
                         "Invalid QP type=%x ehca_qp=%p qp_num=%x",
                goto modify_qp_exit1;

                 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
                 my_qp, ibqp->qp_num, statetrans);
                ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
                                 "ehca_qp=%p qp_num=%x ret=%i",
                                 my_qp, ibqp->qp_num, ret);
                        goto modify_qp_exit2;

                                 "ehca_qp=%p qp_num=%x max_pkey_index=f",
                        goto modify_qp_exit2;

                                 "ehca_qp=%p qp_num=%x num_ports=%x",
                        goto modify_qp_exit2;

                        goto modify_qp_exit2;

                                 "either port is being activated (try again) "
                                 "or cabling issue", attr->port_num);
                        goto modify_qp_exit2;

                goto modify_qp_exit2;

        for (cnt = 0; cnt < 16; cnt++)
                        attr->ah_attr.grh.dgid.raw[cnt];
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {

                                 "ehca_qp=%p qp_num=%x num_ports=%x",
                        goto modify_qp_exit2;

                                 "ehca_qp=%p qp_num=%x max_pkey_index=f",
                        goto modify_qp_exit2;

                goto modify_qp_exit2;

                for (cnt = 0; cnt < 16; cnt++)

                goto modify_qp_exit2;
                                 mqpcb, my_qp->galpas.kernel);

        if (h_ret != H_SUCCESS) {
                         "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
                goto modify_qp_exit2;

                hipz_update_sqa(my_qp, bad_wqe_cnt-1);

                if (h_ret != H_SUCCESS) {
                                 "RESET_2_INIT failed! Maybe you didn't get "
                                 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
                                 h_ret, my_qp, ibqp->qp_num);
                        goto modify_qp_exit2;

                ret = check_for_left_cqes(my_qp, shca);
                        goto modify_qp_exit2;
        if (qp_cur_state == IB_QPS_ERR && !is_user) {

                        del_from_err_list(my_qp->recv_cq,

                reset_queue_map(&my_qp->sq_map);

                if (HAS_RQ(my_qp) && !is_user)
                        reset_queue_map(&my_qp->rq_map);

        if (attr_mask & IB_QP_QKEY)

        if (squeue_locked) {
                spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
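/*
 * ehca_modify_qp(): while the port is inactive, attributes for the
 * special QPs are saved in mod_qp_parm and replayed by ehca_recover_sqp()
 * once the port comes back up.
 */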
        unsigned long flags;

                                 "mod_qp_parm overflow state=%x port=%x"

                p->mask = attr_mask;
                         "Saved qp_attr for state=%x port=%x type=%x",

        ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

        if ((ret == 0) && (attr_mask & IB_QP_STATE))
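/*
 * Replay the QP attributes that were saved while the port was down and
 * restart the receive queue of the special QP.
 */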
        int i, qp_parm_idx, ret;
        unsigned long flags, wr_cnt;

        for (i = 0; i < qp_parm_idx; i++) {
                attr = qp_parm[i].attr;
                ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
                                 "qp_num=%x ret=%x", port, sqp->qp_num, ret);

        wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
                hipz_update_rqa(my_sqp, wr_cnt);
        spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);

                 port, sqp->qp_num, wr_cnt);
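/*
 * ehca_query_qp(): read the QP control block from the firmware and
 * translate it back into the ib_qp_attr representation.
 */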
1905 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
1906 my_qp, qp->
qp_num, qp_attr_mask);
1913 "ehca_qp=%p qp_num=%x", my_qp, qp->
qp_num);
1920 qpcb, my_qp->
galpas.kernel);
1922 if (h_ret != H_SUCCESS) {
1925 "ehca_qp=%p qp_num=%x h_ret=%lli",
1926 my_qp, qp->
qp_num, h_ret);
1927 goto query_qp_exit1;
1936 "ehca_qp=%p qp_num=%x",
1938 goto query_qp_exit1;
1954 qp_attr->
cap.max_send_sge =
1956 qp_attr->
cap.max_recv_sge =
1959 qp_attr->
cap.max_send_sge =
1961 qp_attr->
cap.max_recv_sge =
1999 for (cnt = 0; cnt < 16; cnt++)
2000 qp_attr->
ah_attr.grh.dgid.raw[cnt] =
2019 for (cnt = 0; cnt < 16; cnt++)
2051 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->
real_qp_num);
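/*
 * ehca_modify_srq(): only the SRQ limit can be changed here; any other
 * attribute bits left in attr_mask are rejected as invalid.
 */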
        attr_mask &= ~IB_SRQ_LIMIT;

                         "attr_mask=%x", attr_mask);
                goto modify_srq_exit0;

                                NULL, update_mask, mqpcb,

        if (h_ret != H_SUCCESS) {
                         "ehca_qp=%p qp_num=%x",
2107 "ehca_qp=%p qp_num=%x", my_qp, my_qp->
real_qp_num);
2114 if (h_ret != H_SUCCESS) {
2117 "ehca_qp=%p qp_num=%x h_ret=%lli",
2119 goto query_srq_exit1;
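/*
 * Common teardown for QPs and SRQs: detach from the completion queues,
 * destroy the firmware QP and release the queue memory.
 */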
        unsigned long flags;

                ehca_err(dev, "Resources still referenced in "
                         "user space qp_num=%x", qp_num);

                        ehca_err(dev, "Couldn't unassign qp from "
                                 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
                                 qp_num, my_qp->send_cq->cq_number);

        if (HAS_SQ(my_qp) && !is_user)

        if (h_ret != H_SUCCESS) {
                ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
                         "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);

                shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;

                ehca_info(dev, "device %s: port %x is inactive.",

        return internal_destroy_qp(qp->device,

        return internal_destroy_qp(srq->device,