/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
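/*
 * Hedged sketch of how this threshold is presumably consulted (the field
 * names unsol_ack_circ and packet_count are assumptions based on the
 * ehca_post_send() path below, which posts the hidden circ_wr):
 *
 *	if (my_qp->unsol_ack_circ &&
 *	    my_qp->packet_count > ACK_CIRC_THRESHOLD) {
 *		... build an empty RDMA READ in circ_wr ...
 *		post_one_send(my_qp, &circ_wr, 1);
 *	}
 */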
static u16 get_app_wr_id(u64 wr_id)
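/*
 * Hedged sketch of the intended body, assuming a QMAP_IDX_MASK constant
 * that reserves the low-order bits of the 64-bit wr_id for the driver's
 * queue-map bookkeeping (qmap_entry->app_wr_id stores this application
 * half so it can be restored into wc->wr_id at completion time):
 *
 *	return wr_id & QMAP_IDX_MASK;
 */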
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr,
				  u32 rq_map_idx)
79 "num_sqe=%x max_nr_of_sg=%x",
	/* copy the receive scatter/gather list into the WQE */
	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}
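	/*
	 * Hypothetical caller's view (not part of this file): the
	 * addr/lkey/length triples copied above come straight from the
	 * consumer's ib_sge array, e.g.:
	 *
	 *	struct ib_sge sge = {
	 *		.addr = dma_addr, .lkey = mr->lkey, .length = buf_len,
	 *	};
	 *	struct ib_recv_wr wr = {
	 *		.wr_id = app_cookie, .sg_list = &sge, .num_sge = 1,
	 *	};
	 *	ret = ib_post_recv(qp, &wr, &bad_wr);
	 */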
#if defined(DEBUG_GSI_SEND_WR)
static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
121 "send_flags=%x opcode=%x", idx, send_wr->
wr_id,
126 "mgmt_class=%x class_version=%x method=%x "
127 "status=%x class_specific=%x tid=%lx "
128 "attr_id=%x resv=%x attr_mod=%x",
	for (j = 0; j < send_wr->num_sge; j++) {
		send_wr = send_wr->next;
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr,
				  u32 sq_map_idx,
				  int hidden)
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;
169 "num_sqe=%x max_nr_of_sg=%x",
	switch (send_wr->opcode) {
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;
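		/*
		 * Worked example of the C10-15 rule above: a remote_qkey of
		 * 0x80010203 has the high-order bit set, so the QP's own
		 * qkey is used instead; 0x00010203 would be sent as-is.
		 */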
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.ud_av.pmtu = 1;
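		/*
		 * Note: pmtu value 1 corresponds to the 256-byte IB MTU
		 * (IB_MTU_256), the packet size that SMI/GSI management
		 * traffic is limited to.
		 */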
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		wqe_p->u.nud.remote_virtual_address =
			send_wr->wr.rdma.remote_addr;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			/* accumulate the total payload length */
			dma_length += send_wr->sg_list[idx].length;
		}
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
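		/*
		 * Example: two SGEs of 0x1000 and 0x234 bytes yield
		 * dma_length = 0x1234, which is written above as the WQE's
		 * total transfer length.
		 */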
	/* decode the low six status bits of the CQE */
	switch (cqe_status & 0x3F) {
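	/*
	 * Hedged sketch of a case arm, assuming an enum ib_wc_status output
	 * parameter named wc_status (only the switch head survives in this
	 * excerpt):
	 *
	 *	case 0x01:
	 *		*wc_status = IB_WC_LOC_LEN_ERR;
	 *		break;
	 */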
static inline int post_one_send(struct ehca_qp *my_qp,
				struct ib_send_wr *cur_send_wr,
				int hidden)
414 "qp_num=%x", my_qp->
ib_qp.qp_num);
	/* the WQE index in the send queue doubles as the sq_map index */
	sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
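	/*
	 * Example: with 128-byte queue entries, a start_offset of 0x600
	 * gives sq_map_idx = 0x600 / 128 = 12, so map slot 12 tracks this
	 * WQE.
	 */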
	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
433 "qp_num=%x", my_qp->
ib_qp.qp_num);
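	/*
	 * The 'hidden' argument marks driver-generated WRs (such as the
	 * empty RDMA READ posted below) so that ehca_write_swqe() can skip
	 * requesting a signaled completion for them; application WRs pass 0.
	 */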
		/* insert an empty RDMA READ to fix up the remote QP state */
		memset(&circ_wr, 0, sizeof(circ_wr));
		circ_wr.opcode = IB_WR_RDMA_READ;
		post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
	while (send_wr) {
		ret = post_one_send(my_qp, send_wr, 0);
		if (unlikely(ret))
			goto post_send_exit0;
		wqe_cnt++;
		send_wr = send_wr->next;
	}
	/* ring the send-queue adder doorbell with the number of posted WQEs */
	hipz_update_sqa(my_qp, wqe_cnt);
			 my_qp, qp->qp_num, wqe_cnt, ret);
	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		*bad_send_wr = send_wr;
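	/*
	 * Verbs contract: on failure, *bad_send_wr points at the first work
	 * request that was not posted, so the caller can resubmit starting
	 * there.
	 */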
static int internal_post_recv(struct ehca_qp *my_qp,
			      struct ib_device *dev,
			      struct ib_recv_wr *recv_wr,
			      struct ib_recv_wr **bad_recv_wr)
		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
			ehca_err(dev, "Too many posted WQEs "
				 "qp_num=%x", my_qp->real_qp_num);
			goto post_recv_exit0;
		/* the WQE index in the recv queue doubles as the rq_map index */
		rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
				      rq_map_idx);
			ehca_err(dev, "Could not write WQE "
				 "qp_num=%x", my_qp->real_qp_num);
			goto post_recv_exit0;
		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
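		/*
		 * Presumably the map entry is then initialized like its
		 * send-side counterpart (a sketch; only the lookup survives
		 * in this excerpt):
		 *
		 *	qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
		 *	qmap_entry->reported = 0;
		 */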
		recv_wr = recv_wr->next;
	/* ring the recv-queue adder doorbell with the number of posted WQEs */
	hipz_update_rqa(my_qp, wqe_cnt);
	ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
		 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
		*bad_recv_wr = recv_wr;
		*bad_recv_wr = recv_wr;
	return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
				  srq->device, recv_wr, bad_recv_wr);
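/*
 * Hypothetical consumer view (not part of this file): SRQ receives go
 * through the same internal path as ordinary QP receives, e.g.:
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */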
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0, qmap_tail_idx;
	int cqe_count = 0, is_error;
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
642 "my_cq=%p cq_num=%x", my_cq, my_cq->
cq_number);
643 goto poll_cq_one_exit0;
658 "could not find qp -> ignore cqe",
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
		spin_unlock_irqrestore(&qp->spinlock_s, flags);
671 "Got CQE with purged bit qp_num=%x src_qp=%x",
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
691 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
692 is_error ?
"ERROR " :
"", my_cq, my_cq->
cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	/* advance the tail pointer */
	qmap->tail = qmap_tail_idx;
		/*
		 * in the error state no further CQEs arrive, so flush
		 * everything that is still outstanding on both queues
		 */
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		my_qp->sq_map.left_to_poll = 0;

		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		my_qp->rq_map.left_to_poll = 0;
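		/*
		 * Example, assuming next_index() wraps (tail + 1) to the map
		 * size: with sq_map.tail = 5 in a 16-entry map, next_wqe_idx
		 * becomes 6, and every not-yet-reported entry from there on
		 * is later turned into a flush CQE by generate_flush_cqes()
		 * below.
		 */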
	qmap_entry = &qmap->map[qmap_tail_idx];
		if ((my_qp->sq_map.left_to_poll == 0) &&
		    (my_qp->rq_map.left_to_poll == 0)) {
761 "ehca_cq=%p cq_num=%x",
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
	/* tell the adapter how many CQEs were consumed (free-entry-count adder) */
	hipz_update_feca(my_cq, cqe_count);
static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
			       struct ib_wc *wc, int num_entries,
			       struct ipz_queue *ipz_queue, int on_sq)
	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
		/* generate a software flush CQE */
		memset(wc, 0, sizeof(*wc));
		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		/* one pass like this runs per QP on the CQ's error list */
		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
					 &err_qp->ipz_squeue, 1);
		entries_left -= nr;
		current_wc += nr;

		if (entries_left == 0)
			break;
		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
					 &err_qp->ipz_rqueue, 0);
		entries_left -= nr;
		current_wc += nr;

		if (entries_left == 0)
			break;
	for (nr = 0; nr < entries_left; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
	spin_unlock_irqrestore(&my_cq->spinlock, flags);
	if (ret == -EAGAIN || !ret)
		ret = num_entries - entries_left;
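	/*
	 * Example: polling with num_entries = 8 when only 3 completions
	 * (flush or hardware) could be harvested leaves entries_left = 5,
	 * so 3 is returned; -EAGAIN from ehca_poll_cq_one() merely means
	 * the CQ ran empty and is folded into that count.
	 */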
		hipz_set_cqx_n0(my_cq, 1);
		hipz_set_cqx_n1(my_cq, 1);
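	/*
	 * These two adapter registers implement the ib_req_notify_cq()
	 * modes: N0 arms an event for the next solicited completion
	 * (IB_CQ_SOLICITED), N1 for any next completion (IB_CQ_NEXT_COMP).
	 */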
	unsigned long spl_flags;

	spin_lock_irqsave(&my_cq->spinlock, spl_flags);
	ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
	spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
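	/*
	 * With IB_CQ_REPORT_MISSED_EVENTS, a positive return value tells
	 * the consumer that valid CQEs are already pending, so it should
	 * poll again rather than sleep; ipz_qeit_is_valid() peeks at the
	 * current queue entry under the CQ lock to answer that.
	 */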