/* Per-QP limits on outstanding RDMA read/atomic operations. */
#define C2_MAX_ORD_PER_QP 128	/* outbound (initiator) depth */
#define C2_MAX_IRD_PER_QP 128	/* inbound (responder) depth */

/*
 * A "hint" packs a queue index (upper 16 bits) and a hint count
 * (lower 16 bits) into one 32-bit word.
 *
 * NOTE(review): C2_HINT_GET_INDEX masks with 0x7FFF0000, so bit 31 is
 * never reported back even though C2_HINT_MAKE can set it via a 16-bit
 * q_index; presumably queue indices are small enough that this never
 * matters -- confirm against the adapter's hint-word layout.
 */
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
53 static const u8 c2_opcode[] = {
101 static const char *to_ib_state_str(
int ib_state)
103 static const char *state_str[] = {
114 return "<invalid IB QP state>";
117 return state_str[ib_state];
124 pr_debug(
"%s: qp[%p] state modify %s --> %s\n",
127 to_ib_state_str(qp->
state),
128 to_ib_state_str(new_state));
/*
 * All-ones sentinel for a QP attribute field.  Presumably marks an
 * attribute the caller does not want modified in a modify-QP request --
 * its use sites are not visible in this chunk; confirm against the
 * c2 wire-request (c2wr) definitions.
 */
#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
144 pr_debug(
"%s:%d qp=%p, %s --> %s\n",
147 to_ib_state_str(qp->
state),
155 wr.
hdr.context = (
unsigned long) vq_req;
175 pr_debug(
"Generating CLOSE event for QP-->ERR, "
176 "qp=%p, cm_id=%p\n",qp,qp->
cm_id);
181 spin_unlock_irqrestore(&qp->
lock, flags);
223 err = c2_errno(reply);
228 pr_debug(
"%s: c2_errno=%d\n", __func__, err);
240 spin_unlock_irqrestore(&qp->
lock, flags);
246 pr_debug(
"%s:%d qp=%p, cur_state=%s\n",
249 to_ib_state_str(qp->
state));
266 wr.
hdr.context = (
unsigned long) vq_req;
295 err = c2_errno(reply);
302 static int destroy_qp(
struct c2_dev *c2dev,
struct c2_qp *qp)
322 wr.hdr.context = (
unsigned long) vq_req;
333 pr_debug(
"destroy_qp: generating CLOSE event for QP-->ERR, "
334 "qp=%p, cm_id=%p\n",qp,qp->
cm_id);
340 spin_unlock_irqrestore(&qp->
lock, flags);
373 spin_unlock_irqrestore(&qp->
lock, flags);
381 static int c2_alloc_qpn(
struct c2_dev *c2dev,
struct c2_qp *qp)
386 spin_lock_irq(&c2dev->
qp_table.lock);
389 spin_unlock_irq(&c2dev->
qp_table.lock);
390 }
while ((ret == -
EAGAIN) &&
395 static void c2_free_qpn(
struct c2_dev *c2dev,
int qpn)
397 spin_lock_irq(&c2dev->
qp_table.lock);
399 spin_unlock_irq(&c2dev->
qp_table.lock);
409 spin_unlock_irqrestore(&c2dev->
qp_table.lock, flags);
422 unsigned long peer_pa;
427 err = c2_alloc_qpn(c2dev, qp);
436 if (!qp->
sq_mq.shared) {
443 if (!qp->
rq_mq.shared) {
450 if (vq_req ==
NULL) {
456 memset(&wr, 0,
sizeof(wr));
458 wr.
hdr.context = (
unsigned long) vq_req;
499 if ((err = c2_wr_get_result(reply)) != 0) {
558 destroy_qp(c2dev, qp);
568 c2_free_qpn(c2dev, qp->
qpn);
572 static inline void c2_lock_cqs(
struct c2_cq *send_cq,
struct c2_cq *recv_cq)
574 if (send_cq == recv_cq)
575 spin_lock_irq(&send_cq->
lock);
576 else if (send_cq > recv_cq) {
577 spin_lock_irq(&send_cq->
lock);
580 spin_lock_irq(&recv_cq->
lock);
585 static inline void c2_unlock_cqs(
struct c2_cq *send_cq,
struct c2_cq *recv_cq)
587 if (send_cq == recv_cq)
588 spin_unlock_irq(&send_cq->
lock);
589 else if (send_cq > recv_cq) {
590 spin_unlock(&recv_cq->
lock);
591 spin_unlock_irq(&send_cq->
lock);
593 spin_unlock(&send_cq->
lock);
594 spin_unlock_irq(&recv_cq->
lock);
600 struct c2_cq *send_cq;
601 struct c2_cq *recv_cq;
603 send_cq = to_c2cq(qp->
ibqp.send_cq);
604 recv_cq = to_c2cq(qp->
ibqp.recv_cq);
610 c2_lock_cqs(send_cq, recv_cq);
611 c2_free_qpn(c2dev, qp->
qpn);
612 c2_unlock_cqs(send_cq, recv_cq);
617 destroy_qp(c2dev, qp);
623 if (send_cq != recv_cq)
672 if ((tot + src->
length) < tot) {
704 *actual_count = acount;
769 ((c2wr_hdr_t *) wr)->magic =
cpu_to_be32(CCWR_MAGIC);
781 memcpy((
void *) msg, (
void *) wr, size);
792 struct c2_qp *qp = to_c2qp(ibqp);
834 sizeof(struct c2_data_addr) * ib_wr->
num_sge;
842 err = move_sgl((
struct c2_data_addr *) & (wr.
sqwr.
send.data),
845 &tot_len, &actual_sge_count);
847 c2_wr_set_sge_count(&wr, actual_sge_count);
852 (sizeof(struct c2_data_addr) * ib_wr->
num_sge);
864 err = move_sgl((
struct c2_data_addr *)
868 &tot_len, &actual_sge_count);
870 c2_wr_set_sge_count(&wr, actual_sge_count);
915 c2_wr_set_flags(&wr, flags);
921 err = qp_wr_post(&qp->
sq_mq, &wr, qp, msg_size);
923 spin_unlock_irqrestore(&qp->
lock, lock_flags);
930 c2_activity(c2dev, qp->
sq_mq.index, qp->
sq_mq.hint_count);
931 spin_unlock_irqrestore(&qp->
lock, lock_flags);
946 struct c2_qp *qp = to_c2qp(ibqp);
971 wr.
rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->
wr_id;
973 c2_wr_set_flags(&wr, 0);
979 ib_wr->
num_sge, &tot_len, &actual_sge_count);
980 c2_wr_set_sge_count(&wr, actual_sge_count);
992 err = qp_wr_post(&qp->
rq_mq, &wr, qp, qp->
rq_mq.msg_size);
994 spin_unlock_irqrestore(&qp->
lock, lock_flags);
1001 c2_activity(c2dev, qp->
rq_mq.index, qp->
rq_mq.hint_count);
1002 spin_unlock_irqrestore(&qp->
lock, lock_flags);
1004 ib_wr = ib_wr->
next;