41 #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
42 #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
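/*
 * These sizes are for the per-page QPN allocation bitmaps (the qpt->map[]
 * pages seen below): each page tracks BITS_PER_PAGE QP numbers, and
 * BITS_PER_PAGE_MASK gives the bit offset within one page.
 */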
56 if (((off & qpt->mask) >> 1) >= n)
57 off = (off | qpt->mask) + 2;
66 static u32 credit_table[31] = {
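/*
 * credit_table has 31 entries; it appears to translate the 5-bit AETH
 * credit code to/from an actual count of available receive WQEs (see the
 * search around lines 950-952 and the compare near line 1284).
 */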
108 spin_lock(&qpt->lock);
112 map->page = (void *)page;
113 spin_unlock(&qpt->lock);
131 n = 1 << (ret + 2 * (port - 1));
132 spin_lock(&qpt->lock);
137 spin_unlock(&qpt->lock);
145 qpn = (qpn | qpt->mask) + 2;
151 get_map_page(qpt, map);
163 qpn = mk_qpn(qpt, map, offset);
178 if (++i > max_scan) {
190 qpn = mk_qpn(qpt, map, offset);
210 return jhash_1word(qpn, dev->qp_rnd) &
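/*
 * qpn_hash(): the QP number is mixed with a per-device random seed
 * (dev->qp_rnd) via jhash_1word() and then masked, presumably down to the
 * hash table size, on the following (not shown) source line.
 */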
223 unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
228 if (qp->ibqp.qp_num == 0)
230 else if (qp->ibqp.qp_num == 1)
237 spin_unlock_irqrestore(&dev->qpt_lock, flags);
248 unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
254 lockdep_is_held(&dev->qpt_lock)) == qp) {
258 lockdep_is_held(&dev->qpt_lock)) == qp) {
268 for (; q; qpp = &q->next) {
282 spin_unlock_irqrestore(&dev->qpt_lock, flags);
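/*
 * QP table updates are serialized by dev->qpt_lock (hence the
 * lockdep_is_held() annotations on the protected dereferences at lines
 * 254/258), while lookups are presumably done under RCU.  Lines 228/230
 * suggest QP0 and QP1 live in per-port pointers rather than the hash
 * buckets.
 */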
298 unsigned n, qp_inuse = 0;
323 spin_unlock_irqrestore(&dev->qpt_lock, flags);
348 struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
349 unsigned n = qpn_hash(dev, qpn);
354 if (qp->ibqp.qp_num == qpn)
388 qp->s_state = IB_OPCODE_RC_SEND_LAST;
389 qp->r_state = IB_OPCODE_RC_SEND_LAST;
391 qp->s_state = IB_OPCODE_UC_SEND_LAST;
392 qp->r_state = IB_OPCODE_UC_SEND_LAST;
406 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
411 qp->r_rq.wq->head = 0;
412 qp->r_rq.wq->tail = 0;
414 qp->r_sge.num_sge = 0;
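/*
 * qib_reset_qp() reinitializes the software QP state: RC and UC QPs start
 * both state machines at their transport's SEND_LAST opcode (presumably so
 * the next packet begins a new message), the RC ack queue is zeroed, and
 * the receive ring head/tail and SGE state are cleared.
 */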
417 static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
424 qib_put_ss(&qp->r_sge);
431 for (i = 0; i < wqe->wr.num_sge; i++) {
452 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
455 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
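/*
 * clear_mr_refs() drops memory-region references held by the receive SGE
 * state, by each SGE of queued work requests (the send queue only when
 * clr_sends is set), and, it appears, by RDMA READ entries still sitting
 * in the responder's s_ack_queue.
 */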
495 list_del_init(&qp->iowait);
515 clear_mr_refs(qp, 0);
517 memset(&wc, 0, sizeof(wc));
533 spin_lock(&qp->r_rq.lock);
538 if (head >= qp->r_rq.size)
541 if (tail >= qp->r_rq.size)
543 while (tail != head) {
544 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
545 if (++tail >= qp->r_rq.size)
551 spin_unlock(&qp->r_rq.lock);
552 } else if (qp->ibqp.event_handler)
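/*
 * Error-state flush: when the QP owns its receive queue, every posted but
 * unconsumed receive WR between tail and head is completed (presumably with
 * a flush-error status); the else branch at line 552 appears to cover the
 * SRQ case by asking the caller to raise the async error event instead.
 */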
569 int attr_mask, struct ib_udata *udata)
572 struct qib_qp *qp = to_iqp(ibqp);
580 spin_lock_irq(&qp->r_lock);
581 spin_lock(&qp->s_lock);
646 mtu = ib_mtu_enum_to_int(attr->path_mtu);
649 if (mtu > dd->pport[pidx].ibmtu) {
650 switch (dd->pport[pidx].ibmtu) {
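/*
 * The requested path MTU is converted to bytes and checked against the
 * port's active IB MTU; the switch at line 650 appears to clamp an
 * oversized request down to what dd->pport[pidx].ibmtu supports.
 */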
699 if (!list_empty(&qp->iowait))
700 list_del_init(&qp->iowait);
703 spin_unlock(&qp->s_lock);
704 spin_unlock_irq(&qp->r_lock);
715 spin_lock_irq(&qp->r_lock);
716 spin_lock(&qp->s_lock);
717 clear_mr_refs(qp, 1);
718 qib_reset_qp(qp, ibqp->qp_type);
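/*
 * On the transition to RESET, MR references are dropped and the QP is
 * reinitialized via qib_reset_qp() while both r_lock and s_lock are held
 * (taken at lines 715/716).
 */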
748 if (attr_mask & IB_QP_PKEY_INDEX)
751 if (attr_mask & IB_QP_PORT)
754 if (attr_mask & IB_QP_DEST_QPN)
771 if (attr_mask & IB_QP_AV) {
776 if (attr_mask & IB_QP_ALT_PATH) {
781 if (attr_mask & IB_QP_PATH_MIG_STATE) {
790 if (attr_mask & IB_QP_PATH_MTU) {
792 qp->pmtu = ib_mtu_enum_to_int(pmtu);
795 if (attr_mask & IB_QP_RETRY_CNT) {
800 if (attr_mask & IB_QP_RNR_RETRY) {
805 if (attr_mask & IB_QP_MIN_RNR_TIMER)
818 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
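/*
 * Each IB_QP_* bit in attr_mask selects which ib_qp_attr field gets copied
 * into the driver's QP state; unset bits leave the current values
 * untouched.
 */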
824 spin_unlock(&qp->s_lock);
825 spin_unlock_irq(&qp->r_lock);
834 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
840 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
846 spin_unlock(&qp->s_lock);
847 spin_unlock_irq(&qp->r_lock);
857 struct qib_qp *qp = to_iqp(ibqp);
869 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
871 attr->cap.max_recv_sge = qp->r_rq.max_sge;
872 attr->cap.max_inline_data = 0;
894 init_attr->cap = attr->cap;
929 if (head >= qp->r_rq.size)
932 if (tail >= qp->r_rq.size)
939 credits = head - tail;
940 if ((int)credits < 0)
941 credits += qp->r_rq.size;
950 if (credit_table[x] == credits)
952 if (credit_table[x] > credits)
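/*
 * The free receive-queue entries (head - tail, wrapped by the ring size)
 * are translated into an AETH credit code by what looks like a binary
 * search of credit_table; lines 950/952 are the comparison steps.
 */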
994 if (!init_attr->srq) {
1000 if (init_attr->cap.max_send_sge +
1001 init_attr->cap.max_send_wr +
1002 init_attr->cap.max_recv_sge +
1003 init_attr->cap.max_recv_wr == 0) {
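/*
 * Without an SRQ, a QP that asks for zero send and receive WRs and SGEs
 * (the sum at lines 1000-1003) is presumably rejected as invalid.
 */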
1021 init_attr->cap.max_send_sge +
1023 swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
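/*
 * Line 1021 is part of the per-send-WQE size computation (one SGE slot per
 * max_send_sge plus what appears to be a fixed WQE header), and
 * max_send_wr + 1 such slots are vmalloc()ed; the extra slot presumably
 * lets the ring distinguish full from empty.
 */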
1030 if (init_attr->srq) {
1031 struct qib_srq *srq = to_isrq(init_attr->srq);
1033 if (srq->rq.max_sge > 1)
1034 sg_list_sz = sizeof(*qp->r_sg_list) *
1035 (srq->rq.max_sge - 1);
1036 } else if (init_attr->cap.max_recv_sge > 1)
1037 sg_list_sz = sizeof(*qp->r_sg_list) *
1038 (init_attr->cap.max_recv_sge - 1);
1056 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1057 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1058 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1061 qp->r_rq.size * sz);
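/*
 * The receive ring likewise gets max_recv_wr + 1 slots, each sized for
 * max_recv_sge struct ib_sge entries plus a per-WQE header; line 1061 is
 * the tail of that allocation (likely vmalloc_user(), so the ring can
 * later be mapped by userspace).
 */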
1081 INIT_LIST_HEAD(&qp->iowait);
1085 qp->s_size = init_attr->cap.max_send_wr + 1;
1089 dev = to_idev(ibpd->device);
1090 dd = dd_from_dev(dev);
1100 qib_reset_qp(qp, init_attr->qp_type);
1109 init_attr->cap.max_inline_data = 0;
1119 err = ib_copy_to_udata(udata, &offset,
1126 u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1136 err = ib_copy_to_udata(udata, &(qp->ip->offset),
1137 sizeof(qp->ip->offset));
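/*
 * For userspace QPs, line 1126 computes the byte size of the shared
 * receive ring and lines 1136-1137 copy the resulting mmap offset back
 * through udata; qp->ip appears to be the mmap cookie the user library
 * maps later.
 */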
1190 struct qib_qp *qp = to_iqp(ibqp);
1194 spin_lock_irq(&qp->s_lock);
1198 if (!list_empty(&qp->iowait))
1199 list_del_init(&qp->iowait);
1202 spin_unlock_irq(&qp->s_lock);
1212 clear_mr_refs(qp, 1);
1214 spin_unlock_irq(&qp->s_lock);
1253 if (qpt->map[i].page)
1284 if (qib_cmp24(credit, qp->s_lsn) > 0) {
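/*
 * Credit update: the AETH credit appears to be decoded through
 * credit_table (cf. line 66) and compared with the current send limit
 * using a 24-bit circular compare (qib_cmp24()); only a value that
 * advances qp->s_lsn is applied.
 */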