#include <linux/sched.h>
#include <linux/slab.h>
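/*
 * QP numbers are handed out from an array of bitmap pages hanging off
 * the QP table (qpt->map[]): mk_qpn() turns a (page, bit offset) pair
 * back into a QPN, and find_next_offset() scans a page for a free bit.
 */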
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
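/*
 * credit_table[] appears to map the 5-bit AETH credit code onto an
 * actual number of receive credits; it is searched further down when
 * credits are computed from the receive queue head and tail indices.
 */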
static u32 credit_table[31] = {
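/* Publish a newly allocated QPN bitmap page, then drop the table lock. */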
	map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
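/*
 * QPN allocation: make sure the bitmap page backing the candidate QPN
 * exists, then convert the free bit that was found into a QP number,
 * giving up after max_scan passes over the map.
 */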
	get_map_page(qpt, map);
	get_map_page(qpt, map);
	qpn = mk_qpn(qpt, map, offset);
	if (++i > max_scan) {
	qpn = mk_qpn(qpt, map, offset);
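/* Reserving a QP number for a new QP, under qpt->lock. */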
	ret = alloc_qpn(qpt, type);
	spin_unlock_irqrestore(&qpt->lock, flags);
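/* Removing a QP: walk its hash chain to find and unlink the entry. */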
	for (; (q = *qpp) != NULL; qpp = &q->next) {
	spin_unlock_irqrestore(&qpt->lock, flags);
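/*
 * Tearing down the whole QP table: every hash bucket is scanned and
 * any QPN bitmap pages that were allocated are freed.
 */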
	for (n = 0; n < qpt->max; n++) {
	for (; qp; qp = qp->next)
	spin_unlock_irqrestore(&qpt->lock, flags);
	if (qpt->map[n].page)
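/* QPN lookup: hash on qpn modulo the table size and walk the chain. */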
	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
	spin_unlock_irqrestore(&qpt->lock, flags);
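/*
 * Resetting a QP: RC and UC QPs restart from their respective
 * SEND_LAST opcodes, and the receive work queue indices are cleared.
 */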
	qp->s_state = IB_OPCODE_RC_SEND_LAST;
	qp->r_state = IB_OPCODE_RC_SEND_LAST;
	qp->s_state = IB_OPCODE_UC_SEND_LAST;
	qp->r_state = IB_OPCODE_UC_SEND_LAST;
	qp->r_rq.wq->head = 0;
	qp->r_rq.wq->tail = 0;
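/*
 * Error handling: pending send work is kicked via the send scheduler,
 * and the receive queue is drained under r_rq.lock, generating a
 * completion for each outstanding receive WQE.
 */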
	ipath_schedule_send(qp);
	memset(&wc, 0, sizeof(wc));
	spin_lock(&qp->r_rq.lock);
	if (head >= qp->r_rq.size)
	if (tail >= qp->r_rq.size)
	while (tail != head) {
		wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
		if (++tail >= qp->r_rq.size)
	spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
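/*
 * Modifying QP attributes: the requested values are validated (for
 * example the SGID index and path MTU checks below), then each
 * attribute selected by attr_mask is applied under qp->s_lock, and an
 * event handler is invoked if one is registered.
 */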
		    int attr_mask, struct ib_udata *udata)
	spin_lock_irq(&qp->s_lock);
	    (attr->ah_attr.grh.sgid_index > 1))
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	spin_unlock_irq(&qp->s_lock);
	spin_lock_irq(&qp->s_lock);
	ipath_reset_qp(qp, ibqp->qp_type);
	if (attr_mask & IB_QP_PKEY_INDEX)
	if (attr_mask & IB_QP_AV) {
	if (attr_mask & IB_QP_PATH_MTU)
	if (attr_mask & IB_QP_MIN_RNR_TIMER)
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
	spin_unlock_irq(&qp->s_lock);
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	spin_unlock_irq(&qp->s_lock);
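/*
 * Reporting QP attributes back to the caller: receive limits come from
 * r_rq unless an SRQ is attached, and inline data is not supported.
 */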
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	init_attr->cap = attr->cap;
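/*
 * Computing flow-control credits: the number of free receive WQEs is
 * derived from the circular head/tail indices and then looked up in
 * credit_table[] to find an encodable value.
 */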
	if (head >= qp->r_rq.size)
	if (tail >= qp->r_rq.size)
	credits = head - tail;
	if ((int)credits < 0)
		credits += qp->r_rq.size;
	if (credit_table[x] == credits)
	if (credit_table[x] > credits)
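/*
 * Creating a QP: the requested capabilities are validated, the send
 * queue is allocated with vmalloc(), and the per-WQE size accounts for
 * any scatter/gather entries beyond the first.
 */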
	if (!init_attr->srq) {
	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_recv_wr == 0) {
	init_attr->cap.max_send_sge +
	swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
	if (init_attr->srq) {
		if (srq->rq.max_sge > 1)
			(srq->rq.max_sge - 1);
	} else if (init_attr->cap.max_recv_sge > 1)
		(init_attr->cap.max_recv_sge - 1);
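/*
 * With an SRQ the QP keeps no receive queue of its own; otherwise the
 * receive queue is sized from the requested capabilities, with one
 * extra slot so that head == tail unambiguously means "empty".
 */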
	if (init_attr->srq) {
		qp->r_rq.max_sge = 0;
		init_attr->cap.max_recv_wr = 0;
		init_attr->cap.max_recv_sge = 0;
	qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
	qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
	sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
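/*
 * Send-side setup: the QP starts on empty wait lists, the send queue
 * is sized from the requested capabilities, a QPN is allocated from
 * the device-wide table, and the QP is reset to its initial state.
 */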
	INIT_LIST_HEAD(&qp->piowait);
	INIT_LIST_HEAD(&qp->timerwait);
	qp->s_size = init_attr->cap.max_send_wr + 1;
	qp->s_max_sge = init_attr->cap.max_send_sge;
	dev = to_idev(ibpd->device);
	err = ipath_alloc_qpn(&dev->qp_table, qp,
	ipath_reset_qp(qp, init_attr->qp_type);
	init_attr->cap.max_inline_data = 0;
	err = ib_copy_to_udata(udata, &offset,
	err = ib_copy_to_udata(udata, &(qp->ip->offset),
			       sizeof(qp->ip->offset));
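/* Destroying a QP: teardown of send-side state is serialized by s_lock. */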
	spin_lock_irq(&qp->s_lock);
	spin_unlock_irq(&qp->s_lock);
	spin_unlock_irq(&qp->s_lock);
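/*
 * Credit update from an incoming AETH: if the peer extended the send
 * limit (compared with 24-bit sequence arithmetic), and the next send
 * WQE is now within that limit, the send engine is restarted.
 */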
	if (ipath_cmp24(credit, qp->s_lsn) > 0)
	    ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			qp->s_lsn + 1) <= 0))
		ipath_schedule_send(qp);