#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/random.h>
static unsigned int ib_qib_qp_table_size = 256;

MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;

MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

static unsigned int ib_qib_disable_sma;
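/*
 * A pattern repeated throughout the fragments below: when the current
 * SGE has been consumed (sge->length == 0) but the memory region still
 * has page-map segments (sge->mr->lkey != 0), step to the next segment
 * of mr->map[] and reload vaddr/length from it.
 */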
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->m >= sge->mr->mapsz)
                        break;
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->m >= sge->mr->mapsz)
                        break;
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
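/* qib_count_sge(): a source buffer that is not u32-aligned, or a
 * non-final chunk whose length is not a whole number of dwords, cannot
 * be handed to the DMA engine as-is. */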
        if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
            (len != length && (len & (sizeof(u32) - 1)))) {
        } else if (sge.length == 0 && sge.mr->lkey) {
                if (++sge.m >= sge.mr->mapsz)
                        break;
                sge.vaddr = sge.mr->map[sge.m]->segs[sge.n].vaddr;
                sge.length = sge.mr->map[sge.m]->segs[sge.n].length;
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->m >= sge->mr->mapsz)
                        break;
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
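/* qib_post_one_send(): a UD work request may only reference an address
 * handle allocated on the same protection domain as the QP. */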
        if (qp->ibqp.pd != wr->wr.ud.ah->pd)

        rkt = &to_idev(qp->ibqp.device)->lk_table;
        pd = to_ipd(qp->ibqp.pd);
        wqe = get_swqe_ptr(qp, qp->s_head);
        for (i = 0; i < wr->num_sge; i++) {

                goto bail_inval_free;
                if (wqe->length > 0x80000000U)
                        goto bail_inval_free;
        } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
                                  qp->port_num - 1)->ibmtu)
                goto bail_inval_free;
        if (!ret && !wr->next &&
            !qib_sdma_empty(
               dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {

        spin_unlock_irqrestore(&qp->s_lock, flags);
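/* qib_post_send(): walk the chain of work requests, posting one send
 * work request at a time. */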
        struct qib_qp *qp = to_iqp(ibqp);

        for (; wr; wr = wr->next) {
                err = qib_post_one_send(qp, wr, &scheduled);
        struct qib_qp *qp = to_iqp(ibqp);

        for (; wr; wr = wr->next) {
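/* qib_post_receive(): the receive queue is a ring; the head index
 * wraps to zero at r_rq.size, and advancing onto wq->tail means the
 * ring is full. */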
                if (next >= qp->r_rq.size)

                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);

                wqe = get_rwqe_ptr(&qp->r_rq, wq->head);

                for (i = 0; i < wr->num_sge; i++)

                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                       int has_grh, void *data, u32 tlen, struct qib_qp *qp)

        spin_lock(&qp->r_lock);
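        /* Dispatch on QP type: SMI/GSI traffic goes through qib_ud_rcv
         * (unless the SMA is disabled via the module parameter), RC to
         * qib_rc_rcv, and UC to qib_uc_rcv. */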
        switch (qp->ibqp.qp_type) {

                if (ib_qib_disable_sma)

                qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);

                qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);

                qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);

        spin_unlock(&qp->r_lock);
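/* qib_ib_rcv(): the low "lmc" bits of a LID are path bits, not part of
 * the base LID, so mask them off before comparing. */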
        lid &= ~((1 << ppd->lmc) - 1);

        ohdr = &hdr->u.l.oth;

        ibp->opstats[opcode & 0x7f].n_bytes += tlen;
        ibp->opstats[opcode & 0x7f].n_packets++;

        qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);

        qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
static void mem_timer(unsigned long data)

        if (!list_empty(list)) {

                list_del_init(&qp->iowait);

        if (!list_empty(list))

        spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->m >= sge->mr->mapsz)
                        return;
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
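/*
 * Endianness helpers for copy_io(): on little-endian hosts
 * get_upper_bits() shifts data toward the low bytes and
 * set_upper_bits() toward the high bytes; the big-endian variants
 * below reverse the shift directions so partial dwords are assembled
 * correctly either way.
 */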
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)

#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
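/* copy_io() streams an SGE list into a PIO buffer one dword at a time,
 * carrying the leftover bytes of unaligned chunks in "data"/"extra"
 * until a full dword can be written. */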
static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
                    u32 length, unsigned flush_wc)

                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;

                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);

                        y = sizeof(u32) - off;

                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;

                                data |= clear_upper_bytes(v, len, extra);
                        int ushift = 32 - shift;

                        while (l >= sizeof(u32)) {

                                data |= set_upper_bits(v, shift);

                                data = get_upper_bits(v, ushift);

                        if (l + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, shift);
                                len -= l + extra - sizeof(u32);

                                data |= clear_upper_bytes(v, l, extra);
        } else if (len == length) {

        } else if (len == length) {

                last = ((u32 *) ss->sge.vaddr)[w - 1];

                extra = len & (sizeof(u32) - 1);

                        data = clear_upper_bytes(v, extra, 0);

        update_sge(ss, length);
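/* __get_txreq(): slow path of get_txreq(); when no tx request is free
 * the QP is left waiting and the caller sees ERR_PTR(-EBUSY). */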
        spin_unlock_irqrestore(&qp->s_lock, flags);

        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
            list_empty(&qp->iowait)) {

        spin_unlock_irqrestore(&qp->s_lock, flags);
        tx = ERR_PTR(-EBUSY);

        tx = __get_txreq(dev, qp);
        dev = to_idev(qp->ibqp.device);

        if (!list_empty(&dev->txwait)) {

                list_del_init(&qp->iowait);

        spin_unlock_irqrestore(&qp->s_lock, flags);
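/* qib_verbs_sdma_desc_avail(): hand freed SDMA descriptors to waiting
 * QPs, stopping at the first waiter that needs more than remain. */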
        dev = &ppd->dd->verbs_dev;

                if (qp->s_tx->txreq.sg_count > avail)

                avail -= qp->s_tx->txreq.sg_count;
                list_del_init(&qp->iowait);

        for (i = 0; i < n; i++) {

                spin_lock(&qp->s_lock);

                spin_unlock(&qp->s_lock);

        spin_lock(&qp->s_lock);

        spin_unlock(&qp->s_lock);
        unsigned long flags;

        if (list_empty(&qp->iowait)) {
                if (list_empty(&dev->memwait))

        spin_unlock_irqrestore(&qp->s_lock, flags);
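/* qib_verbs_send_dma(): grab a tx request, count how many SDMA
 * descriptors the SGE list needs, and fall back to copying into a
 * single coherent header buffer (or waiting for memory) as needed. */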
        tx = get_txreq(dev, qp);

        tx->txreq.callback = sdma_complete;

        ndesc = qib_count_sge(ss, len);

        qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

        tx->txreq.sg_count = 1;

        ret = wait_kmem(dev, qp);
static int no_bufs_available(struct qib_qp *qp)

        unsigned long flags;

        if (list_empty(&qp->iowait)) {

        dd = dd_from_dev(dev);

        spin_unlock_irqrestore(&qp->s_lock, flags);
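/* qib_verbs_send_pio(): the 64-bit PBC (Per-Buffer Control) word packs
 * the control flags into the upper 32 bits and the packet length in
 * dwords (including the PBC itself) into the lower 32 bits. */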
        u32 *hdr = (u32 *) ibhdr;

        unsigned long flags;

        pbc = ((u64) control << 32) | plen;

                return no_bufs_available(qp);

        piobuf_orig = piobuf;

        __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {

                update_sge(ss, len);

                copy_io(piobuf, ss, len, flush_wc);

        spin_unlock_irqrestore(&qp->s_lock, flags);

        spin_unlock_irqrestore(&qp->s_lock, flags);
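/* qib_verbs_send(): the send length in dwords covers the header, the
 * payload rounded up to a whole dword, plus one dword for the PBC. */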
        u32 dwords = (len + 3) >> 2;

        plen = hdrwords + dwords + 1;

                ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                         plen, dwords);

                ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
                                         plen, dwords);
        unsigned long flags;

        while (!list_empty(list)) {

                list_del_init(&qp->iowait);

        for (i = 0; i < n; i++) {

        spin_unlock_irqrestore(&qp->s_lock, flags);
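/* qib_query_device(): the limits reported to the IB core mirror the
 * ib_qib_* module parameters declared at the top of this file. */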
static int qib_query_device(struct ib_device *ibdev,
                            struct ib_device_attr *props)

        memset(props, 0, sizeof(*props));

        props->max_ah = ib_qib_max_ahs;

        props->max_pd = ib_qib_max_pds;
        struct qib_ibport *ibp = to_iport(ibdev, port);

        memset(props, 0, sizeof(*props));

        switch (ppd->ibmtu) {
static int qib_modify_device(struct ib_device *device,
                             int device_modify_mask,
static int qib_modify_port(struct ib_device *ibdev, u8 port,
                           int port_modify_mask,
                           struct ib_port_modify *props)

        struct qib_ibport *ibp = to_iport(ibdev, port);

static int qib_query_gid(struct ib_device *ibdev, u8 port,
                         int index, union ib_gid *gid)

        struct qib_ibport *ibp = to_iport(ibdev, port);
static int qib_dealloc_pd(struct ib_pd *ibpd)

        struct qib_pd *pd = to_ipd(ibpd);
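/* qib_check_ah(): a DLID of zero is never valid, and the IB service
 * level is a 4-bit field, so anything above 15 is rejected. */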
        if (ah_attr->dlid == 0)

        if (ah_attr->sl > 15)
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
                                   struct ib_ah_attr *ah_attr)

        unsigned long flags;

        spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

        spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

        ah->attr = *ah_attr;

        memset(&attr, 0, sizeof attr);

        attr.port_num = ppd_from_ibp(ibp)->port;
static int qib_destroy_ah(struct ib_ah *ibah)

        struct qib_ah *ah = to_iah(ibah);
        unsigned long flags;

        spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)

        struct qib_ah *ah = to_iah(ibah);

        ah->attr = *ah_attr;

static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)

        struct qib_ah *ah = to_iah(ibah);

        *ah_attr = ah->attr;
static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                          u16 *pkey)

static int qib_dealloc_ucontext(struct ib_ucontext *context)

        kfree(to_iucontext(context));
        ibp->z_link_error_recovery_counter =
                cntrs.link_error_recovery_counter;

        ibp->z_local_link_integrity_errors =
                cntrs.local_link_integrity_errors;

        ibp->z_excessive_buffer_overrun_errors =
                cntrs.excessive_buffer_overrun_errors;
        unsigned i, lk_tab_size;

        init_ibport(ppd + i);

        for (i = 0; i < dev->lk_table.max; i++)
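        /* QPs queue themselves on these lists while waiting for PIO
         * buffers, DMA completions, tx requests, or kernel memory. */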
        INIT_LIST_HEAD(&dev->piowait);
        INIT_LIST_HEAD(&dev->dmawait);
        INIT_LIST_HEAD(&dev->txwait);
        INIT_LIST_HEAD(&dev->memwait);

        snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
                 "QLogic Infiniband HCA %s", init_utsname()->nodename);
        qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
        unsigned lk_tab_size;
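        /* By unregister time every wait list must be empty; a non-empty
         * list here means a QP reference was leaked. */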
        if (!list_empty(&dev->piowait))

        if (!list_empty(&dev->dmawait))

        if (!list_empty(&dev->txwait))

        if (!list_empty(&dev->memwait))

        qib_dev_err(dd, "QP memory leak! %u still in use\n",
                    qps_inuse);
        if (dd->pport->sdma_descq_cnt)
                dma_free_coherent(&dd->pcidev->dev,
                                  dd->pport->sdma_descq_cnt *
                                  sizeof(struct qib_pio_header),
                                  dev->pio_hdrs, dev->pio_hdrs_phys);
        if (qib_send_ok(qp)) {