#include <linux/slab.h>
#include <linux/module.h>
#include <linux/utsname.h>
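/*
 * Module parameters capping the verbs resources this driver exposes:
 * QP table size, LKEY table bits, protection domains, address handles,
 * completion queue entries, multicast groups and attached QPs, plus a
 * switch (ib_ipath_disable_sma) for dropping subnet management traffic.
 */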
static unsigned int ib_ipath_qp_table_size = 251;
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int ib_ipath_max_pds = 0xFFFF;
		 "Maximum number of protection domains to support");
static unsigned int ib_ipath_max_ahs = 0xFFFF;
		 "Maximum number of completion queue entries to support");
		 "Maximum number of multicast groups to support");
		 "Maximum number of attached QPs to support");
static unsigned int ib_ipath_disable_sma;
static __be64 sys_image_guid;
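/*
 * Common pattern for walking a memory region's segment map: when the
 * current segment is consumed, advance the segment index (n); when a
 * map page is exhausted, advance the map index (m), then reload vaddr
 * and length from mr->map[m]->segs[n].
 */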
			if (++sge->m >= sge->mr->mapsz)
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
			if (++sge->m >= sge->mr->mapsz)
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
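/*
 * ipath_count_sge(): SGEs can only be handed to the send DMA engine
 * directly if each one is u32 aligned and (except for the last) a whole
 * number of u32s long; otherwise the count is abandoned, presumably so
 * the send path falls back to copying the payload.
 */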
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			if (++sge.m >= sge.mr->mapsz)
		sge.vaddr = sge.mr->map[sge.m]->segs[sge.n].vaddr;
		sge.length = sge.mr->map[sge.m]->segs[sge.n].length;
			if (++sge->m >= sge->mr->mapsz)
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	if (qp->ibqp.pd != wr->wr.ud.ah->pd)
	wqe = get_swqe_ptr(qp, qp->s_head);
	for (i = 0, j = 0; i < wr->num_sge; i++) {
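/*
 * Validate the total request length: connected (RC/UC) requests appear
 * to be capped at 2^31 bytes, while datagram requests must also fit
 * within the IB MTU (ipath_ibmtu).
 */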
		if (wqe->length > 0x80000000U)
	} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
	spin_unlock_irqrestore(&qp->s_lock, flags);
	for (; wr; wr = wr->next) {
		err = ipath_post_one_send(qp, wr);
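/*
 * ipath_post_receive() copies each receive work request into the QP's
 * receive ring (r_rq); if advancing the head would catch up with the
 * tail, the ring is full and the post is rejected.
 */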
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
	for (; wr; wr = wr->next) {
		if (next >= qp->r_rq.size)
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		for (i = 0; i < wr->num_sge; i++)
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	switch (qp->ibqp.qp_type) {
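/*
 * SMPs arriving for QP0 appear to be dropped when the SMA has been
 * disabled via the ib_ipath_disable_sma module parameter.
 */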
		if (ib_ipath_disable_sma)
	lid &= ~((1 << dev->dd->ipath_lmc) - 1);
		ohdr = &hdr->u.l.oth;
		ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
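/*
 * ipath_ib_timer() is run periodically from __verbs_timer(): it ages
 * the pending lists, restarts QPs on the resend list, and wakes QPs
 * whose RNR timeout has expired so their sends can be rescheduled.
 */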
static void ipath_ib_timer(struct ipath_ibdev *dev)
	while (!list_empty(last)) {
	if (!list_empty(last)) {
	if (list_empty(last))
	while (resend != NULL) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
	while (rnr != NULL) {
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
			if (++sge->m >= sge->mr->mapsz)
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
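/*
 * copy_io() streams an SGE list into a PIO send buffer one u32 at a
 * time, merging partially filled words when the source data is not u32
 * aligned and honouring the write-combining flush rules selected by
 * flush_wc.
 */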
		    u32 length, unsigned flush_wc)
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
			y = sizeof(u32) - off;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				data |= clear_upper_bytes(v, len, extra);
			int ushift = 32 - shift;
			while (l >= sizeof(u32)) {
				data |= set_upper_bits(v, shift);
				data = get_upper_bits(v, ushift);
				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					data |= clear_upper_bytes(v, l, extra);
		} else if (len == length) {
		} else if (len == length) {
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			extra = len & (sizeof(u32) - 1);
				data = clear_upper_bytes(v, extra, 0);
	update_sge(ss, length);
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
	unsigned long flags;
static inline void put_txreq(struct ipath_ibdev *dev,
	unsigned long flags;
	unsigned long flags;
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (tx->wqe) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
static void decrement_dma_busy(struct ipath_qp *qp)
	unsigned long flags;
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
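/*
 * ipath_pkt_delay(): when the receiver's static rate multiplier is
 * larger (its link is slower) than the sender's, insert a delay
 * proportional to the packet length and the rate difference so the
 * receiver is not overrun; otherwise no delay is needed.
 */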
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
	return (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}
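/*
 * ipath_verbs_send_dma() queues the packet on the send DMA engine: the
 * header is copied into a verbs txreq, and the payload is either
 * referenced directly by gather descriptors (when ipath_count_sge()
 * finds the SGEs suitably aligned) or, apparently, copied into an
 * allocated bounce buffer first.
 */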
static int ipath_verbs_send_dma(struct ipath_qp *qp,
		decrement_dma_busy(qp);
	tx = get_txreq(dev);
	tx->txreq.callback = sdma_complete;
	tx->txreq.callback_cookie = tx;
		control |= 1ULL << 31;
	ndesc = ipath_count_sge(ss, len);
		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
		tx->map_len = (hdrwords + 2) << 2;
			decrement_dma_busy(qp);
	tx->map_len = (plen + 1) << 2;
	tx->txreq.map_addr = piobuf;
	tx->txreq.sg_count = 1;
	memcpy(piobuf, hdr, hdrwords << 2);
	ipath_copy_from_sge(piobuf + hdrwords, ss, len);
		decrement_dma_busy(qp);
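/*
 * ipath_verbs_send_pio() writes the packet straight into a chip PIO
 * buffer: the PBC word first, then the header words, then the payload,
 * taking the fast path when the source is a single u32-aligned SGE and
 * falling back to copy_io() otherwise.
 */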
static int ipath_verbs_send_pio(struct ipath_qp *qp,
	u32 *hdr = (u32 *) ibhdr;
	unsigned long flags;
		control |= 1ULL << 31;
	writeq(((u64) control << 32) | plen, piobuf);
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		update_sge(ss, len);
		copy_io(piobuf, ss, len, flush_wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	u32 dwords = (len + 3) >> 2;
	plen = hdrwords + dwords + 1;
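/*
 * Dispatch: the PIO path appears to be used for SMI packets and for
 * chips without a send DMA engine; everything else goes through SDMA.
 */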
		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
	unsigned long flags;
	while (!list_empty(list)) {
	while (qplist != NULL) {
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
static int ipath_query_device(struct ib_device *ibdev,
	memset(props, 0, sizeof(*props));
	props->hw_ver = dev->dd->ipath_pcirev;
	props->max_ah = ib_ipath_max_ahs;
	props->max_pd = ib_ipath_max_pds;
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
static int ipath_query_port(struct ib_device *ibdev,
	memset(props, 0, sizeof(*props));
	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
			       int device_modify_mask,
		to_idev(device)->sys_image_guid =
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
	gid->global.interface_id = dev->dd->ipath_guid;
static int ipath_dealloc_pd(struct ib_pd *ibpd)
	struct ipath_pd *pd = to_ipd(ibpd);
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
	unsigned long flags;
	if (ah_attr->dlid == 0) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
	ah->attr = *ah_attr;
static int ipath_destroy_ah(struct ib_ah *ibah)
	struct ipath_ah *ah = to_iah(ibah);
	unsigned long flags;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
	struct ipath_ah *ah = to_iah(ibah);
	*ah_attr = ah->attr;
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
static int ipath_dealloc_ucontext(struct ib_ucontext *context)
	kfree(to_iucontext(context));
static int ipath_verbs_register_sysfs(struct ib_device *dev);
static void __verbs_timer(unsigned long arg)
	ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
			 0x2074076542310ULL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
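/*
 * Per-device work lists initialised at registration time: three
 * "pending" lists aged by the verbs timer, plus the QPs waiting for
 * PIO buffers (piowait) and those in RNR timeout (rnrwait).
 */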
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
		 IPATH_IDSTR " %s", init_utsname()->nodename);
	if (ipath_verbs_register_sysfs(dev))
	if (!list_empty(&dev->pending[0]) ||
	    !list_empty(&dev->pending[1]) ||
	    !list_empty(&dev->pending[2]))
	if (!list_empty(&dev->piowait))
	if (!list_empty(&dev->rnrwait))
	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
			       (unsigned long long) si->n_bytes);
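/*
 * ipath_verbs_register_sysfs() attaches the attributes defined above
 * (hardware revision, board name, per-opcode packet/byte counters) to
 * the IB class device.
 */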
static int ipath_verbs_register_sysfs(struct ib_device *dev)
	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
				       ipath_class_attributes[i])) {