ipath_ruc.c
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/spinlock.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds
 * (the IBTA-defined delay for each code, rounded up to a whole ms).
 */
const u32 ib_ipath_rnr_table[32] = {
        656,    /* 0 */
        1,      /* 1 */
        1,      /* 2 */
        1,      /* 3 */
        1,      /* 4 */
        1,      /* 5 */
        1,      /* 6 */
        1,      /* 7 */
        1,      /* 8 */
        1,      /* 9 */
        1,      /* A */
        1,      /* B */
        1,      /* C */
        1,      /* D */
        2,      /* E */
        2,      /* F */
        3,      /* 10 */
        4,      /* 11 */
        6,      /* 12 */
        8,      /* 13 */
        11,     /* 14 */
        16,     /* 15 */
        21,     /* 16 */
        31,     /* 17 */
        41,     /* 18 */
        62,     /* 19 */
        82,     /* 1A */
        123,    /* 1B */
        164,    /* 1C */
        246,    /* 1D */
        328,    /* 1E */
        492     /* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * Called with the QP s_lock held and interrupts disabled.
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);

        /* We already did a spin_lock_irqsave(), so just use spin_lock */
        spin_lock(&dev->pending_lock);
        if (list_empty(&dev->rnrwait))
                list_add(&qp->timerwait, &dev->rnrwait);
        else {
                struct list_head *l = &dev->rnrwait;
                struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
                                                  timerwait);
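
                /*
                 * The rnrwait list stores each entry's timeout relative
                 * to the entry before it (a delta queue), so only the
                 * head entry needs decrementing as time passes.  Walk
                 * the list, consuming the deltas, to find this QP's
                 * insertion point.
                 */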
                while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
                        qp->s_rnr_timeout -= nqp->s_rnr_timeout;
                        l = l->next;
                        if (l->next == &dev->rnrwait) {
                                nqp = NULL;
                                break;
                        }
                        nqp = list_entry(l->next, struct ipath_qp,
                                         timerwait);
                }
                if (nqp)
                        nqp->s_rnr_timeout -= qp->s_rnr_timeout;
                list_add(&qp->timerwait, l);
        }
        spin_unlock(&dev->pending_lock);
}

/**
 * ipath_init_sge - validate a RWQE's SG list and build the SGE state
 * @qp: the QP
 * @wqe: the receive work queue entry to check
 * @lengthp: set to the total length of the SG list
 * @ss: the SGE state to initialize
 *
 * Return 1 if OK, 0 if an LKEY check failed (in which case a flush
 * completion has already been queued).
 */
int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
                   u32 *lengthp, struct ipath_sge_state *ss)
{
        int i, j, ret;
        struct ib_wc wc;

        *lengthp = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
                                   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                *lengthp += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ret = 1;
        goto bail;

bad_lkey:
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_rwq *wq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        do {
                if (unlikely(tail == wq->head)) {
                        ret = 0;
                        goto unlock;
                }
                /* Make sure entry is read after head index is read. */
                smp_rmb();
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
                if (wr_id_only)
                        break;
                qp->r_sge.sg_list = qp->r_sg_list;
        } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;

        ret = 1;
        set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from ipath_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ipath_swqe *wqe;
        struct ipath_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
            !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= IPATH_S_BUSY;

again:
        if (sqp->s_last == sqp->s_head)
                goto clr_busy;
        wqe = get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                if (!ipath_get_rwqe(qp, 0))
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                if (!ipath_get_rwqe(qp, 1))
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                if (wqe->length == 0)
                        break;
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
                                            wqe->wr.wr.atomic.remote_addr,
                                            wqe->wr.wr.atomic.rkey,
                                            IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
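                /*
                 * Fetch-and-add: atomic64_add_return() yields the new
                 * value, so subtract sdata to report the prior value.
                 * Compare-and-swap: cmpxchg() already returns the prior
                 * value.  Either way the old contents are written back
                 * to the requester's SGE, as IB atomics require.
                 */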
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

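        /*
         * Copy the payload a chunk at a time: each pass moves
         * min(s_len, sge->length, sge->sge_length) bytes from the
         * sender's current SGE into the receiver's r_sge, stepping to
         * the next SGE or memory-region segment as each is exhausted.
         */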
        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }

        if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        ipath_send_complete(sqp, wqe, send_status);
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
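        /* An RNR retry count of 7 means infinite retries; don't decrement. */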
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
                goto clr_busy;
        sqp->s_flags |= IPATH_S_WAITING;
        dev->n_rnr_naks++;
        sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
        ipath_insert_rnr_queue(sqp);
        goto clr_busy;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        ipath_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ipath_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~IPATH_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~IPATH_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
{
        if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
            qp->ibqp.qp_type == IB_QPT_SMI) {
                unsigned long flags;
522 
525  ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
526  dd->ipath_sendctrl);
527  ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
528  spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
529  }
530 }

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int ipath_no_bufs_available(struct ipath_qp *qp,
                                   struct ipath_ibdev *dev)
{
        unsigned long flags;
        int ret = 1;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, ipath_ib_piobufavail()
         * could be called.  Therefore, put QP on the piowait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
                dev->n_piowait++;
                qp->s_flags |= IPATH_S_WAITING;
                qp->s_flags &= ~IPATH_S_BUSY;
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->piowait))
                        list_add_tail(&qp->piowait, &dev->piowait);
                spin_unlock(&dev->pending_lock);
        } else
                ret = 0;
        spin_unlock_irqrestore(&qp->s_lock, flags);
        if (ret)
                want_buffer(dev->dd, qp);
        return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
                   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
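        /*
         * The first GRH dword packs the IP version (6) in the top four
         * bits, the traffic class in bits 27:20, and the flow label in
         * the low 20 bits.
         */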
        hdr->version_tclass_flow =
                cpu_to_be32((6 << 28) |
                            (grh->traffic_class << 20) |
                            grh->flow_label);
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = 0x1B;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = dev->gid_prefix;
        hdr->sgid.global.interface_id = dev->dd->ipath_guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
                           struct ipath_other_headers *ohdr,
                           u32 bth0, u32 bth2)
{
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
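        /*
         * -x & 3 is the number of pad bytes needed to round x up to a
         * multiple of 4 (e.g. x = 5 gives 3, x = 8 gives 0), so nwords
         * is the payload length in whole 32-bit words.
         */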
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = IPATH_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
                                                 &qp->remote_ah_attr.grh,
                                                 qp->s_hdrwords, nwords);
                lrh0 = IPATH_LRH_GRH;
        }
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
                                       qp->remote_ah_attr.src_path_bits);
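        /*
         * BTH dword 0 carries the P_Key in bits 15:0, the pad count in
         * bits 21:20, and the MigReq bit at bit 22 (always set here).
         */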
        bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets wrong for the QP.
 */
void ipath_do_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        int (*make_req)(struct ipath_qp *qp);
        unsigned long flags;

        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
                ipath_ruc_loopback(qp);
                goto bail;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = ipath_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = ipath_make_uc_req;
        else
                make_req = ipath_make_ud_req;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
            !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                goto bail;
        }

        qp->s_flags |= IPATH_S_BUSY;

        spin_unlock_irqrestore(&qp->s_lock, flags);

again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.  An interrupt will
                 * call ipath_ib_piobufavail() when one is available.
                 */
                if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                     qp->s_cur_sge, qp->s_cur_size)) {
                        if (ipath_no_bufs_available(qp, dev))
                                goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        if (make_req(qp))
                goto again;

bail:;
}

/*
 * This should be called with s_lock held.
 */
void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                         enum ib_wc_status status)
{
        u32 old_last, last;

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
                return;

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc.qp = &qp->ibqp;
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                               status != IB_WC_SUCCESS);
        }

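        /*
         * Advance s_last past the completed entry; if s_cur or s_tail
         * still pointed at it (e.g. when flushing), drag them forward
         * too so all three indices stay consistent.
         */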
        old_last = last = qp->s_last;
        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}